From 91df1ee6a541de63b5f6646626bfb86a6bc470d3 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:06:54 +0800 Subject: [PATCH 01/19] KVM: Define CSV3 key management command id hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Define Hygon CSV3 key management command id and structure. The command definition is available in Hygon CSV3 spec. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- include/uapi/linux/kvm.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 15012e713205..326bb994d9c9 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2356,4 +2356,25 @@ struct kvm_csv_init { #define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) #define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From a87600ec437ed4d6c000c710230ebd5b4b9e9ab9 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:13:58 +0800 Subject: [PATCH 02/19] KVM: SVM: CSV: Add KVM_CSV3_INIT command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command initializes the CSV3 guest's context. The firmware should be initialized before we issue any CSV3 guest commands. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 129 ++++++++++++++++++++++++++++++++++++----- arch/x86/kvm/svm/csv.h | 19 ++---- arch/x86/kvm/svm/sev.c | 22 ++----- 3 files changed, 124 insertions(+), 46 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 562bbf9d95be..abbd95670297 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -25,6 +25,9 @@ /* Function and variable pointers for hooks */ struct hygon_kvm_hooks_table hygon_kvm_hooks; +/* enable/disable CSV3 support */ +static bool csv3_enabled = true; + static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; static DEFINE_MUTEX(csv_cmd_batch_mutex); @@ -134,7 +137,19 @@ static void csv_reset_mempool_offset(void) g_mempool_offset = 0; } -int csv_alloc_trans_mempool(void) +static void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +static int csv_alloc_trans_mempool(void) { int i; @@ -157,18 +172,6 @@ int csv_alloc_trans_mempool(void) return -ENOMEM; } -void csv_free_trans_mempool(void) -{ - int i; - - for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { - kfree(g_trans_mempool[i]); - g_trans_mempool[i] = NULL; - } - - csv_reset_mempool_offset(); -} - static void __maybe_unused *get_trans_data_from_mempool(size_t size) { void *trans = NULL; @@ -795,6 +798,46 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +struct kvm_csv_info { + struct kvm_sev_info *sev; + + bool csv3_active; /* CSV3 enabled guest */ + unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ +}; + +struct kvm_svm_csv { + struct kvm_svm kvm_svm; + struct kvm_csv_info csv_info; +}; + +static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) +{ + return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); +} + +static 
int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_init_data params; + + if (unlikely(csv->csv3_active)) + return -EINVAL; + + if (unlikely(!sev->es_active)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + csv->csv3_active = true; + csv->sev = sev; + csv->nodemask = (unsigned long)params.nodemask; + + return 0; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -836,6 +879,13 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) */ r = csv_receive_update_vmsa(kvm, &sev_cmd); break; + case KVM_CSV3_INIT: + if (!csv3_enabled) { + r = -ENOTTY; + goto out; + } + r = csv3_guest_init(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the @@ -851,6 +901,7 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) r = -EFAULT; +out: mutex_unlock(&kvm->lock); return r; } @@ -1062,7 +1113,7 @@ static int csv_control_post_system_reset(struct kvm *kvm) struct csv_asid_userid *csv_asid_userid_array; -int csv_alloc_asid_userid_array(unsigned int nr_asids) +static int csv_alloc_asid_userid_array(unsigned int nr_asids) { int ret = 0; @@ -1077,14 +1128,59 @@ int csv_alloc_asid_userid_array(unsigned int nr_asids) return ret; } -void csv_free_asid_userid_array(void) +static void csv_free_asid_userid_array(void) { kfree(csv_asid_userid_array); csv_asid_userid_array = NULL; } +#else /* !CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + pr_warn("reuse ASID is unavailable\n"); + return -EFAULT; +} + +static void csv_free_asid_userid_array(void) +{ +} + #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ +void __init 
csv_hardware_setup(unsigned int max_csv_asid) +{ + unsigned int nr_asids = max_csv_asid + 1; + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. + */ + csv_alloc_trans_mempool(); + /* + * Allocate a buffer to support reuse ASID, reuse ASID + * will not work if the allocation fails. + */ + csv_alloc_asid_userid_array(nr_asids); + + /* CSV3 depends on X86_FEATURE_CSV3 */ + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + csv3_enabled = true; + else + csv3_enabled = false; + + pr_info("CSV3 %s (ASIDs 1 - %u)\n", + csv3_enabled ? "enabled" : "disabled", max_csv_asid); +} + +void csv_hardware_unsetup(void) +{ + /* Free the memory that allocated in csv_hardware_setup(). */ + csv_free_trans_mempool(); + csv_free_asid_userid_array(); +} + void csv_exit(void) { } @@ -1104,4 +1200,7 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->vm_attestation = csv_vm_attestation; ops->control_pre_system_reset = csv_control_pre_system_reset; ops->control_post_system_reset = csv_control_post_system_reset; + + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + ops->vm_size = sizeof(struct kvm_svm_csv); } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 0d5d2b7191aa..9b0563062a94 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -41,17 +41,8 @@ struct csv_asid_userid { u32 userid_len; char userid[ASID_USERID_LENGTH]; }; - extern struct csv_asid_userid *csv_asid_userid_array; -int csv_alloc_asid_userid_array(unsigned int nr_asids); -void csv_free_asid_userid_array(void); - -#else - -static inline int csv_alloc_asid_userid_array(unsigned int nr_asids) { return -ENOMEM; } -static inline void csv_free_asid_userid_array(void) { } - #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ #ifdef CONFIG_HYGON_CSV @@ -79,8 +70,9 @@ extern struct hygon_kvm_hooks_table { void __init csv_init(struct kvm_x86_ops 
*ops); void csv_exit(void); -int csv_alloc_trans_mempool(void); -void csv_free_trans_mempool(void); +void __init csv_hardware_setup(unsigned int max_csv_asid); +void csv_hardware_unsetup(void); + int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); bool csv_has_emulated_ghcb_msr(struct kvm *kvm); @@ -98,8 +90,9 @@ static inline bool csv2_state_unstable(struct vcpu_svm *svm) static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } -static inline int csv_alloc_trans_mempool(void) { return 0; } -static inline void csv_free_trans_mempool(void) { } +static inline void __init csv_hardware_setup(unsigned int max_csv_asid) { } +static inline void csv_hardware_unsetup(void) { } + static inline int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } static inline diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0c469487694f..8f3b63627462 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2450,19 +2450,8 @@ void __init sev_hardware_setup(void) */ sev_install_hooks(); - if (sev_enabled) { - /* - * Allocate a memory pool to speed up live migration of - * the CSV/CSV2 guests. If the allocation fails, no - * acceleration is performed at live migration. - */ - csv_alloc_trans_mempool(); - /* - * Allocate a buffer to support reuse ASID, reuse ASID - * will not work if the allocation fails. - */ - csv_alloc_asid_userid_array(nr_asids); - } + if (sev_enabled) + csv_hardware_setup(max_sev_asid); } #endif @@ -2474,11 +2463,8 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; - /* Free the memory that allocated in sev_hardware_setup(). */ - if (is_x86_vendor_hygon()) { - csv_free_trans_mempool(); - csv_free_asid_userid_array(); - } + if (is_x86_vendor_hygon()) + csv_hardware_unsetup(); /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); -- Gitee From e23a88d66dd209d8c87df6b19e15ea8ddd9b7915 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 14:54:45 +0800 Subject: [PATCH 03/19] KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_DATA command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used to load and encrypt data in CSV3 guest's private memory. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 268 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index abbd95670297..9e66ea42e286 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "kvm_cache_regs.h" #include "svm.h" #include "csv.h" @@ -798,10 +799,19 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -810,11 +820,24 @@ struct kvm_svm_csv { struct kvm_csv_info csv_info; }; +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) { return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static bool csv3_guest(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + return sev_es_guest(kvm) && csv->csv3_active; +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -838,6 +861,248 @@ static int csv3_guest_init(struct kvm *kvm, 
struct kvm_sev_cmd *argp) return 0; } +static int csv3_set_guest_private_memory(struct kvm *kvm) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0, error; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; + + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; + } + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + /* + * NPT secure memory size + * + * PTEs_entries = nr_pages + * PDEs_entries = nr_pages / 512 + * PDPEs_entries = nr_pages / (512 * 512) + * PML4Es_entries = nr_pages / (512 * 512 * 512) + * + * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) + + * nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256 + * + * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages + + * nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + 
nr_pages / 32 + * <= nr_pages * 9 + * + */ + smr_entry_shift = csv_get_smr_entry_shift(); + size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + + ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + nr_smr = size >> smr_entry_shift; + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } + + smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), + nodemask_ptr, + get_order(1 << smr_entry_shift)); + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; + } + + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); + + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; + + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); + + /* set secury memory region for launch enrypt data */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &error); + if (ret) + goto e_free_smr; + + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } + + list_splice(&tmp_list, &csv->smr_list); + + goto done; + +e_free_smr: + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +done: + kfree(set_guest_private_memory); + kfree(regions); + return ret; +} + +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_launch_encrypt_data params; + struct csv3_data_launch_encrypt_data *encrypt_data = NULL; + struct 
encrypt_data_block *blocks = NULL; + u8 *data = NULL; + u32 offset; + u32 num_entries, num_entries_in_block; + u32 num_blocks, num_blocks_max; + u32 i, n; + unsigned long pfn, pfn_sme_mask; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + ret = -EFAULT; + goto exit; + } + + if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) { + ret = -EINVAL; + goto exit; + } + + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? 
(n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -886,6 +1151,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) } r = csv3_guest_init(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_DATA: + r = csv3_launch_encrypt_data(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the -- Gitee From 3f0c982b06bf72f86a8df42cd30dc3138c46beb6 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:00:59 +0800 Subject: [PATCH 04/19] KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_VMCB command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used to get secure VMCB physical address which is allocated in private memory by firmware. Besides, shadow VMCB physical address will be updated in secure VMCB. Also the firmware creates a new private page for guest's VMSA per vcpu. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 70 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 9e66ea42e286..814dcac381b5 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1103,6 +1103,73 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registgers per spec. */ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv3_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = 
hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1154,6 +1221,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_DATA: r = csv3_launch_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: + r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the -- Gitee From 9080aa62c48a7e4fa7c4d6d732bfa4f9925f4222 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:07:10 +0800 Subject: [PATCH 05/19] KVM: SVM: CSV: Manage CSV3 guest's nested page table hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- CSV3 guest's nested page table is managed by firmware. All the guest memory is private by default. The firmware maps guest's private memory in nested page table in advance. CSV3 guest may declare some memory regions as shared. It needs to send secure call command with specified memory region to firmware, then firmware frees the private pages which is mapped to the memory region. When guest access the specified memory region by then, nested page fault happens. When nested page fault happens, host needs to issue an external command UPDATE_NPT to firmware. Then firmware helps to map the specified shared pages in nested page table. 
Signed-off-by: Xin Jiang Signed-off-by: yangwencheng Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 564 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 563 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 814dcac381b5..be0609d0ac7d 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include #include #include "kvm_cache_regs.h" #include "svm.h" @@ -806,11 +808,49 @@ struct encrypt_data_block { } entry[512]; }; +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +/* + * Manage shared page in rbtree, the node within the rbtree + * is indexed by gfn. @page points to the page mapped by @gfn + * in NPT. + */ +struct shared_page { + struct rb_node node; + gfn_t gfn; + struct page *page; +}; + +struct shared_page_mgr { + struct rb_root root; + u64 count; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + struct kmem_cache *sp_slab; /* shared page slab */ + struct shared_page_mgr sp_mgr; /* shared page manager */ + struct mutex sp_lock; /* shared page lock */ + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -826,11 +866,97 @@ struct secure_memory_region { u64 hpa; }; +static bool shared_page_insert(struct shared_page_mgr *mgr, + struct shared_page *sp) +{ + struct shared_page *sp_iter; + struct rb_root *root; + struct rb_node **new; + struct rb_node *parent = NULL; + + root = &mgr->root; + new = &(root->rb_node); + + /* Figure out where to put new node */ + while (*new) { + sp_iter = rb_entry(*new, struct shared_page, node); + parent = *new; + + if (sp->gfn < sp_iter->gfn) + new = 
&((*new)->rb_left); + else if (sp->gfn > sp_iter->gfn) + new = &((*new)->rb_right); + else + return false; + } + + /* Add new node and rebalance tree. */ + rb_link_node(&sp->node, parent, new); + rb_insert_color(&sp->node, root); + mgr->count++; + + return true; +} + +static struct shared_page *shared_page_search(struct shared_page_mgr *mgr, + gfn_t gfn) +{ + struct shared_page *sp; + struct rb_root *root; + struct rb_node *node; + + root = &mgr->root; + node = root->rb_node; + while (node) { + sp = rb_entry(node, struct shared_page, node); + if (gfn < sp->gfn) + node = node->rb_left; + else if (gfn > sp->gfn) + node = node->rb_right; + else + return sp; + } + + return NULL; +} + +static struct shared_page *shared_page_remove(struct shared_page_mgr *mgr, + gfn_t gfn) +{ + struct shared_page *sp; + + sp = shared_page_search(mgr, gfn); + if (sp) { + rb_erase(&sp->node, &mgr->root); + mgr->count--; + } + + return sp; +} + static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) { return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static int to_csv3_pg_level(int level) +{ + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; + } + + return ret; +} + static bool csv3_guest(struct kvm *kvm) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; @@ -838,11 +964,23 @@ static bool csv3_guest(struct kvm *kvm) return sev_es_guest(kvm) && csv->csv3_active; } +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; 
struct kvm_csv3_init_data params; + struct kmem_cache *sp_slab; + char slab_name[0x40]; if (unlikely(csv->csv3_active)) return -EINVAL; @@ -854,13 +992,33 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sizeof(params))) return -EFAULT; + memset(slab_name, 0, sizeof(slab_name)); + snprintf(slab_name, sizeof(slab_name), "csv3_%d_sp_slab", sev->asid); + sp_slab = kmem_cache_create(slab_name, sizeof(struct shared_page), + 0, 0, NULL); + if (!sp_slab) + return -ENOMEM; + + csv->sp_slab = sp_slab; + csv->sp_mgr.root = RB_ROOT; + csv->csv3_active = true; csv->sev = sev; csv->nodemask = (unsigned long)params.nodemask; + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->sp_lock); + return 0; } +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) +{ + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + static int csv3_set_guest_private_memory(struct kvm *kvm) { struct kvm_memslots *slots = kvm_memslots(kvm); @@ -1170,6 +1328,405 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), 
GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page *sp; + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; 
+ + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; + return 0; + } + + if (page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + return 0; + } + + kvm_release_pfn_clean(tmp_pfn); + + sp = shared_page_search(&csv->sp_mgr, gfn); + if (!sp) { + sp = kmem_cache_zalloc(csv->sp_slab, GFP_KERNEL); + if (!sp) + return -ENOMEM; + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + kmem_cache_free(csv->sp_slab, sp); + return -ENOMEM; + } + + sp->page = page; + sp->gfn = gfn; + shared_page_insert(&csv->sp_mgr, sp); + } + + *pfn = page_to_pfn(sp->page); + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeds, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). 
+ */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. + */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. + */ + level = level > PG_LEVEL_2M ? 
PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn = KVM_PFN_NOSLOT; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->sp_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->sp_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_vcpu *vcpu; + + struct list_head *smr_head = &csv->smr_list; + struct list_head *pos, *q; + struct secure_memory_region *smr; + struct shared_page *sp; + struct rb_node *node; + unsigned long i = 0; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->sp_lock); + while ((node = rb_first(&csv->sp_mgr.root))) { + sp = rb_entry(node, struct shared_page, node); + rb_erase(&sp->node, &csv->sp_mgr.root); + unpin_user_page(sp->page); + kmem_cache_free(csv->sp_slab, sp); + csv->sp_mgr.count--; + } + mutex_unlock(&csv->sp_lock); + + kmem_cache_destroy(csv->sp_slab); + csv->sp_slab = NULL; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); + list_del(&smr->list); + 
kfree(smr); + } + } + } +} + +static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + u32 error_code) +{ + gfn_t gfn = gpa_to_gfn(gpa); + struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn); + int ret; + int r = -EIO; + + if (kvm_is_visible_memslot(slot)) + ret = csv3_page_fault(vcpu, slot, gfn, error_code); + else + ret = csv3_mmio_page_fault(vcpu, gpa, error_code); + + if (!ret) + r = 1; + + return r; +} + +static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + int ret = -EIO; + + /* + * NPF for csv3 is dedicated. + */ + if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) { + gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2); + u64 error_code = svm->vmcb->control.exit_info_1; + + ret = csv3_handle_page_fault(vcpu, gpa, error_code); + } else { + if (likely(csv_x86_ops.handle_exit)) + ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath); + } + + return ret; +} + +static void csv_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!csv3_guest(kvm)) { + if (likely(csv_x86_ops.guest_memory_reclaimed)) + csv_x86_ops.guest_memory_reclaimed(kvm); + } +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1539,6 +2096,11 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->control_pre_system_reset = csv_control_pre_system_reset; ops->control_post_system_reset = csv_control_post_system_reset; - if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_size = sizeof(struct kvm_svm_csv); + + ops->vm_destroy = csv_vm_destroy; + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; + } } -- Gitee From a0d7a003fe50db149a71ff4828f7c5f44e25e5a0 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 14:28:48 +0800 Subject: [PATCH 06/19] crypto: 
ccp: Define CSV3 migration command id hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Define CSV3 migration command id and structure. The command definition is available in CSV3 spec. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- drivers/crypto/ccp/hygon/csv-dev.c | 7 +++ include/linux/psp-hygon.h | 97 ++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 9d283b06553e..173bd8b61d19 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -72,6 +72,13 @@ int csv_cmd_buffer_len(int cmd) return sizeof(struct csv3_data_set_guest_private_memory); case CSV3_CMD_DBG_READ_VMSA: return sizeof(struct csv3_data_dbg_read_vmsa); case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); default: return 0; } } diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 7d734ce7eea7..1888d9d72592 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -306,6 +306,103 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/** + * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_block: physical address containing multiple guest address + * @guest_len: len of guest block + * @flag: flag of send encrypt data + * 0x00000000: migrate pages in guest block + * 
0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT 
command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + /* * enum VPSP_CMD_STATUS - virtual psp command status * -- Gitee From ef841260856e950d0825f7eade2fe1e8ff2679e0 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:01:57 +0800 Subject: [PATCH 07/19] KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_DATA command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used for encrypting the guest memory page using the encryption context. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 173 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 +++ 2 files changed, 183 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index be0609d0ac7d..edd2af88330c 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -819,6 +819,18 @@ union csv3_page_attr { u64 val; }; +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -1328,6 +1340,164 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. */ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if 
(copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, 
PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto e_free_trans_data; + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1781,6 +1951,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_DATA: + r = csv3_send_encrypt_data(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 326bb994d9c9..6986455c20d6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2363,6 +2363,7 @@ enum csv3_cmd_id { KVM_CSV3_INIT = KVM_CSV3_NR_MIN, KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2377,4 +2378,13 @@ struct 
kvm_csv3_launch_encrypt_data { __u32 len; }; +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From fca88def31a46f4fead991a410fb332a0746fb90 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:05:56 +0800 Subject: [PATCH 08/19] KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_CONTEXT command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used for encrypting the guest cpu context using the encryption context. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 120 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 128 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index edd2af88330c..9299d2c15f04 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1498,6 +1498,123 @@ static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + 
trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1954,6 +2071,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_DATA: r = csv3_send_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_CONTEXT: + r = csv3_send_encrypt_context(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6986455c20d6..6b42e198f250 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2364,6 +2364,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2387,4 +2388,11 @@ struct kvm_csv3_send_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; 
+}; + #endif /* __LINUX_KVM_H */ -- Gitee From 80ab300b1c466a9bb6a9f59daec6a50854772cf0 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:24:11 +0800 Subject: [PATCH 09/19] KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_DATA command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used for copying the incoming buffer into the CSV3 guest's private memory. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 122 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 ++++ 2 files changed, 132 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 9299d2c15f04..ebe73286077e 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1615,6 +1615,125 @@ static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + } + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > 
ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, + &data, &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + 
kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -2074,6 +2193,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_CONTEXT: r = csv3_send_encrypt_context(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_DATA: + r = csv3_receive_encrypt_data(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6b42e198f250..e477cfdc359c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2365,6 +2365,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2395,4 +2396,13 @@ struct kvm_csv3_send_encrypt_context { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From f11dc54e0813a8d5f4ddd08328c7452b7bd71d94 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:29:50 +0800 Subject: [PATCH 10/19] KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT command hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The command is used for copying the incoming context into the CSV3 guest's private memory. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 147 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 155 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index ebe73286077e..a69f4f7d95df 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -831,6 +831,10 @@ struct trans_paddr_block { u64 trans_paddr[512]; }; +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -1734,6 +1738,146 @@ static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { 
+ ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, + &data, &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = 
secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -2196,6 +2340,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_RECEIVE_ENCRYPT_DATA: r = csv3_receive_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: + r = csv3_receive_encrypt_context(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e477cfdc359c..59488665cec6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2366,6 +2366,7 @@ enum csv3_cmd_id { KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2405,4 +2406,11 @@ struct kvm_csv3_receive_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From c97c1d0165cc29f48725448620197d76e751d734 Mon Sep 17 00:00:00 2001 From: yangwencheng Date: Thu, 16 Nov 2023 16:32:43 +0800 Subject: [PATCH 11/19] KVM: SVM: CSV: Add ioctl API to unpin shared pages of CSV3 guest hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The shared pages between CSV3 guest and host are pinned in memory, and managed in list, they will be released to system till the guest VM was destroyed. The new ioctl API supports to unpin the shared pages, and remove them from the list. 
For shared memory allocated from guest user-space process, they must be unpinned dynamically when the process exits. Signed-off-by: yangwencheng Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 56 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 9 +++++++ 2 files changed, 65 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index a69f4f7d95df..dfb74dffa406 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -2033,6 +2033,36 @@ static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, return 0; } +/** + * Return negative error code on fail, + * or return the number of pages unpinned successfully + */ +static int csv3_unpin_shared_memory(struct kvm *kvm, gpa_t gpa, u32 num_pages) +{ + struct kvm_csv_info *csv; + struct shared_page *sp; + gfn_t gfn; + unsigned long i; + int unpin_cnt = 0; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + gfn = gpa_to_gfn(gpa); + + mutex_lock(&csv->sp_lock); + for (i = 0; i < num_pages; i++, gfn++) { + sp = shared_page_remove(&csv->sp_mgr, gfn); + if (sp) { + unpin_user_page(sp->page); + kmem_cache_free(csv->sp_slab, sp); + csv->sp_mgr.count--; + unpin_cnt++; + } + } + mutex_unlock(&csv->sp_lock); + + return unpin_cnt; +} + static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot) { @@ -2277,6 +2307,29 @@ static void csv_guest_memory_reclaimed(struct kvm *kvm) } } +static int csv3_handle_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv3_handle_memory params; + int r = -EINVAL; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + switch (params.opcode) { + case KVM_CSV3_RELEASE_SHARED_MEMORY: + r = csv3_unpin_shared_memory(kvm, params.gpa, params.num_pages); + break; + default: + break; + } + + return r; +}; + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -2343,6 +2396,9 @@ 
static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: r = csv3_receive_encrypt_context(kvm, &sev_cmd); break; + case KVM_CSV3_HANDLE_MEMORY: + r = csv3_handle_memory(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 59488665cec6..adce28acc87a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2367,6 +2367,7 @@ enum csv3_cmd_id { KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_RECEIVE_ENCRYPT_DATA, KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + KVM_CSV3_HANDLE_MEMORY, KVM_CSV3_NR_MAX, }; @@ -2413,4 +2414,12 @@ struct kvm_csv3_receive_encrypt_context { __u32 trans_len; }; +#define KVM_CSV3_RELEASE_SHARED_MEMORY (0x0001) + +struct kvm_csv3_handle_memory { + __u64 gpa; + __u32 num_pages; + __u32 opcode; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From be760c6efdeac83d17dbb07ac0dbc284effc5179 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:27:59 +0800 Subject: [PATCH 12/19] x86/boot/compressed/64: Add CSV3 guest detection hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Check if CSV3 guest is active at boot compressed stage. It checks HYGON hardware with CPUID 0x00000000 and bit30 of MSR 0xc0010131. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/boot/compressed/Makefile | 1 + arch/x86/boot/compressed/csv.c | 38 ++++++++++++++++++++++++++++++ arch/x86/boot/compressed/csv.h | 23 ++++++++++++++++++ arch/x86/boot/compressed/head_64.S | 10 ++++++++ arch/x86/boot/compressed/misc.h | 1 + arch/x86/include/asm/csv.h | 7 ++++++ 6 files changed, 80 insertions(+) create mode 100644 arch/x86/boot/compressed/csv.c create mode 100644 arch/x86/boot/compressed/csv.h diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index b435c55e1e34..5b7de8c8be8d 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -109,6 +109,7 @@ ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o + vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c new file mode 100644 index 000000000000..fab81672383e --- /dev/null +++ b/arch/x86/boot/compressed/csv.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV Support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#include "misc.h" + +#include +#include + +static unsigned int csv3_enabled __section(".data"); + +void csv_set_status(void) +{ + unsigned int eax; + unsigned int ebx; + unsigned int ecx; + unsigned int edx; + + eax = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* HygonGenuine */ + if (ebx == CPUID_VENDOR_HygonGenuine_ebx && + ecx == CPUID_VENDOR_HygonGenuine_ecx && + edx == CPUID_VENDOR_HygonGenuine_edx && + sme_me_mask) { + unsigned long low, high; + + asm volatile("rdmsr\n" : "=a" (low), "=d" (high) : + "c" (MSR_AMD64_SEV)); + + if (low & MSR_CSV3_ENABLED) + csv3_enabled = 1; + } +} diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h new file mode 100644 index 000000000000..2331d4ade97f --- /dev/null +++ b/arch/x86/boot/compressed/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon CSV header for early boot related functions. + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef BOOT_COMPRESSED_CSV_H +#define BOOT_COMPRESSED_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void csv_set_status(void); + +#else + +static inline void csv_set_status(void) { } + +#endif + +#endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 1dcb794c5479..215d74c1a6d9 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -397,6 +397,16 @@ SYM_CODE_START(startup_64) movq %r15, %rdi call sev_enable #endif +#ifdef CONFIG_HYGON_CSV + /* + * Check CSV active status. The CSV and CSV2 guest are indicated by + * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR + * register 0xc0010131, respectively. + * The CSV3 guest is indicated by MSR_CSV3_ENABLED in MSR register + * 0xc0010131. 
+ */ + call csv_set_status +#endif /* Preserve only the CR4 bits that must be preserved, and clear the rest */ movq %cr4, %rax diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index aae1a2db4251..674433c522ed 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -37,6 +37,7 @@ #include #include "tdx.h" +#include "csv.h" #define BOOT_CTYPE_H #include diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index fc575d2f00cf..7d83d1422484 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -48,6 +48,13 @@ static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } #endif /* CONFIG_HYGON_CSV */ +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + #endif /* __ASSEMBLY__ */ #endif /* __ASM_X86_CSV_H__ */ -- Gitee From d2600299dc44d38ae1c69b2c0d5a0259c577290f Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:39:50 +0800 Subject: [PATCH 13/19] x86/boot/compressed/64: Init CSV3 secure call pages hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- CSV3 secure call is a method to communicate with the dedicated secure processor that host cannot tamper with. We declare two dedicated pages named secure call pages to hold the command which guest wants to send to the secure processor. The secure processor always sets only one page of the two as present in nested page table. Read/write action on the two pages will trigger NPF then host must issue an external command to the secure processor. The secure processor gets the guest's command if the fault address is secure call page. CSV3 secure call command is used to set specified memory as shared or private in usual. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/boot/compressed/csv.c | 33 +++++ arch/x86/boot/compressed/csv.h | 2 + arch/x86/boot/compressed/head_64.S | 10 ++ arch/x86/kernel/csv-shared.c | 208 +++++++++++++++++++++++++++++ 4 files changed, 253 insertions(+) create mode 100644 arch/x86/kernel/csv-shared.c diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index fab81672383e..d88da87eef3c 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -7,10 +7,43 @@ #include "misc.h" +#undef __init +#undef __initdata +#undef __pa +#define __init +#define __initdata +#define __pa(x) ((unsigned long)(x)) + #include #include +/* Include code for early secure calls */ +#include "../../kernel/csv-shared.c" + static unsigned int csv3_enabled __section(".data"); +static unsigned int csv3_secure_call_init; + +/* Invoke it before jump to real kernel in case secure call pages are not mapped + * in the identity page table. + * + * If no #VC happens, there is no identity mapping in page table for secure call + * pages. And page fault is not supported in the early stage when real kernel is + * running. As a result, CSV3 guest will shutdown when access secure call pages + * by then. + */ +void csv_init_secure_call_pages(void *boot_params) +{ + if (!csv3_enabled || csv3_secure_call_init) + return; + + /* + * boot_params may be not sanitized, but it's OK to access e820_table + * field. 
+ */ + csv3_scan_secure_call_pages(boot_params); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); + csv3_secure_call_init = 1; +} void csv_set_status(void) { diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h index 2331d4ade97f..3a2196b328c6 100644 --- a/arch/x86/boot/compressed/csv.h +++ b/arch/x86/boot/compressed/csv.h @@ -13,10 +13,12 @@ #ifdef CONFIG_HYGON_CSV void csv_set_status(void); +void csv_init_secure_call_pages(void *boot_params); #else static inline void csv_set_status(void) { } +static inline void csv_init_secure_call_pages(void *boot_params) { } #endif diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 215d74c1a6d9..e02a88b880f1 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -478,6 +478,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) movq %r15, %rdi call initialize_identity_maps +#ifdef CONFIG_HYGON_CSV + /* + * If running as a CSV3 guest, secure call pages must be mapped in + * the identity page table before jumping to the decompressed kernel. + * Scan secure call pages here in safe. + */ + movq %r15, %rdi + call csv_init_secure_call_pages +#endif + /* * Do the extraction, and jump to the new kernel.. */ diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c new file mode 100644 index 000000000000..0763195764da --- /dev/null +++ b/arch/x86/kernel/csv-shared.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV support + * + * This file is shared between decompression boot code and running + * linux kernel. + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include + +/* + ****************************** CSV3 secure call ******************************* + * + * CSV3 guest is based on hygon secure isolated virualization feature. An secure + * processor which resides in hygon SOC manages guest's private memory. 
The + * secure processor allocates or frees private memory for CSV3 guest and manages + * CSV3 guest's nested page table. + * + * As the secure processor is considered as a PCI device in host, CSV3 guest can + * not communicate with it directly. Howerver, CSV3 guest must request the secure + * processor to change its physical memory between private memory and shared + * memory. CSV3 secure call command is a method used to communicate with secure + * processor that host cannot tamper with the data in CSV3 guest. Host can only + * perform an external command to notify the secure processor to handle the + * pending guest's command. + * + * CSV3 secure call pages: + * Secure call pages are two dedicated pages that reserved by BIOS. We define + * secure call pages as page A and page B. During guest launch stage, the secure + * processor will parse the address of secure call pages. The secure processor + * maps the two pages with same private memory page in NPT. The secure processor + * always set one page as present and another page as non-present in NPT. + + * CSV3 secure call main work flow: + * If we write the guest's commands in one page then read them from another page, + * nested page fault happens and the guest exits to host. Then host will perform + * an external command with the gpa(page A or page B) to the secure processor. + * The secure processor checks that the gpa in NPF belongs to secure call pages, + * read the guest's command to handle, then switch the present bit between the + * two pages. + * + * guest page A guest page B + * | | + * ____|______________|____ + * | | + * | nested page table | + * |______________________| + * \ / + * \ / + * \ / + * \ / + * \ / + * secure memory page + * + * CSV3_SECURE_CMD_ENC: + * CSV3 guest declares a specifid memory range as secure. By default, all of + * CSV3 guest's memory mapped as secure. 
+ * The secure processor allocate a block of secure memory and map the memory + * in CSV3 guest's NPT with the specified guest physical memory range in CSV3 + * secure call. + * + * CSV3_SECURE_CMD_DEC: + * CSV3 guest declares a specified memory range as shared. + * The secure processor save the guest physical memory range in its own ram + * and free the range in CSV3 guest's NPT. When CSV3 guest access the memory, + * a new nested page fault happens. + * + * CSV3_SECURE_CMD_RESET: + * CSV3 guest switches all of the shared memory to secure. + * The secure processor resets all the shared memory in CSV3 guest's NPT and + * clears the saved shared memory range. Then the secure process allocates + * secure memory to map in CSV3 guest's NPT. + * + * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: + * CSV3 guest wants to change the secure call pages. + * The secure processor re-init the secure call context. + */ +enum csv3_secure_command_type { + CSV3_SECURE_CMD_ENC = 1, + CSV3_SECURE_CMD_DEC, + CSV3_SECURE_CMD_RESET, + CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, +}; + +/* + * Secure call page fields. + * Secure call page size is 4KB always. We define CSV3 secure call page structure + * as below. + * guid: Must be in the first 128 bytes of the page. Its value should be + * (0xceba2fa59a5d926ful, 0xa556555d276b21abul) always. + * cmd_type: Command to be issued to the secure processor. + * nums: number of entries in the command. + * base_address:Start address of the memory range. + * size: Size of the memory range. + */ +#define SECURE_CALL_ENTRY_MAX (254) + +/* size of secure call cmd is 4KB. */ +struct csv3_secure_call_cmd { + union { + u8 guid[16]; + u64 guid_64[2]; + }; + u32 cmd_type; + u32 nums; + u64 unused; + struct { + u64 base_address; + u64 size; + } entry[SECURE_CALL_ENTRY_MAX]; +}; + +/* csv3 secure call guid, do not change the value. 
*/ +#define CSV3_SECURE_CALL_GUID_LOW 0xceba2fa59a5d926ful +#define CSV3_SECURE_CALL_GUID_HIGH 0xa556555d276b21abul + +static u64 csv3_boot_sc_page_a __initdata = -1ul; +static u64 csv3_boot_sc_page_b __initdata = -1ul; +static u32 early_page_idx __initdata; + +/** + * csv3_scan_secure_call_pages - try to find the secure call pages. + * @boot_params: boot parameters where e820_table resides. + * + * The secure call pages are reserved by BIOS. We scan all the reserved pages + * to check the CSV3 secure call guid bytes. + */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } +} + +/** + * csv3_early_secure_call_ident_map - issue early secure call command at the + * stage where identity page table is created. + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. 
+ */ +void __init csv3_early_secure_call_ident_map(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* identity mapping at the stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} -- Gitee From e943f2c7b68e2587e36983453a1d706149419268 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:26:46 +0800 Subject: [PATCH 14/19] x86/boot/compressed/64: Add CSV3 update page attr(private/shared) hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The function is needed to set encrypted page as private or set decrypted page as shared at the stage where identity page table is created. By default, all memory is set as private. CSV3 guest's NPT is managed by the secure processor. The secure processor must perform the correct action for private/shared memory. The secure processor manages the guest's secure isolated memory which cannot be accessed by other guest or host. As CSV3 feature, CSV3 guest's encrypted memory maps to secure isolated memory and decrypted memory which is shared with host maps to normal memory. At the stage of kernel decompressing, only GHCB page is set as shared. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/boot/compressed/csv.c | 16 ++++++++++++++++ arch/x86/boot/compressed/csv.h | 5 +++++ arch/x86/boot/compressed/ident_map_64.c | 3 +++ 3 files changed, 24 insertions(+) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index d88da87eef3c..18e0bde5bca2 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -23,6 +23,22 @@ static unsigned int csv3_enabled __section(".data"); static unsigned int csv3_secure_call_init; +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr) +{ + if (!csv3_enabled) + return; + + if ((set | clr) & _PAGE_ENC) { + if (set & _PAGE_ENC) + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_ENC); + + if (clr & _PAGE_ENC) + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_DEC); + } +} + /* Invoke it before jump to real kernel in case secure call pages are not mapped * in the identity page table. 
* diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h index 3a2196b328c6..8b8a33551895 100644 --- a/arch/x86/boot/compressed/csv.h +++ b/arch/x86/boot/compressed/csv.h @@ -15,11 +15,16 @@ void csv_set_status(void); void csv_init_secure_call_pages(void *boot_params); +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr); + #else static inline void csv_set_status(void) { } static inline void csv_init_secure_call_pages(void *boot_params) { } +static inline void csv_update_page_attr(unsigned long address, + pteval_t set, pteval_t clr) { } + #endif #endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index aead80ec70a0..a7b4148a943f 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info, if ((set | clr) & _PAGE_ENC) { clflush_page(address); + /* On CSV3, notify secure processor to manage page attr changes */ + csv_update_page_attr(address, set, clr); + /* * If the encryption attribute is being cleared, change the page state * to shared in the RMP table. -- Gitee From ce19f23605e1ab1ab99b4caefdcd1d07dd3dc329 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:32:56 +0800 Subject: [PATCH 15/19] x86/kernel: Add CSV3 early update(enc/dec)/reset memory helpers hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- The functions are needed to set memory as private/shared memory or reset all memory as private memory at the stage where the identity mapping page table is available. Generally, at early runtime of the decompressed kernel, it needs to obtain CSV3 secure call pages then reset all memory as private before switching to new kernel page table. 
Otherwise, prior shared memory regions will be wrongly used and private data in guest may be accessed maliciously. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/include/asm/csv.h | 18 ++++++++++++ arch/x86/kernel/Makefile | 2 ++ arch/x86/kernel/csv.c | 49 +++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_hygon.c | 22 +++++++++++++++ 4 files changed, 91 insertions(+) create mode 100644 arch/x86/kernel/csv.c diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 7d83d1422484..15cce01170e5 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -55,6 +55,24 @@ static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } #define MSR_CSV3_ENABLED_BIT 30 #define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) +#ifdef CONFIG_HYGON_CSV + +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + +#else /* !CONFIG_HYGON_CSV */ + +static inline bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + +#endif /* CONFIG_HYGON_CSV */ + #endif /* __ASSEMBLY__ */ #endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 448309c92f6b..391b37f07a3e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -162,3 +162,5 @@ ifeq ($(CONFIG_X86_64),y) obj-y += vsmp_64.o obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 000000000000..2bc5173e353c --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV 
support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_ENC); +} diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index a02ec8488aba..cfa15a0da565 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -26,6 +26,10 @@ #include #include +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + void print_hygon_cc_feature_info(void) { /* Secure Memory Encryption */ @@ -106,6 +110,24 @@ static bool __init __maybe_unused csv3_check_cpu_support(void) return !!me_mask && csv3_enabled; } +/* csv3_active() indicate whether the guest is protected by CSV3 */ +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + 
/******************************************************************************/ /**************************** CSV3 CMA interfaces *****************************/ /******************************************************************************/ -- Gitee From f90a577090f7016e2dd59d0aa5fa9bd5facabd79 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:36:33 +0800 Subject: [PATCH 16/19] x86/kernel: Set bss decrypted memory as shared in CSV3 guest hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Guest kernel declares bss decrypted memory section to share data with host. In CSV3 guest, the decrypted memory must be set as shared. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/kernel/head64.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 360dcd0d8454..1defe865de67 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,6 +42,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -160,6 +161,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of isolated memory region. */ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* -- Gitee From 5cd480146ce94f7adf466f70126d7eb4930adfae Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:42:09 +0800 Subject: [PATCH 17/19] x86: Update memory shared/private attribute in early boot for CSV3 guest hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Add functions to change the memory shared/private attribute in early boot code. 
When CSV3 is active, the decrypted memory must be mapped to normal (non-isolated) memory in nested page table so that hypervisor and guest can access shared data. But in-place encrypt/decrypt action on the memory is not applicable in CSV3 as CSV3 guest's private page will not be changed to shared page until the secure processor update NPT. Also new secure call pages should be initialized for per cpu to support multiple cpu secure call commands simultaneously. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/include/asm/csv.h | 5 + arch/x86/kernel/csv.c | 186 ++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 14 +++ 3 files changed, 205 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 15cce01170e5..07164817e70a 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -63,6 +63,8 @@ void __init csv_early_reset_memory(struct boot_params *bp); void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + #else /* !CONFIG_HYGON_CSV */ static inline bool csv3_active(void) { return false; } @@ -71,6 +73,9 @@ static inline void __init csv_early_reset_memory(struct boot_params *bp) { } static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 2bc5173e353c..89093c7179af 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -19,6 +19,15 @@ struct secure_call_pages { struct csv3_secure_call_cmd page_b; }; +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static 
DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + void __init csv_early_reset_memory(struct boot_params *bp) { if (!csv3_active()) @@ -47,3 +56,180 @@ void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) csv3_early_secure_call_ident_map(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); } + +static void __init csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure all data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); + data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. 
+ */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where new + * kernel page table is created and early identity page + * table is deprecated . + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. + */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. 
+ */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ? CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? 
paddr : last_paddr); + last_psize += psize; + } else { + secure_call(last_paddr, last_psize >> PAGE_SHIFT, + cmd_type); + last_paddr = paddr; + last_psize = psize; + } + } + + if (last_psize) + secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type); + +out: + return; +} + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) +{ + u64 npages; + + if (!csv3_active()) + return; + + npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; + __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, + npages, enc); +} diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 1873a65b5655..9645bf5d6f95 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -377,6 +378,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) */ clflush_cache_range(__va(pa), size); + if (csv3_active()) + goto skip_in_place_enc_dec; + /* Encrypt/decrypt the contents in-place */ if (enc) { sme_early_encrypt(pa, size); @@ -390,6 +394,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); } +skip_in_place_enc_dec: /* Change the page encryption mask. */ new_pte = pfn_pte(pfn, new_prot); set_pte_atomic(kpte, new_pte); @@ -469,6 +474,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, early_set_mem_enc_dec_hypercall(start, size, enc); out: __flush_tlb_all(); + + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. 
+ */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } -- Gitee From 21882838e14a45d201db2fe83a0b3a423b1b64bb Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:47:23 +0800 Subject: [PATCH 18/19] x86: Add support for changing the memory attribute for CSV3 guest hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Add support for changing the memory to private or shared memory for multiple pages if CSV3 is active. When CSV3 guest wants to share data with host like SWIOTLB or change the unused shared memory to private memory, it must perform an secure call command to the secure processor to update mapping in nested page table. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/include/asm/csv.h | 4 +++ arch/x86/kernel/csv.c | 52 +++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 8 ++++++ 3 files changed, 64 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 07164817e70a..e2fcaf4ded5f 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -65,6 +65,8 @@ void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + #else /* !CONFIG_HYGON_CSV */ static inline bool csv3_active(void) { return false; } @@ -76,6 +78,8 @@ static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) { } +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 89093c7179af..4f80c97798de 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -172,6 +172,50 @@ static void __init 
__csv3_early_secure_call(u64 base_address, u64 num_pages, early_secure_call_page_idx ^= 1; } +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, u64 pages, bool enc) @@ -233,3 +277,11 @@ void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, npages, enc); } + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 9645bf5d6f95..f7d88ad030b9 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -345,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr 
changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_memory_enc_dec(vaddr, npages, enc); + return true; } -- Gitee From 2d76a6f0ec70a66346f07977a479415f1e2d905b Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 13:20:27 +0800 Subject: [PATCH 19/19] x86/mm: Print CSV3 info into kernel log hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY CVE: NA --------------------------- Print Hygon secure virtualization feature. Add CSV3 info in feature list if CSV3 is active. Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- arch/x86/mm/mem_encrypt_hygon.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index cfa15a0da565..8f1e6e4d468d 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -49,6 +49,9 @@ void print_hygon_cc_feature_info(void) /* Encrypted Register State */ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); } /* -- Gitee