diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9696cbbab6b2160291c15b65aabad2fbd102bc37..97df8a375bd567f62d635c7c169bb20a22f4d86a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -51,6 +51,15 @@ struct enc_region {
 	unsigned long size;
 };
 
+#define ASID_USERID_LENGTH	20
+struct csv_asid_userid {
+	int refcnt;		/* reference count of the ASID */
+	u32 userid_len;
+	char userid[ASID_USERID_LENGTH];
+};
+
+static struct csv_asid_userid *csv_asid_userid_array;
+
 /* Called with the sev_bitmap_lock held, or on shutdown */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
@@ -92,13 +101,40 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 	return true;
 }
 
-static int sev_asid_new(bool es_active)
+static int sev_asid_new(bool es_active, const char *userid, u32 userid_len)
 {
 	int pos, min_asid, max_asid;
 	bool retry = true;
+	int i;
 
 	mutex_lock(&sev_bitmap_lock);
 
+	/* For Hygon CPUs, check whether the userid already exists */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    userid && userid_len) {
+		i = (!min_sev_asid) ? 0 : min_sev_asid - 1;
+
+		for (; i < max_sev_asid; i++) {
+			/* Skip ASIDs without a corresponding userid */
+			if (!csv_asid_userid_array[i].userid_len)
+				continue;
+
+			/* Skip if the userid lengths differ */
+			if (csv_asid_userid_array[i].userid_len != userid_len)
+				continue;
+
+			if (!memcmp(csv_asid_userid_array[i].userid,
+				    userid, userid_len)) {
+				pr_debug("Found reusable pos %d\n", i);
+				/* Increase the reference count of the existing userid */
+				csv_asid_userid_array[i].refcnt++;
+
+				mutex_unlock(&sev_bitmap_lock);
+				return i + 1;
+			}
+		}
+	}
+
 	/*
 	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
 	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
@@ -125,6 +161,14 @@ static int sev_asid_new(bool es_active)
 
 	__set_bit(pos, sev_asid_bitmap);
 
+	/* For Hygon CPUs, record the new userid for this ASID */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    userid && userid_len) {
+		memcpy(csv_asid_userid_array[pos].userid, userid, userid_len);
+		csv_asid_userid_array[pos].userid_len = userid_len;
+		csv_asid_userid_array[pos].refcnt = 1;
+	}
+
 	mutex_unlock(&sev_bitmap_lock);
 
 	return pos + 1;
@@ -145,7 +189,22 @@ static void sev_asid_free(int asid)
 	mutex_lock(&sev_bitmap_lock);
 
 	pos = asid - 1;
-	__set_bit(pos, sev_reclaim_asid_bitmap);
+
+	/* For Hygon CPUs, decrease the reference count if a userid exists */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    csv_asid_userid_array[pos].userid_len) {
+		/* If we reach here, the reference count must be greater than 0 */
+		WARN_ON(csv_asid_userid_array[pos].refcnt <= 0);
+
+		if (--csv_asid_userid_array[pos].refcnt == 0) {
+			__set_bit(pos, sev_reclaim_asid_bitmap);
+
+			memset(&csv_asid_userid_array[pos], 0,
+			       sizeof(struct csv_asid_userid));
+		}
+	} else {
+		__set_bit(pos, sev_reclaim_asid_bitmap);
+	}
 
 	for_each_possible_cpu(cpu) {
 		sd = per_cpu(svm_data, cpu);
@@ -202,6 +261,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	bool es_active = argp->id == KVM_SEV_ES_INIT;
 	int asid, ret;
 
+	struct kvm_csv_init params;
+	void *csv_blob = NULL;
+
 	if (kvm->created_vcpus)
 		return -EINVAL;
 
@@ -209,9 +271,36 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (unlikely(sev->active))
 		return ret;
 
-	asid = sev_asid_new(es_active);
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		memset(&params, 0, sizeof(params));
+
+		if (argp->data &&
+		    copy_from_user(&params,
+				   (void __user *)(uintptr_t)argp->data, sizeof(params)))
+			return -EFAULT;
+
+		if (params.userid_addr) {
+			if (params.len >= ASID_USERID_LENGTH) {
+				pr_err("Invalid userid length %u >= %u\n",
+				       params.len, ASID_USERID_LENGTH);
+				return -EINVAL;
+			}
+
+			csv_blob = psp_copy_user_blob(params.userid_addr, params.len);
+			if (IS_ERR(csv_blob)) {
+				pr_err("Failed to copy userid %llx (%u)\n",
+				       params.userid_addr, params.len);
+				return PTR_ERR(csv_blob);
+			}
+		}
+
+		asid = sev_asid_new(es_active, (const char *)csv_blob, params.len);
+	} else {
+		asid = sev_asid_new(es_active, NULL, 0);
+	}
+
 	if (asid < 0)
-		return ret;
+		goto e_free_blob;
 
 	ret = sev_platform_init(&argp->error);
 	if (ret)
@@ -226,6 +315,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 e_free:
 	sev_asid_free(asid);
+e_free_blob:
+	kfree(csv_blob);
+
 	return ret;
 }
 
@@ -2689,7 +2781,22 @@ void __init sev_hardware_setup(void)
 	}
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		/* Initialize the CSV ASID reuse array */
+		csv_asid_userid_array = kcalloc(max_sev_asid,
+				sizeof(struct csv_asid_userid), GFP_KERNEL);
+		if (!csv_asid_userid_array) {
+			bitmap_free(sev_asid_bitmap);
+			sev_asid_bitmap = NULL;
+			bitmap_free(sev_reclaim_asid_bitmap);
+			sev_reclaim_asid_bitmap = NULL;
+			goto out;
+		}
+
+		/* Initialize the buffer used to accelerate migration of CSV/CSV2 guests */
 		if (alloc_trans_mempool()) {
+			kfree(csv_asid_userid_array);
+			csv_asid_userid_array = NULL;
+
 			bitmap_free(sev_asid_bitmap);
 			sev_asid_bitmap = NULL;
 			bitmap_free(sev_reclaim_asid_bitmap);
@@ -2742,8 +2849,10 @@ void sev_hardware_teardown(void)
 	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
 	sev_flush_asids(0, max_sev_asid);
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 		free_trans_mempool();
+		kfree(csv_asid_userid_array);
+	}
 
 	bitmap_free(sev_asid_bitmap);
 	bitmap_free(sev_reclaim_asid_bitmap);
@@ -3082,6 +3191,11 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	/* Assign the asid allocated with this SEV guest */
 	svm->vmcb->control.asid = asid;
 
+	/* If the ASID is shared with other guests, flush the TLB before VMRUN */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    csv_asid_userid_array[asid - 1].userid_len)
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+
 	/*
 	 * Flush guest TLB:
 	 *
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index adcac0eb0b470b0f8400c4c1d7f0deb57ff637b8..bfc1b3137b73a02343aa53a258fd550912ae671a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1784,6 +1784,11 @@ struct kvm_csv_command_batch {
 	__u64 csv_batch_list_uaddr;
 };
 
+struct kvm_csv_init {
+	__u64 userid_addr;
+	__u32 len;
+};
+
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
 #define KVM_DEV_ASSIGN_MASK_INTX	(1 << 2)
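
For context, a minimal userspace sketch (not part of the patch) of how a VMM might pass the new struct kvm_csv_init through KVM_SEV_INIT on a Hygon host so that guests presenting the same userid share one CSV ASID. The KVM_MEMORY_ENCRYPT_OP ioctl and struct kvm_sev_cmd are the existing KVM SEV interface; the helper name, the vm_fd/sev_fd plumbing and the userid value are illustrative assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: vm_fd is an open KVM VM fd, sev_fd an open /dev/sev fd. */
static int csv_vm_init_with_userid(int vm_fd, int sev_fd, const char *userid)
{
	struct kvm_csv_init init = {
		.userid_addr = (uint64_t)(uintptr_t)userid,
		.len = (uint32_t)strlen(userid),	/* must be < ASID_USERID_LENGTH (20) */
	};
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_INIT,			/* or KVM_SEV_ES_INIT for a CSV2 guest */
		.data = (uint64_t)(uintptr_t)&init,
		.sev_fd = (uint32_t)sev_fd,
	};

	/* VMs that pass an identical userid are assigned the same ASID. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}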