diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c
index dbdc87a02f390d3eb62068bf9e313bff6f538afe..f31f01dfa2d648564e31a9f57d2fbbfaf1e9b4c9 100644
--- a/arch/x86/kvm/svm/csv.c
+++ b/arch/x86/kvm/svm/csv.c
@@ -42,6 +42,22 @@ union csv_page_attr {
 	u64 val;
 };
 
+struct guest_paddr_block {
+	struct {
+		u64 share:	1;
+		u64 reserved:	11;
+		u64 gfn:	52;
+	} entry[512];
+};
+
+struct trans_paddr_block {
+	u64 trans_paddr[512];
+};
+
+struct vmcb_paddr_block {
+	u64 vmcb_paddr[512];
+};
+
 enum csv_pg_level {
 	CSV_PG_LEVEL_NONE,
 	CSV_PG_LEVEL_4K,
@@ -484,6 +500,535 @@ static int csv_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+/* Userspace wants to query either header or trans length. */
+static int
+csv_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
+				    struct kvm_csv_send_encrypt_data *params)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct csv_data_send_encrypt_data data;
+	int ret;
+
+	memset(&data, 0, sizeof(data));
+	data.handle = sev->handle;
+	ret = csv_issue_cmd(kvm, CSV_CMD_SEND_ENCRYPT_DATA, &data, &argp->error);
+
+	params->hdr_len = data.hdr_len;
+	params->trans_len = data.trans_len;
+
+	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+			 sizeof(*params)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+#define CSV_SEND_ENCRYPT_DATA_MIGRATE_PAGE	0x00000000
+#define CSV_SEND_ENCRYPT_DATA_SET_READONLY	0x00000001
+static int csv_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct csv_data_send_encrypt_data data;
+	struct kvm_csv_send_encrypt_data params;
+	void *hdr;
+	void *trans_data;
+	struct trans_paddr_block *trans_block;
+	struct guest_paddr_block *guest_block;
+	unsigned long pfn;
+	u32 offset;
+	int ret = 0;
+	int i;
+
+	if (!csv_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	/* userspace wants to query either header or trans length */
+	if (!params.trans_len || !params.hdr_len)
+		return csv_send_encrypt_data_query_lengths(kvm, argp, &params);
+
+	if (!params.trans_uaddr || !params.guest_addr_data ||
+	    !params.guest_addr_len || !params.hdr_uaddr)
+		return -EINVAL;
+
+	if (params.guest_addr_len > sizeof(*guest_block))
+		return -EINVAL;
+
+	if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE)
+		return -EINVAL;
+
+	if ((params.trans_len & PAGE_MASK) == 0 ||
+	    (params.trans_len & ~PAGE_MASK) != 0)
+		return -EINVAL;
+
+	/* allocate memory for header and transport buffer */
+	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+	if (!hdr) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT);
+	if (!guest_block) {
+		ret = -ENOMEM;
+		goto e_free_hdr;
+	}
+
+	if (copy_from_user(guest_block,
+			   (void __user *)(uintptr_t)params.guest_addr_data,
+			   params.guest_addr_len)) {
+		ret = -EFAULT;
+		goto e_free_guest_block;
+	}
+
+	trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT);
+	if (!trans_block) {
+		ret = -ENOMEM;
+		goto e_free_guest_block;
+	}
+	trans_data = vzalloc(params.trans_len);
+	if (!trans_data) {
+		ret = -ENOMEM;
+		goto e_free_trans_block;
+	}
+
+	for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) {
+		pfn = vmalloc_to_pfn(offset + trans_data);
+		trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn));
+		i++;
+	}
+	memset(&data, 0, sizeof(data));
+	data.hdr_address = __psp_pa(hdr);
+	data.hdr_len = params.hdr_len;
+	data.trans_block = __psp_pa(trans_block);
+	data.trans_len = params.trans_len;
+
+	data.guest_block = __psp_pa(guest_block);
+	data.guest_len = params.guest_addr_len;
+	data.handle = sev->handle;
+
+	clflush_cache_range(hdr, params.hdr_len);
+	clflush_cache_range(trans_data, params.trans_len);
+	clflush_cache_range(trans_block, PAGE_SIZE);
+	clflush_cache_range(guest_block, PAGE_SIZE);
+
+	data.flag = CSV_SEND_ENCRYPT_DATA_SET_READONLY;
+	ret = csv_issue_cmd(kvm, CSV_CMD_SEND_ENCRYPT_DATA, &data, &argp->error);
+	if (ret)
+		goto e_free_trans_data;
+
+	kvm_flush_remote_tlbs(kvm);
+
+	data.flag = CSV_SEND_ENCRYPT_DATA_MIGRATE_PAGE;
+	ret = csv_issue_cmd(kvm, CSV_CMD_SEND_ENCRYPT_DATA, &data, &argp->error);
+	if (ret)
+		goto e_free_trans_data;
+
+	ret = -EFAULT;
+	/* copy transport buffer to user space */
+	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
+			 trans_data, params.trans_len))
+		goto e_free_trans_data;
+
+	/* copy guest address block to user space */
+	if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data,
+			 guest_block, params.guest_addr_len))
+		goto e_free_trans_data;
+
+	/* copy packet header to userspace. */
+	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+			 params.hdr_len))
+		goto e_free_trans_data;
+
+	ret = 0;
+e_free_trans_data:
+	vfree(trans_data);
+e_free_trans_block:
+	kfree(trans_block);
+e_free_guest_block:
+	kfree(guest_block);
+e_free_hdr:
+	kfree(hdr);
+exit:
+	return ret;
+}
+
+/* Userspace wants to query either header or trans length. */
+static int
+csv_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
+				       struct kvm_csv_send_encrypt_context *params)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct csv_data_send_encrypt_context data;
+	int ret;
+
+	memset(&data, 0, sizeof(data));
+	data.handle = sev->handle;
+	ret = csv_issue_cmd(kvm, CSV_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error);
+
+	params->hdr_len = data.hdr_len;
+	params->trans_len = data.trans_len;
+
+	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+			 sizeof(*params)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static int csv_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct csv_data_send_encrypt_context data;
+	struct kvm_csv_send_encrypt_context params;
+	void *hdr;
+	void *trans_data;
+	struct trans_paddr_block *trans_block;
+	unsigned long pfn;
+	unsigned long i;
+	u32 offset;
+	int ret = 0;
+
+	if (!csv_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	/* userspace wants to query either header or trans length */
+	if (!params.trans_len || !params.hdr_len)
+		return csv_send_encrypt_context_query_lengths(kvm, argp, &params);
+
+	if (!params.trans_uaddr || !params.hdr_uaddr)
+		return -EINVAL;
+
+	if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE)
+		return -EINVAL;
+
+	/* allocate memory for header and transport buffer */
+	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+	if (!hdr) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT);
+	if (!trans_block) {
+		ret = -ENOMEM;
+		goto e_free_hdr;
+	}
+	trans_data = vzalloc(params.trans_len);
+	if (!trans_data) {
+		ret = -ENOMEM;
+		goto e_free_trans_block;
+	}
+
+	for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) {
+		pfn = vmalloc_to_pfn(offset + trans_data);
+		trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn));
+		i++;
+	}
+
+	memset(&data, 0, sizeof(data));
+	data.hdr_address = __psp_pa(hdr);
+	data.hdr_len = params.hdr_len;
+	data.trans_block = __psp_pa(trans_block);
+	data.trans_len = params.trans_len;
+	data.handle = sev->handle;
+
+	/* flush hdr, trans data, trans block, secure VMSAs */
+	wbinvd_on_all_cpus();
+
+	ret = csv_issue_cmd(kvm, CSV_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error);
+
+	if (ret)
+		goto e_free_trans_data;
+
+	/* copy transport buffer to user space */
+	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
+			 trans_data, params.trans_len)) {
+		ret = -EFAULT;
+		goto e_free_trans_data;
+	}
+
+	/* copy packet header to userspace. */
+	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+			 params.hdr_len)) {
+		ret = -EFAULT;
+		goto e_free_trans_data;
+	}
+
+e_free_trans_data:
+	vfree(trans_data);
+e_free_trans_block:
+	kfree(trans_block);
+e_free_hdr:
+	kfree(hdr);
+exit:
+	return ret;
+}
+
+static int csv_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct csv_data_receive_encrypt_data data;
+	struct kvm_csv_receive_encrypt_data params;
+	void *hdr;
+	void *trans_data;
+	struct trans_paddr_block *trans_block;
+	struct guest_paddr_block *guest_block;
+	unsigned long pfn;
+	int i;
+	u32 offset;
+	int ret = 0;
+
+	if (!csv_guest(kvm))
+		return -ENOTTY;
+
+	if (unlikely(list_empty(&csv->smr_list))) {
+		/* Allocate all the guest memory from CMA */
+		ret = csv_set_guest_private_memory(kvm);
+		if (ret)
+			goto exit;
+	}
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	if (!params.hdr_uaddr || !params.hdr_len ||
+	    !params.guest_addr_data || !params.guest_addr_len ||
+	    !params.trans_uaddr || !params.trans_len)
+		return -EINVAL;
+
+	if (params.guest_addr_len > sizeof(*guest_block))
+		return -EINVAL;
+
+	if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE)
+		return -EINVAL;
+
+	/* allocate memory for header and transport buffer */
+	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+	if (!hdr) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (copy_from_user(hdr,
+			   (void __user *)(uintptr_t)params.hdr_uaddr,
+			   params.hdr_len)) {
+		ret = -EFAULT;
+		goto e_free_hdr;
+	}
+
+	guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT);
+	if (!guest_block) {
+		ret = -ENOMEM;
+		goto e_free_hdr;
+	}
+
+	if (copy_from_user(guest_block,
+			   (void __user *)(uintptr_t)params.guest_addr_data,
+			   params.guest_addr_len)) {
+		ret = -EFAULT;
+		goto e_free_guest_block;
+	}
+
+	trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT);
+	if (!trans_block) {
+		ret = -ENOMEM;
+		goto e_free_guest_block;
+	}
+	trans_data = vzalloc(params.trans_len);
+	if (!trans_data) {
+		ret = -ENOMEM;
+		goto e_free_trans_block;
+	}
+
+	if (copy_from_user(trans_data,
+			   (void __user *)(uintptr_t)params.trans_uaddr,
+			   params.trans_len)) {
+		ret = -EFAULT;
+		goto e_free_trans_data;
+	}
+
+	for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) {
+		pfn = vmalloc_to_pfn(offset + trans_data);
+		trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn));
+		i++;
+	}
+
+	memset(&data, 0, sizeof(data));
+	data.hdr_address = __psp_pa(hdr);
+	data.hdr_len = params.hdr_len;
+	data.trans_block = __psp_pa(trans_block);
+	data.trans_len = params.trans_len;
+	data.guest_block = __psp_pa(guest_block);
+	data.guest_len = params.guest_addr_len;
+	data.handle = sev->handle;
+
+	clflush_cache_range(hdr, params.hdr_len);
+	clflush_cache_range(trans_data, params.trans_len);
+	clflush_cache_range(trans_block, PAGE_SIZE);
+	clflush_cache_range(guest_block, PAGE_SIZE);
+	ret = csv_issue_cmd(kvm, CSV_CMD_RECEIVE_ENCRYPT_DATA, &data,
+			    &argp->error);
+
+e_free_trans_data:
+	vfree(trans_data);
+e_free_trans_block:
+	kfree(trans_block);
+e_free_guest_block:
+	kfree(guest_block);
+e_free_hdr:
+	kfree(hdr);
+exit:
+	return ret;
+}
+
+static int csv_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct csv_data_receive_encrypt_context data;
+	struct kvm_csv_receive_encrypt_context params;
+	void *hdr;
+	void *trans_data;
+	struct trans_paddr_block *trans_block;
+	struct vmcb_paddr_block *shadow_vmcb_block;
+	struct vmcb_paddr_block *secure_vmcb_block;
+	unsigned long pfn;
+	u32 offset;
+	int ret = 0;
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	if (!csv_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	if (!params.trans_uaddr || !params.trans_len ||
+	    !params.hdr_uaddr || !params.hdr_len)
+		return -EINVAL;
+
+	if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE)
+		return -EINVAL;
+
+	/* allocate memory for header and transport buffer */
+	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+	if (!hdr) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (copy_from_user(hdr,
+			   (void __user *)(uintptr_t)params.hdr_uaddr,
+			   params.hdr_len)) {
+		ret = -EFAULT;
+		goto e_free_hdr;
+	}
+
+	trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT);
+	if (!trans_block) {
+		ret = -ENOMEM;
+		goto e_free_hdr;
+	}
+	trans_data = vzalloc(params.trans_len);
+	if (!trans_data) {
+		ret = -ENOMEM;
+		goto e_free_trans_block;
+	}
+
+	if (copy_from_user(trans_data,
+			   (void __user *)(uintptr_t)params.trans_uaddr,
+			   params.trans_len)) {
+		ret = -EFAULT;
+		goto e_free_trans_data;
+	}
+
+	for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) {
+		pfn = vmalloc_to_pfn(offset + trans_data);
+		trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn));
+		i++;
+	}
+
+	secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block),
+				    GFP_KERNEL_ACCOUNT);
+	if (!secure_vmcb_block) {
+		ret = -ENOMEM;
+		goto e_free_trans_data;
+	}
+
+	shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block),
+				    GFP_KERNEL_ACCOUNT);
+	if (!shadow_vmcb_block) {
+		ret = -ENOMEM;
+		goto e_free_secure_vmcb_block;
+	}
+
+	memset(&data, 0, sizeof(data));
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct vcpu_svm *svm = to_svm(vcpu);
+
+		if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) {
+			ret = -EINVAL;
+			goto e_free_shadow_vmcb_block;
+		}
+		shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb);
+		data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]);
+	}
+
+	data.hdr_address = __psp_pa(hdr);
+	data.hdr_len = params.hdr_len;
+	data.trans_block = __psp_pa(trans_block);
+	data.trans_len = params.trans_len;
+	data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block);
+	data.secure_vmcb_block = __psp_pa(secure_vmcb_block);
+	data.handle = sev->handle;
+
+	clflush_cache_range(hdr, params.hdr_len);
+	clflush_cache_range(trans_data, params.trans_len);
+	clflush_cache_range(trans_block, PAGE_SIZE);
+	clflush_cache_range(shadow_vmcb_block, PAGE_SIZE);
+	clflush_cache_range(secure_vmcb_block, PAGE_SIZE);
+
+	ret = csv_issue_cmd(kvm, CSV_CMD_RECEIVE_ENCRYPT_CONTEXT, &data,
+			    &argp->error);
+	if (ret)
+		goto e_free_shadow_vmcb_block;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct vcpu_svm *svm = to_svm(vcpu);
+
+		if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) {
+			ret = -EINVAL;
+			goto e_free_shadow_vmcb_block;
+		}
+
+		svm->vmcb_pa = secure_vmcb_block->vmcb_paddr[i];
+		svm->vcpu.arch.guest_state_protected = true;
+	}
+
+e_free_shadow_vmcb_block:
+	kfree(shadow_vmcb_block);
+e_free_secure_vmcb_block:
+	kfree(secure_vmcb_block);
+e_free_trans_data:
+	vfree(trans_data);
+e_free_trans_block:
+	kfree(trans_block);
+e_free_hdr:
+	kfree(hdr);
+exit:
+	return ret;
+}
+
 static void csv_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa,
 				unsigned long npages)
 {
@@ -849,6 +1394,18 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp)
 	case KVM_CSV_LAUNCH_ENCRYPT_VMCB:
 		r = csv_launch_encrypt_vmcb(kvm, &sev_cmd);
 		break;
+	case KVM_CSV_SEND_ENCRYPT_DATA:
+		r = csv_send_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV_SEND_ENCRYPT_CONTEXT:
+		r = csv_send_encrypt_context(kvm, &sev_cmd);
+		break;
+	case KVM_CSV_RECEIVE_ENCRYPT_DATA:
+		r = csv_receive_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV_RECEIVE_ENCRYPT_CONTEXT:
+		r = csv_receive_encrypt_context(kvm, &sev_cmd);
+		break;
 	default:
 		mutex_unlock(&kvm->lock);
 		if (likely(csv_x86_ops.mem_enc_op))
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index cf8537ba2c443d8ad87cf3d9c057b65c3dfc4885..8d3de764372c69aa41e2966f9dc016d0b1a8944f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -169,6 +169,14 @@ static int sev_cmd_buffer_len(int cmd)
 		return sizeof(struct csv_data_dbg_read_vmsa);
 	case CSV_CMD_DBG_READ_MEM:
 		return sizeof(struct csv_data_dbg_read_mem);
+	case CSV_CMD_SEND_ENCRYPT_DATA:
+		return sizeof(struct csv_data_send_encrypt_data);
+	case CSV_CMD_SEND_ENCRYPT_CONTEXT:
+		return sizeof(struct csv_data_send_encrypt_context);
+	case CSV_CMD_RECEIVE_ENCRYPT_DATA:
+		return sizeof(struct csv_data_receive_encrypt_data);
+	case CSV_CMD_RECEIVE_ENCRYPT_CONTEXT:
+		return sizeof(struct csv_data_receive_encrypt_context);
 	default:
 		break;
 	}
diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h
index 20c0658c1927aa864af57e8cd5f46e010cc50dcd..2a899e32ae89a8a5d6efd7f0002ba052339a2024 100644
--- a/include/linux/psp-csv.h
+++ b/include/linux/psp-csv.h
@@ -188,4 +188,101 @@ struct csv_data_dbg_read_mem {
 	u32 size;				/* In */
 } __packed;
 
+/**
+ * struct csv_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: length of the packet header
+ * @guest_block: physical address of a page containing multiple guest addresses
+ * @guest_len: length of the guest block
+ * @flag: flag of the SEND_ENCRYPT_DATA operation
+ *        0x00000000: migrate the pages in the guest block
+ *        0x00000001: set the pages in the guest block read-only
+ *        others: invalid
+ * @trans_block: physical address of a page containing multiple host memory pages
+ * @trans_len: length of the host memory region
+ */
+struct csv_data_send_encrypt_data {
+	u32 handle;				/* In */
+	u32 reserved;				/* In */
+	u64 hdr_address;			/* In */
+	u32 hdr_len;				/* In/Out */
+	u32 reserved1;				/* In */
+	u64 guest_block;			/* In */
+	u32 guest_len;				/* In */
+	u32 flag;				/* In */
+	u64 trans_block;			/* In */
+	u32 trans_len;				/* In/Out */
+} __packed;
+
+/**
+ * struct csv_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: length of the packet header
+ * @trans_block: physical address of a page containing multiple host memory pages
+ * @trans_len: length of the host memory region
+ */
+struct csv_data_send_encrypt_context {
+	u32 handle;				/* In */
+	u32 reserved;				/* In */
+	u64 hdr_address;			/* In */
+	u32 hdr_len;				/* In/Out */
+	u32 reserved1;				/* In */
+	u64 trans_block;			/* In */
+	u32 trans_len;				/* In/Out */
+} __packed;
+
+/**
+ * struct csv_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header blob
+ * @hdr_len: length of the packet header
+ * @guest_block: system physical address of a page containing multiple guest addresses
+ * @guest_len: length of the guest block memory region
+ * @trans_block: physical address of a page containing multiple host memory pages
+ * @trans_len: length of the host memory region
+ */
+struct csv_data_receive_encrypt_data {
+	u32 handle;				/* In */
+	u32 reserved;				/* In */
+	u64 hdr_address;			/* In */
+	u32 hdr_len;				/* In */
+	u32 reserved1;				/* In */
+	u64 guest_block;			/* In */
+	u32 guest_len;				/* In */
+	u32 reserved2;				/* In */
+	u64 trans_block;			/* In */
+	u32 trans_len;				/* In */
+} __packed;
+
+/**
+ * struct csv_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: length of the packet header
+ * @trans_block: physical address of a page containing multiple host memory pages
+ * @trans_len: length of the host memory region
+ * @shadow_vmcb_block: physical address of a page containing multiple shadow VMCB addresses
+ * @secure_vmcb_block: physical address of a page containing multiple secure VMCB addresses
+ * @vmcb_block_len: length of the shadow/secure VMCB block
+ */
+struct csv_data_receive_encrypt_context {
+	u32 handle;				/* In */
+	u32 reserved;				/* In */
+	u64 hdr_address;			/* In */
+	u32 hdr_len;				/* In */
+	u32 reserved1;				/* In */
+	u64 trans_block;			/* In */
+	u32 trans_len;				/* In */
+	u32 reserved2;				/* In */
+	u64 shadow_vmcb_block;			/* In */
+	u64 secure_vmcb_block;			/* In */
+	u32 vmcb_block_len;			/* In */
+} __packed;
+
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index abd754b034c699ee96ad7d9a983b4f1cf2e8b4ba..adcac0eb0b470b0f8400c4c1d7f0deb57ff637b8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1871,6 +1871,10 @@ enum csv_cmd_id {
 	KVM_CSV_INIT = KVM_CSV_NR_MIN,
 	KVM_CSV_LAUNCH_ENCRYPT_DATA,
 	KVM_CSV_LAUNCH_ENCRYPT_VMCB,
+	KVM_CSV_SEND_ENCRYPT_DATA,
+	KVM_CSV_SEND_ENCRYPT_CONTEXT,
+	KVM_CSV_RECEIVE_ENCRYPT_DATA,
+	KVM_CSV_RECEIVE_ENCRYPT_CONTEXT,
 	KVM_CSV_NR_MAX,
 };
 
@@ -1885,4 +1889,36 @@ struct kvm_csv_launch_encrypt_data {
 	__u32 len;
 };
 
+struct kvm_csv_send_encrypt_data {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 guest_addr_data;
+	__u32 guest_addr_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
+struct kvm_csv_send_encrypt_context {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
+struct kvm_csv_receive_encrypt_data {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 guest_addr_data;
+	__u32 guest_addr_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
+struct kvm_csv_receive_encrypt_context {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
 #endif /* __LINUX_KVM_H */
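
For reviewers, a rough idea of how userspace is expected to drive the new commands on the source side. This sketch is not part of the patch: it assumes the commands are reached through the existing KVM_MEMORY_ENCRYPT_OP VM ioctl that csv_mem_enc_op() sits behind (as with the other KVM_CSV_* commands), and the query-then-transfer flow implied by csv_send_encrypt_context_query_lengths(). The vm_fd/sev_fd descriptors and the function name are placeholders, and error handling is minimal.

/*
 * Illustrative userspace sketch only -- not part of this patch.
 * Assumes vm_fd is the VM file descriptor and sev_fd an open /dev/sev fd.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int csv_send_context_example(int vm_fd, int sev_fd)
{
	struct kvm_csv_send_encrypt_context ctx = { 0 };
	struct kvm_sev_cmd cmd = {
		.id = KVM_CSV_SEND_ENCRYPT_CONTEXT,
		.data = (uintptr_t)&ctx,
		.sev_fd = (uint32_t)sev_fd,
	};
	void *hdr, *trans;
	int ret;

	/* Pass 1: zero lengths ask the kernel/firmware for the required sizes. */
	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	if (!ctx.hdr_len || !ctx.trans_len)
		return ret ? ret : -1;

	hdr = calloc(1, ctx.hdr_len);
	trans = calloc(1, ctx.trans_len);
	if (!hdr || !trans) {
		free(hdr);
		free(trans);
		return -1;
	}

	/*
	 * Pass 2: supply the buffers; on success the packet header and the
	 * encrypted vCPU context come back in hdr/trans for transmission
	 * to the migration target.
	 */
	ctx.hdr_uaddr = (uintptr_t)hdr;
	ctx.trans_uaddr = (uintptr_t)trans;
	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

	/* ... send hdr and trans to the destination here ... */

	free(hdr);
	free(trans);
	return ret;
}

The receive side mirrors this shape: the target VMM fills kvm_csv_receive_encrypt_data / kvm_csv_receive_encrypt_context with the header and transport buffers it received and issues the corresponding KVM_CSV_RECEIVE_* command.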