From c0e36d2e20fedcd314b23e159186d1d6bbbe8c18 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 14:42:49 +0800
Subject: [PATCH 01/17] anolis: crypto: ccp: Introduce init and free helpers
 to manage CSV RING_BUFFER queues

ANBZ: #8572

There are up to two queues created in RING_BUFFER mode, each with two
sub-queues. The sub-queues store the command pointer entries (written
only by the x86 side) and the status entries (written only by the CSV
firmware) respectively. The two queues are the low priority queue
(required) and the high priority queue (optional).

In this change, we introduce csv_ring_buffer_queue_init() to initialize
the CSV RING_BUFFER queues, and csv_ring_buffer_queue_free() to clean
them up.

Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/Makefile      |  3 +-
 drivers/crypto/ccp/psp-ringbuf.c | 29 +++++++++++
 drivers/crypto/ccp/psp-ringbuf.h | 31 ++++++++++++
 drivers/crypto/ccp/sev-dev.c     | 87 ++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/sev-dev.h     |  4 ++
 include/linux/psp-sev.h          | 38 ++++++++++++++
 6 files changed, 191 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/ccp/psp-ringbuf.c
 create mode 100644 drivers/crypto/ccp/psp-ringbuf.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index aa0ba2d17e1e..82be0ac4a0b6 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -12,7 +12,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
                                    sev-dev.o \
                                    tee-dev.o \
                                    platform-access.o \
-                                   dbc.o
+                                   dbc.o \
+                                   psp-ringbuf.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c
new file mode 100644
index 000000000000..485c6da91ca9
--- /dev/null
+++ b/drivers/crypto/ccp/psp-ringbuf.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HYGON Platform Security Processor (PSP) interface
+ *
+ * Copyright (C) 2016-2023 Hygon Info Technologies Ltd.
+ *
+ * Author: Baoshun Fang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "psp-ringbuf.h"
+
+int csv_queue_init(struct csv_queue *queue,
+		   void *buffer, unsigned int size, size_t esize)
+{
+	size /= esize;
+
+	queue->head = 0;
+	queue->tail = 0;
+	queue->esize = esize;
+	queue->data = (u64)buffer;
+	queue->mask = size - 1;
+	queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN);
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h
new file mode 100644
index 000000000000..cb6f1f7b5736
--- /dev/null
+++ b/drivers/crypto/ccp/psp-ringbuf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * HYGON Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2016-2023 Hygon Info Technologies Ltd.
+ *
+ * Author: Baoshun Fang
+ */
+
+#ifndef __PSP_RINGBUF_H__
+#define __PSP_RINGBUF_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+int csv_queue_init(struct csv_queue *queue,
+		   void *buffer, unsigned int size, size_t esize);
+
+#endif /* __PSP_RINGBUF_H__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index c8cd83dc0e77..8717923f4720 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1247,6 +1247,93 @@ int sev_guest_df_flush(int *error)
 }
 EXPORT_SYMBOL_GPL(sev_guest_df_flush);
 
+int csv_ring_buffer_queue_free(void);
+
+static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer)
+{
+	int ret = 0;
+	void *cmd_ptr_buffer = NULL;
+	void *stat_val_buffer = NULL;
+
+	memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue));
+
+	cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL);
+	if (!cmd_ptr_buffer)
+		return -ENOMEM;
+
+	csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer,
+		       CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE);
+
+	stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL);
+	if (!stat_val_buffer) {
+		ret = -ENOMEM;
+		goto free_cmdptr;
+	}
+
+	csv_queue_init(&ring_buffer->stat_val, stat_val_buffer,
+		       CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE);
+	return 0;
+
+free_cmdptr:
+	kfree(cmd_ptr_buffer);
+
+	return ret;
+}
+
+int csv_ring_buffer_queue_init(void)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	int i, ret = 0;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) {
+		ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]);
+		if (ret)
+			goto e_free;
+	}
+
+	return 0;
+
+e_free:
+	csv_ring_buffer_queue_free();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init);
+
+int csv_ring_buffer_queue_free(void)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	struct csv_ringbuffer_queue *ring_buffer;
+	int i;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) {
+		ring_buffer = &sev->ring_buffer[i];
+
+		if (ring_buffer->cmd_ptr.data) {
+			kfree((void *)ring_buffer->cmd_ptr.data);
+			ring_buffer->cmd_ptr.data = 0;
+		}
+
+		if (ring_buffer->stat_val.data) {
+			kfree((void *)ring_buffer->stat_val.data);
+			ring_buffer->stat_val.data = 0;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free);
+
 static void sev_exit(struct kref *ref)
 {
 	misc_deregister(&misc_dev->misc);
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 778c95155e74..372183b8c58f 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -25,6 +25,8 @@
 #include
 #include
 
+#include "psp-ringbuf.h"
+
 #define SEV_CMDRESP_CMD	GENMASK(26, 16)
 #define SEV_CMD_COMPLETE	BIT(1)
 #define SEV_CMDRESP_IOC	BIT(0)
@@ -52,6 +54,8 @@ struct sev_device {
 	u8 build;
 
 	void *cmd_buf;
+
+	struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM];
 };
 
 int sev_dev_init(struct psp_device *psp);
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 2b40efb57274..190b79f0471b 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -86,6 +86,18 @@ enum csv_cmd {
 	CSV_CMD_MAX,
 };
 
+/**
+ * Ring Buffer Mode regions:
+ * There are 4 regions and every region is a 4K area that must be 4K
+ * aligned. To accomplish this, allocate an amount that is the size of
+ * the area plus the required alignment.
+ * The aligned address will be calculated from the returned address.
+ */
+#define CSV_RING_BUFFER_SIZE		(32 * 1024)
+#define CSV_RING_BUFFER_ALIGN		(4 * 1024)
+#define CSV_RING_BUFFER_LEN		(CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN)
+#define CSV_RING_BUFFER_ESIZE		16
+
 /**
  * struct sev_data_init - INIT command parameters
  *
@@ -544,6 +556,24 @@ struct csv_data_hgsc_cert_import {
 	u32 hgsc_cert_len;		/* In */
 } __packed;
 
+#define CSV_COMMAND_PRIORITY_HIGH	0
+#define CSV_COMMAND_PRIORITY_LOW	1
+#define CSV_COMMAND_PRIORITY_NUM	2
+
+struct csv_queue {
+	u32 head;
+	u32 tail;
+	u32 mask;	/* mask = (size - 1), indicates the max element count */
+	u32 esize;	/* size of an element */
+	u64 data;
+	u64 data_align;
+} __packed;
+
+struct csv_ringbuffer_queue {
+	struct csv_queue cmd_ptr;
+	struct csv_queue stat_val;
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 
 /**
@@ -660,6 +690,10 @@ int sev_guest_decommission(struct sev_data_decommission *data, int *error);
 
 void *psp_copy_user_blob(u64 uaddr, u32 len);
 
+int csv_ring_buffer_queue_init(void);
+
+int csv_ring_buffer_queue_free(void);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -683,6 +717,10 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int
 static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
 
+static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
+
+static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
-- 
Gitee

From e1118c354ba6e8c3a6f74a2983b2172a65dfa4c7 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 15:21:33 +0800
Subject: [PATCH 02/17] anolis: crypto: ccp: Add support for enqueuing command
 pointers in CSV RING_BUFFER mode

ANBZ: #8572

In CSV RING_BUFFER mode, X86 will enqueue command pointers to the
sub-queue which stores the command pointers. The priority is given
through a parameter.

Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/psp-ringbuf.c | 51 ++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/psp-ringbuf.h |  2 ++
 drivers/crypto/ccp/sev-dev.c     | 22 ++++++++++++++
 include/linux/psp-sev.h          | 12 ++++++++
 4 files changed, 87 insertions(+)

diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c
index 485c6da91ca9..e2c236b71fec 100644
--- a/drivers/crypto/ccp/psp-ringbuf.c
+++ b/drivers/crypto/ccp/psp-ringbuf.c
@@ -13,6 +13,43 @@
 
 #include "psp-ringbuf.h"
 
+static void enqueue_data(struct csv_queue *queue,
+			 const void *src,
+			 unsigned int len, unsigned int off)
+{
+	unsigned int size = queue->mask + 1;
+	unsigned int esize = queue->esize;
+	unsigned int l;
+	void *data;
+
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	data = (void *)queue->data_align;
+	memcpy(data + off, src, l);
+	memcpy(data, src + l, len - l);
+
+	/*
+	 * Make sure that the data in the ring buffer is up to date before
+	 * incrementing the queue->tail index counter.
+	 */
+	smp_wmb();
+}
+
+static unsigned int queue_avail_size(struct csv_queue *queue)
+{
+	/*
+	 * Due to the nature of unsigned numbers, this always works even
+	 * when tail < head. One element is reserved to distinguish a
+	 * full queue from an empty one.
+	 */
+	return queue->mask - (queue->tail - queue->head);
+}
+
 int csv_queue_init(struct csv_queue *queue,
 		   void *buffer, unsigned int size, size_t esize)
 {
@@ -27,3 +64,17 @@ int csv_queue_init(struct csv_queue *queue,
 
 	return 0;
 }
+
+unsigned int csv_enqueue_cmd(struct csv_queue *queue,
+			     const void *buf, unsigned int len)
+{
+	unsigned int size;
+
+	size = queue_avail_size(queue);
+	if (len > size)
+		len = size;
+
+	enqueue_data(queue, buf, len, queue->tail);
+	queue->tail += len;
+	return len;
+}
diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h
index cb6f1f7b5736..416caefb06a2 100644
--- a/drivers/crypto/ccp/psp-ringbuf.h
+++ b/drivers/crypto/ccp/psp-ringbuf.h
@@ -27,5 +27,7 @@ int csv_queue_init(struct csv_queue *queue,
 		   void *buffer, unsigned int size, size_t esize);
+unsigned int csv_enqueue_cmd(struct csv_queue *queue,
+			     const void *buf, unsigned int len);
 
 #endif /* __PSP_RINGBUF_H__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8717923f4720..3942f5b30850 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1280,6 +1280,28 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer
 	return ret;
 }
 
+int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	struct csv_cmdptr_entry cmdptr = { };
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	cmdptr.cmd_buf_ptr = __psp_pa(data);
+	cmdptr.cmd_id = cmd;
+	cmdptr.cmd_flags = flags;
+
+	if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1)
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(csv_fill_cmd_queue);
+
 int csv_ring_buffer_queue_init(void)
 {
 	struct psp_device *psp = psp_master;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 190b79f0471b..3b79e5c6a056 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -560,6 +560,13 @@ struct csv_data_hgsc_cert_import {
 #define CSV_COMMAND_PRIORITY_LOW	1
 #define CSV_COMMAND_PRIORITY_NUM	2
 
+struct csv_cmdptr_entry {
+	u16 cmd_id;
+	u16 cmd_flags;
+	u32 sw_data;
+	u64 cmd_buf_ptr;
+} __packed;
+
 struct csv_queue {
 	u32 head;
 	u32 tail;
@@ -694,6 +701,8 @@ int csv_ring_buffer_queue_init(void);
 
 int csv_ring_buffer_queue_free(void);
 
+int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -721,6 +730,9 @@ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
 
 static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
 
+static inline
+int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
-- 
Gitee

From d9ae17acc747694385d40ec007ec8fbdcea76774 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 15:35:47 +0800
Subject: [PATCH 03/17] anolis: crypto: ccp: Add support for dequeuing status
 entries in CSV RING_BUFFER mode

ANBZ: #8572

In CSV RING_BUFFER mode, X86 will dequeue the status entries written by
the PSP after the corresponding commands have been handled.
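For illustration, a minimal sketch (the helper name drain_statval() is
hypothetical; error handling is trimmed) of how a caller may drain one
StatVal sub-queue with the new helper, mirroring
csv_check_stat_queue_status() below:

	/* Sketch only: report the first non-zero status entry. */
	static int drain_statval(struct csv_queue *statval_queue, int *psp_ret)
	{
		struct csv_statval_entry statval;

		/* csv_dequeue_stat() returns the number of entries copied out */
		while (csv_dequeue_stat(statval_queue, &statval, 1) == 1) {
			if (statval.status != 0) {
				*psp_ret = statval.status;
				return -EFAULT;
			}
		}

		return 0;
	}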
Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/psp-ringbuf.c | 39 ++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/psp-ringbuf.h |  2 ++
 drivers/crypto/ccp/sev-dev.c     | 32 ++++++++++++++++++++++++++
 include/linux/psp-sev.h          | 11 +++++++++
 4 files changed, 84 insertions(+)

diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c
index e2c236b71fec..3b2f461b672c 100644
--- a/drivers/crypto/ccp/psp-ringbuf.c
+++ b/drivers/crypto/ccp/psp-ringbuf.c
@@ -78,3 +78,42 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue,
 	queue->tail += len;
 	return len;
 }
+
+static void dequeue_data(struct csv_queue *queue,
+			 void *dst, unsigned int len, unsigned int off)
+{
+	unsigned int size = queue->mask + 1;
+	unsigned int esize = queue->esize;
+	unsigned int l;
+
+	off &= queue->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(dst, (void *)(queue->data + off), l);
+	memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l);
+
+	/*
+	 * Make sure that the data is copied before incrementing the
+	 * queue->head index counter.
+	 */
+	smp_wmb();
+}
+
+unsigned int csv_dequeue_stat(struct csv_queue *queue,
+			      void *buf, unsigned int len)
+{
+	unsigned int size;
+
+	size = queue->tail - queue->head;
+	if (len > size)
+		len = size;
+
+	dequeue_data(queue, buf, len, queue->head);
+	queue->head += len;
+	return len;
+}
diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h
index 416caefb06a2..50e014deb5ce 100644
--- a/drivers/crypto/ccp/psp-ringbuf.h
+++ b/drivers/crypto/ccp/psp-ringbuf.h
@@ -29,5 +29,7 @@ int csv_queue_init(struct csv_queue *queue,
 		   void *buffer, unsigned int size, size_t esize);
 unsigned int csv_enqueue_cmd(struct csv_queue *queue,
 			     const void *buf, unsigned int len);
+unsigned int csv_dequeue_stat(struct csv_queue *queue,
+			      void *buf, unsigned int len);
 
 #endif /* __PSP_RINGBUF_H__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3942f5b30850..4ad05513dde9 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1302,6 +1302,38 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags)
 }
 EXPORT_SYMBOL_GPL(csv_fill_cmd_queue);
 
+int csv_check_stat_queue_status(int *psp_ret)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	unsigned int len;
+	int prio;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	for (prio = CSV_COMMAND_PRIORITY_HIGH;
+	     prio < CSV_COMMAND_PRIORITY_NUM; prio++) {
+		do {
+			struct csv_statval_entry statval;
+
+			len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val,
+					       &statval, 1);
+			if (len) {
+				if (statval.status != 0) {
+					*psp_ret = statval.status;
+					return -EFAULT;
+				}
+			}
+		} while (len);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(csv_check_stat_queue_status);
+
 int csv_ring_buffer_queue_init(void)
 {
 	struct psp_device *psp = psp_master;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 3b79e5c6a056..cc4a402caaaa 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -567,6 +567,13 @@ struct csv_cmdptr_entry {
 	u64 cmd_buf_ptr;
 } __packed;
 
+struct csv_statval_entry {
+	u16 status;
+	u16 reserved0;
+	u32 reserved1;
+	u64 reserved2;
+} __packed;
+
 struct csv_queue {
 	u32 head;
 	u32 tail;
@@ -703,6 +710,8 @@ int csv_ring_buffer_queue_free(void);
 
 int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags);
 
+int csv_check_stat_queue_status(int *psp_ret);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -733,6 +742,8 @@ static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
 static inline
 int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; }
 
+static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
-- 
Gitee

From 04873e891241f53f90a635361d0f8adf12ea51c7 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 17:03:54 +0800
Subject: [PATCH 04/17] anolis: crypto: ccp: Add support to switch to CSV
 RING_BUFFER mode

ANBZ: #8572

Invoking the RING_BUFFER command switches the CSV firmware into
RING_BUFFER mode. While the CSV firmware stays in RING_BUFFER mode, it
fetches commands from the CSV RING_BUFFER queues, which are filled by
X86. The CSV firmware will exit RING_BUFFER mode after the SHUTDOWN
command is completed.

Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/sev-dev.c | 51 ++++++++++++++++++++++++++++++++++++
 include/linux/psp-sev.h      | 40 ++++++++++++++++++++++++++++
 2 files changed, 91 insertions(+)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 4ad05513dde9..091f484b2978 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -64,6 +64,8 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
 static bool psp_dead;
 static int psp_timeout;
 
+static int csv_comm_mode = CSV_COMM_MAILBOX_ON;
+
 /* Trusted Memory Region (TMR):
  *   The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
  *   to allocate the memory, which will return aligned memory for the specified
@@ -138,6 +140,8 @@ static int sev_cmd_buffer_len(int cmd)
 		switch (cmd) {
 		case CSV_CMD_HGSC_CERT_IMPORT:
 			return sizeof(struct csv_data_hgsc_cert_import);
+		case CSV_CMD_RING_BUFFER:
+			return sizeof(struct csv_data_ring_buffer);
 		default:
 			break;
 		}
@@ -405,6 +409,48 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 	return ret;
 }
 
+static int __csv_ring_buffer_enter_locked(int *error)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	struct csv_data_ring_buffer *data;
+	struct csv_ringbuffer_queue *low_queue;
+	struct csv_ringbuffer_queue *hi_queue;
+	int ret = 0;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON)
+		return -EEXIST;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW];
+	hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH];
+
+	data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align);
+	data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align);
+	data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align);
+	data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align);
+	data->queue_lo_size = 1;
+	data->queue_hi_size = 1;
+	data->int_on_empty = 1;
+
+	ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error);
+	if (!ret) {
+		iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+		csv_comm_mode = CSV_COMM_RINGBUFFER_ON;
+	}
+
+	kfree(data);
+	return ret;
+}
+
 static int sev_do_cmd(int cmd, void *data, int *psp_ret)
 {
 	int rc;
@@ -550,6 +596,11 @@ static int __sev_platform_shutdown_locked(int *error)
 	if (ret)
 		return ret;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		csv_comm_mode = CSV_COMM_MAILBOX_ON;
+		csv_ring_buffer_queue_free();
+	}
+
 	sev->state = SEV_STATE_UNINIT;
 	dev_dbg(sev->dev, "SEV firmware shutdown\n");
 
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index cc4a402caaaa..f6ae3d50a253 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -81,7 +81,18 @@ enum sev_cmd {
 	SEV_CMD_MAX,
 };
 
+/**
+ * CSV communication state
+ */
+enum csv_comm_state {
+	CSV_COMM_MAILBOX_ON	= 0x0,
+	CSV_COMM_RINGBUFFER_ON	= 0x1,
+
+	CSV_COMM_MAX
+};
+
 enum csv_cmd {
+	CSV_CMD_RING_BUFFER		= 0x00F,
 	CSV_CMD_HGSC_CERT_IMPORT	= 0x300,
 	CSV_CMD_MAX,
 };
@@ -588,6 +599,35 @@ struct csv_ringbuffer_queue {
 	struct csv_queue stat_val;
 } __packed;
 
+/**
+ * struct csv_data_ring_buffer - RING_BUFFER command parameters
+ *
+ * @queue_lo_cmdptr_address: physical address of the region to be used for
+ *                           the low priority queue's CmdPtr ring buffer
+ * @queue_lo_statval_address: physical address of the region to be used for
+ *                            the low priority queue's StatVal ring buffer
+ * @queue_hi_cmdptr_address: physical address of the region to be used for
+ *                           the high priority queue's CmdPtr ring buffer
+ * @queue_hi_statval_address: physical address of the region to be used for
+ *                            the high priority queue's StatVal ring buffer
+ * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1
+ * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1
+ * @queue_lo_threshold: low priority queue size, below which an interrupt
+ *                      may be generated
+ * @queue_hi_threshold: high priority queue size, below which an interrupt
+ *                      may be generated
+ * @int_on_empty: unconditionally interrupt when both queues are found empty
+ */
+struct csv_data_ring_buffer {
+	u64 queue_lo_cmdptr_address;	/* In */
+	u64 queue_lo_statval_address;	/* In */
+	u64 queue_hi_cmdptr_address;	/* In */
+	u64 queue_hi_statval_address;	/* In */
+	u8 queue_lo_size;		/* In */
+	u8 queue_hi_size;		/* In */
+	u16 queue_lo_threshold;		/* In */
+	u16 queue_hi_threshold;		/* In */
+	u16 int_on_empty;		/* In */
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 
 /**
-- 
Gitee

From 5c220a079b64cb3c1cd71633a2185f6b95499a12 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 17:36:19 +0800
Subject: [PATCH 05/17] anolis: crypto: ccp: Add support for issuing commands
 in CSV RING_BUFFER mode

ANBZ: #8572

The CSV firmware stays in Mailbox mode by default. Upon successfully
switching to CSV RING_BUFFER mode, the semantics of the 3 registers
used for communication between X86 and the CSV firmware change:
  - The CmdResp register becomes the RBCtl register. It is only ever
    written by X86.
  - The CmdBufAddr_Hi register becomes the RBTail register. It is only
    ever written by X86.
  - The CmdBufAddr_Lo register becomes the RBHead register. It should
    never be written by X86; the PSP will update it.

The CSV firmware will exit CSV RING_BUFFER mode when it reads an
invalid value from the RBCtl register.
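As a rough sketch of one command round in RING_BUFFER mode (based on
__csv_do_ringbuf_cmds_locked() below; the local variables hi_tail,
lo_tail and rb_head are illustrative only):

	/* 1. Publish new tails: QHI tail in bits 26:16, QLO tail in bits 10:0 */
	rb_tail = (hi_tail << PSP_RBTAIL_QHI_TAIL_SHIFT) | lo_tail;
	iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); /* RBTail */

	/* 2. Ring the doorbell through RBCtl (the old CmdResp register) */
	iowrite32(PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT,
		  sev->io_regs + sev->vdata->cmdresp_reg);		    /* RBCtl */

	/* 3. After the interrupt, read queue heads and the pause status */
	rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); /* RBHead */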
Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/psp-dev.h |  13 ++++
 drivers/crypto/ccp/sev-dev.c | 125 ++++++++++++++++++++++++++++++++++-
 include/linux/psp-sev.h      |   9 +++
 3 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8a4de69399c5..45b6e17d5770 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -17,6 +17,19 @@
 
 #include "sp-dev.h"
 
+#define PSP_RBCTL_X86_WRITES		BIT(31)
+#define PSP_RBCTL_RBMODE_ACT		BIT(30)
+#define PSP_RBCTL_CLR_INTSTAT		BIT(29)
+#define PSP_RBTAIL_QHI_TAIL_SHIFT	16
+#define PSP_RBTAIL_QHI_TAIL_MASK	0x7FF0000
+#define PSP_RBTAIL_QLO_TAIL_MASK	0x7FF
+
+#define PSP_RBHEAD_QHI_HEAD_SHIFT	16
+#define PSP_RBHEAD_QHI_HEAD_MASK	0x7FF0000
+#define PSP_RBHEAD_QLO_HEAD_MASK	0x7FF
+
+#define PSP_RBHEAD_QPAUSE_INT_STAT	BIT(30)
+
 #define MAX_PSP_NAME_LEN		16
 
 extern struct psp_device *psp_master;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 091f484b2978..7e438ffa6ded 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -113,7 +113,9 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
 
 	/* Check if it is SEV command completion: */
 	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
-	if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
+	if (FIELD_GET(PSP_CMDRESP_RESP, reg) ||
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	     (csv_comm_mode == CSV_COMM_RINGBUFFER_ON))) {
 		sev->int_rcvd = 1;
 		wake_up(&sev->int_queue);
 	}
@@ -134,6 +136,22 @@ static int sev_wait_cmd_ioc(struct sev_device *sev,
 	return 0;
 }
 
+static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev,
+					unsigned int *reg,
+					unsigned int timeout)
+{
+	int ret;
+
+	ret = wait_event_timeout(sev->int_queue,
+				 sev->int_rcvd, timeout * HZ);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	*reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+
+	return 0;
+}
+
 static int sev_cmd_buffer_len(int cmd)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
@@ -451,6 +469,102 @@ static int __csv_ring_buffer_enter_locked(int *error)
 	kfree(data);
 	return ret;
 }
 
+static int csv_get_cmd_status(struct sev_device *sev, int prio, int index)
+{
+	struct csv_queue *queue = &sev->ring_buffer[prio].stat_val;
+	struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data;
+
+	return statval[index].status;
+}
+
+static int __csv_do_ringbuf_cmds_locked(int *psp_ret)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	unsigned int rb_tail;
+	unsigned int rb_ctl;
+	int last_cmd_index;
+	unsigned int reg, ret = 0;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	if (psp_dead)
+		return -EBUSY;
+
+	sev = psp->sev_data;
+
+	/* update rb tail */
+	rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+	rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK);
+	rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail
+		    << PSP_RBTAIL_QHI_TAIL_SHIFT);
+	rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK);
+	rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail;
+	iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+	/* update rb ctl to trigger psp irq */
+	sev->int_rcvd = 0;
+
+	/* the PSP responds to x86 only when all queues are empty or an error happens */
+	rb_ctl = PSP_RBCTL_X86_WRITES |
+		 PSP_RBCTL_RBMODE_ACT |
+		 PSP_RBCTL_CLR_INTSTAT;
+	iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg);
+
+	/* wait for all commands in the ring buffer to complete */
+	ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, psp_timeout * 10);
+	if (ret) {
+		if (psp_ret)
+			*psp_ret = 0;
+		dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n");
+		psp_dead = true;
+
+		return ret;
+	}
+
+	/* a command error happened */
+	if (reg & PSP_RBHEAD_QPAUSE_INT_STAT)
+		ret = -EFAULT;
+
+	if (psp_ret) {
+		last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK)
+				 >> PSP_RBHEAD_QHI_HEAD_SHIFT;
+		*psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH,
+					      last_cmd_index);
+		if (*psp_ret == 0) {
+			last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK;
+			*psp_ret = csv_get_cmd_status(sev,
+					CSV_COMMAND_PRIORITY_LOW, last_cmd_index);
+		}
+	}
+
+	return ret;
+}
+
+static int csv_do_ringbuf_cmds(int *psp_ret)
+{
+	struct sev_user_data_status data;
+	int rc;
+
+	mutex_lock(&sev_cmd_mutex);
+
+	rc = __csv_ring_buffer_enter_locked(psp_ret);
+	if (rc)
+		goto cmd_unlock;
+
+	rc = __csv_do_ringbuf_cmds_locked(psp_ret);
+
+	/* exit ring buffer mode by sending a CMD in mailbox mode */
+	__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL);
+	csv_comm_mode = CSV_COMM_MAILBOX_ON;
+
+cmd_unlock:
+	mutex_unlock(&sev_cmd_mutex);
+
+	return rc;
+}
+
 static int sev_do_cmd(int cmd, void *data, int *psp_ret)
 {
 	int rc;
@@ -1588,6 +1702,15 @@ int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
 }
 EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
 
+int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret)
+{
+	if (!filep || filep->f_op != &sev_fops)
+		return -EBADF;
+
+	return csv_do_ringbuf_cmds(psp_ret);
+}
+EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user);
+
 void sev_pci_init(void)
 {
 	struct sev_device *sev = psp_master->sev_data;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index f6ae3d50a253..5a52f324d2c2 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -752,6 +752,12 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags);
 
 int csv_check_stat_queue_status(int *psp_ret);
 
+/**
+ * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring
+ * buffer.
+ */
+int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -784,6 +790,9 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -
 
 static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; }
 
+static inline int
+csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
-- 
Gitee

From c3b343068edab1b5011214a2f9e169809bc23e59 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 17:51:55 +0800
Subject: [PATCH 06/17] anolis: KVM: SVM: Add KVM_CSV_COMMAND_BATCH command
 for applying CSV RING_BUFFER mode

ANBZ: #8572

The API KVM_CSV_COMMAND_BATCH receives data of structure
kvm_csv_command_batch, which embeds a linked list of CSV command
requests from userspace. It will do some preparation work to ensure
the data is available for CSV RING_BUFFER mode, and then issues the
RING_BUFFER command.
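From userspace, the batch is described by a singly linked list that is
walked via next_cmd_addr. A hedged sketch of building and issuing a
batch (cmd_params[], sev_fd and vm_fd are placeholders, and the
per-command helpers for specific command ids are only wired up in
later patches; KVM_MEMORY_ENCRYPT_OP with struct kvm_sev_cmd is the
standard KVM SEV command path):

	struct kvm_csv_batch_list_node nodes[2] = {};
	struct kvm_csv_command_batch batch = {};
	struct kvm_sev_cmd sev_cmd = {};

	nodes[0].cmd_data_addr = (__u64)(uintptr_t)&cmd_params[0];
	nodes[0].next_cmd_addr = (__u64)(uintptr_t)&nodes[1];
	nodes[1].cmd_data_addr = (__u64)(uintptr_t)&cmd_params[1];
	nodes[1].next_cmd_addr = 0;	/* terminates the list */

	batch.command_id = KVM_SEV_SEND_UPDATE_DATA;
	batch.csv_batch_list_uaddr = (__u64)(uintptr_t)&nodes[0];

	sev_cmd.id = KVM_CSV_COMMAND_BATCH;
	sev_cmd.data = (__u64)(uintptr_t)&batch;
	sev_cmd.sev_fd = sev_fd;

	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &sev_cmd);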
Signed-off-by: fangbaoshun Signed-off-by: hanliyang --- arch/x86/include/asm/svm.h | 20 +++++ arch/x86/kvm/svm/sev.c | 176 +++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 14 +++ 3 files changed, 210 insertions(+) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 3ac0ffc4f3e2..24b6a7e60f33 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -680,4 +680,24 @@ DEFINE_GHCB_ACCESSORS(sw_exit_info_2) DEFINE_GHCB_ACCESSORS(sw_scratch) DEFINE_GHCB_ACCESSORS(xcr0) +/* same to the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #endif diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 9d1b396a4172..8b7b0d892b28 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,8 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + static const char sev_vm_mnonce[] = "VM_ATTESTATION"; struct enc_region { @@ -320,6 +322,28 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __sev_issue_cmd(sev->fd, id, data, error); } +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} + static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1851,6 +1875,8 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) return ret; } +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp); + int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1935,6 +1961,14 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; + case KVM_CSV_COMMAND_BATCH: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + } + fallthrough; default: r = -EINVAL; goto out; @@ -3207,3 +3241,145 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) sev_unpin_memory(kvm, pages, n); return ret; } + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + sev_unpin_memory(kvm, item->pages, item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int 
(*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer*/ + switch (cmd) { + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 863f84619a15..30a516adfc6c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1927,6 +1927,9 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, + /* Hygon CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + KVM_SEV_NR_MAX, }; @@ -2023,6 +2026,17 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 1336de3ca974b5df0814dfec313e9dcf7e34d565 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: 
Fri, 30 Jul 2021 18:22:04 +0800 Subject: [PATCH 07/17] anolis: KVM: SVM: Prepare memory pool to allocate buffers for KVM_CSV_COMMAND_BATCH ANBZ: #8572 In the upcoming patches, many buffers need to be allocated in KVM_CSV_COMMAND_BATCH code paths. To avoid memory allocation failures, directly allocate a memory pool in sev_hardware_setup() and free the memory pool in sev_hardware_teardown(). When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA/RECEIVE_UPDATE_DATA commands, it will allocate trans buffers from the memory pool. Signed-off-by: fangbaoshun Signed-off-by: hanliyang --- arch/x86/kvm/svm/sev.c | 102 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 8b7b0d892b28..365fe496f2fd 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -79,6 +79,9 @@ static DEFINE_MUTEX(csv_cmd_batch_mutex); static const char sev_vm_mnonce[] = "VM_ATTESTATION"; +static int alloc_trans_mempool(void); +static void free_trans_mempool(void); + struct enc_region { struct list_head list; unsigned long npages; @@ -2265,6 +2268,16 @@ void __init sev_hardware_setup(void) goto out; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (alloc_trans_mempool()) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } + } + sev_asid_count = max_sev_asid - min_sev_asid + 1; WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); sev_supported = true; @@ -2322,6 +2335,9 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + free_trans_mempool(); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -3242,6 +3258,91 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +static int alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "CSV: g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + g_mempool_offset = 0; + return 0; + +free_trans_mempool: + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + return -ENOMEM; +} + +static void free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + g_mempool_offset = 0; +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } 
else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("CSV: mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -3377,6 +3478,7 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) err_free_ring_buffer_infos_items: csv_ringbuf_infos_free(kvm, ringbuf_infos); kfree(ringbuf_infos); + reset_mempool_offset(); err_free_ring_buffer: csv_ring_buffer_queue_free(); -- Gitee From 10ab0ac540d6726c90c35c00f02872d80349cdf4 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:38:41 +0800 Subject: [PATCH 08/17] anolis: KVM: SVM: Add SEND_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA commands, it need execute 3 steps: 1. Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command 3. Copy the output of RING_BUFFER command to userspace In this change, we add sev_send_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1, and add sev_send_update_data_copy_to_user() to copy output userspace as dictated in step 3. Signed-off-by: fangbaoshun Signed-off-by: hanliyang --- arch/x86/kvm/svm/sev.c | 143 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 365fe496f2fd..2abacb878c07 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3370,6 +3370,145 @@ static int csv_ringbuf_infos_free(struct kvm *kvm, return 0; } +static int +sev_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. 
+ */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. + */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans_data; + item->data_vaddr = (uintptr_t)data; + item->hdr_uaddr = params.hdr_uaddr; + item->trans_uaddr = params.trans_uaddr; + item->hdr_len = params.hdr_len; + item->trans_len = params.trans_len; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + sev_unpin_memory(kvm, guest_page, n); + +finish: + return ret; +} + +static int +sev_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); @@ -3384,6 +3523,10 @@ static int get_cmd_helpers(__u32 cmd, /* copy commands to ring buffer*/ switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = sev_send_update_data_to_ringbuf; + *to_user_fn = sev_send_update_data_copy_to_user; + break; default: ret = -EINVAL; break; -- Gitee From 1afae13a3f020249346e59728f8e41bdafc9e6a5 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:50:54 +0800 Subject: [PATCH 09/17] anolis: KVM: SVM: Add RECEIVE_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handling a batch of RECEIVE_UPDATE_DATA commands, it need execute 2 steps: 1. Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command In this change, we add sev_receive_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1. Signed-off-by: fangbaoshun Signed-off-by: hanliyang --- arch/x86/kvm/svm/sev.c | 121 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2abacb878c07..71d205ec6e65 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3509,6 +3509,123 @@ sev_send_update_data_copy_to_user(struct kvm *kvm, return ret; } +static int +sev_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. 
+ */ + sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, whitch will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. + */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_unpin: + sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + +finish: + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); @@ -3527,6 +3644,10 @@ static int get_cmd_helpers(__u32 cmd, *to_ringbuf_fn = sev_send_update_data_to_ringbuf; *to_user_fn = sev_send_update_data_copy_to_user; break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = sev_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; default: ret = -EINVAL; break; -- Gitee From 799094b7b62e082476b5e03b952711bb98e47b70 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 24 May 2022 22:03:04 +0800 Subject: [PATCH 10/17] anolis: crypto: ccp: Fix definition of struct sev_data_send_update_vmsa ANBZ: #8572 The current definition of struct sev_data_send_update_vmsa in include/linux/psp-sev.h does not comply with SEV API spec. Fix it here. Signed-off-by: hanliyang --- include/linux/psp-sev.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 5a52f324d2c2..f8019035f403 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -415,6 +415,7 @@ struct sev_data_send_update_data { */ struct sev_data_send_update_vmsa { u32 handle; /* In */ + u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; -- Gitee From 896ade557c1f40e3e9a30a701ae7efb9f7ac8ef9 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:07:08 -0400 Subject: [PATCH 11/17] anolis: KVM: SVM: Add KVM_SEV_SEND_UPDATE_VMSA command ANBZ: #8572 The command is used for encrypting the VCPU register states of CSV2 guest using the encryption context created with KVM_SEV_SEND_START. Signed-off-by: hanliyang --- arch/x86/kvm/svm/sev.c | 115 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 123 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 71d205ec6e65..2d451e7d3bc0 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1396,6 +1396,115 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +__sev_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_sev_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int sev_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_sev_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_vmsa_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. 
*/ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1949,6 +2058,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; + case KVM_SEV_SEND_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_send_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 30a516adfc6c..5bd1c36a6420 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2008,6 +2008,14 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; +struct kvm_sev_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_sev_receive_start { __u32 handle; __u32 policy; -- Gitee From cc28426ebc78ffa0103b9454d528e9592c243aec Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:39:49 -0400 Subject: [PATCH 12/17] anolis: KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_VMSA command ANBZ: #8572 The command is used for copying the incoming buffer into the VMSA memory regions of CSV2 guest. Signed-off-by: hanliyang --- arch/x86/kvm/svm/sev.c | 81 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 ++++ 2 files changed, 89 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2d451e7d3bc0..1b75dacfeb3e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1680,6 +1680,81 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key), and + * the cache may contain dirty, unencrypted data. + */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. 
+	vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask;
+	vmsa->guest_len = PAGE_SIZE;
+	vmsa->handle = sev->handle;
+
+	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error);
+
+	if (!ret)
+		vcpu->arch.guest_state_protected = true;
+
+	kfree(vmsa);
+e_free_trans:
+	kfree(trans);
+e_free_hdr:
+	kfree(hdr);
+
+	return ret;
+}
+
 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -2076,6 +2151,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 	case KVM_SEV_RECEIVE_UPDATE_DATA:
 		r = sev_receive_update_data(kvm, &sev_cmd);
 		break;
+	case KVM_SEV_RECEIVE_UPDATE_VMSA:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			r = sev_receive_update_vmsa(kvm, &sev_cmd);
+		else
+			r = -EINVAL;
+		break;
 	case KVM_SEV_RECEIVE_FINISH:
 		r = sev_receive_finish(kvm, &sev_cmd);
 		break;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 5bd1c36a6420..b8df7672e3bc 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -2034,6 +2034,14 @@ struct kvm_sev_receive_update_data {
 	__u32 trans_len;
 };
 
+struct kvm_sev_receive_update_vmsa {
+	__u32 vcpu_id;
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
 struct kvm_csv_batch_list_node {
 	__u64 cmd_data_addr;
 	__u64 addr;
-- 
Gitee

From faefcb28bd674ce53118059bb93f4d44a6e8bf67 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Wed, 7 Apr 2021 02:46:11 -0400
Subject: [PATCH 13/17] anolis: KVM: x86: Restore control registers in
 __set_sregs() to support CSV2 guest live migration

ANBZ: #8572

When a CSV2 guest is migrated to the recipient, the KVM on the
recipient's side needs to update the guest context so that the guest
can continue to run. The control register state is necessary for
updating the guest context.

Allow the control registers to be updated in __set_sregs() so that the
CSV2 guest can continue running correctly after being migrated to the
recipient.
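
For illustration only (not part of this patch): once this change is in
place, a recipient-side VMM can replay the control-register state it
received from the source with the standard KVM_SET_SREGS ioctl. The
vcpu_fd and the saved kvm_sregs below are placeholders for the VMM's
own migration state.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Replay CR0/CR2/CR3/CR4/CR8 captured on the source side. With
	 * this patch, KVM applies them even though
	 * guest_state_protected is set (Hygon-only path). */
	static int restore_sregs(int vcpu_fd, struct kvm_sregs *saved)
	{
		return ioctl(vcpu_fd, KVM_SET_SREGS, saved);
	}
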
Signed-off-by: hanliyang
---
 arch/x86/kvm/x86.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c1708306e561..24c9f663472e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11517,21 +11517,24 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 	if (kvm_set_apic_base(vcpu, &apic_base_msr))
 		return -EINVAL;
 
-	if (vcpu->arch.guest_state_protected)
+	if (vcpu->arch.guest_state_protected &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 
-	dt.size = sregs->idt.limit;
-	dt.address = sregs->idt.base;
-	static_call(kvm_x86_set_idt)(vcpu, &dt);
-	dt.size = sregs->gdt.limit;
-	dt.address = sregs->gdt.base;
-	static_call(kvm_x86_set_gdt)(vcpu, &dt);
+	if (!vcpu->arch.guest_state_protected) {
+		dt.size = sregs->idt.limit;
+		dt.address = sregs->idt.base;
+		static_call(kvm_x86_set_idt)(vcpu, &dt);
+		dt.size = sregs->gdt.limit;
+		dt.address = sregs->gdt.base;
+		static_call(kvm_x86_set_gdt)(vcpu, &dt);
 
-	vcpu->arch.cr2 = sregs->cr2;
-	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
-	vcpu->arch.cr3 = sregs->cr3;
-	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
-	static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+		vcpu->arch.cr2 = sregs->cr2;
+		*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
+		vcpu->arch.cr3 = sregs->cr3;
+		kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+		static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+	}
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -11545,6 +11548,9 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 	*mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
 
+	if (vcpu->arch.guest_state_protected)
+		return 0;
+
 	if (update_pdptrs) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		if (is_pae_paging(vcpu)) {
-- 
Gitee

From cf3834866a10c64518b5f8bea85a780169161870 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Tue, 15 Jun 2021 11:29:13 +0800
Subject: [PATCH 14/17] anolis: KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to
 userspace for CSV2 guest

ANBZ: #8572

VMCB.control.ghcb_gpa contains the info necessary to support a running
CSV2 guest. At present, it includes the following points:

1. For the GHCB MSR protocol, ghcb_gpa stores the negotiation result
2. For the GHCB page protocol, ghcb_gpa stores the GPA of the GHCB page

In addition, the AP vCPU's SIPI state and the GHCB page mapping state
are temporarily stored in KVM. When a CSV2 guest is migrated to the
recipient, KVM needs to restore the VMCB.control.ghcb_gpa, the vCPU's
SIPI state and the GHCB page mapping state that were present on the
source side.

This patch supports exporting MSR_AMD64_SEV_ES_GHCB to userspace. KVM
collects all the info described above and returns it to userspace when
userspace requests to get MSR_AMD64_SEV_ES_GHCB, and restores all of it
when userspace requests to set MSR_AMD64_SEV_ES_GHCB.
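
For illustration only (not part of this patch): a VMM could migrate
this state with the existing KVM_GET_MSRS/KVM_SET_MSRS ioctls. The
src_vcpu_fd/dst_vcpu_fd names below are placeholders, and the sketch
assumes MSR_AMD64_SEV_ES_GHCB (0xc0010130) is visible to userspace.

	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} m = {
		.hdr = { .nmsrs = 1 },
		.entry = { .index = MSR_AMD64_SEV_ES_GHCB },
	};

	/* Source: host-initiated read of ghcb_gpa plus the KVM status bits. */
	ioctl(src_vcpu_fd, KVM_GET_MSRS, &m);
	/* ... transfer m.entry.data to the recipient ... */
	/* Recipient: restore ghcb_gpa, remap the GHCB page, restore SIPI state. */
	ioctl(dst_vcpu_fd, KVM_SET_MSRS, &m);
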
Signed-off-by: hanliyang
---
 arch/x86/kvm/svm/sev.c   | 19 +++++++++
 arch/x86/kvm/svm/svm.c   | 88 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.h   | 42 +++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c   |  1 +
 arch/x86/kvm/x86.c       | 13 ++++++
 include/uapi/linux/kvm.h |  2 +
 6 files changed, 165 insertions(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 1b75dacfeb3e..07cfd0958b58 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3945,3 +3945,22 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 	return ret;
 }
+
+int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa)
+{
+	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
+		/* Unable to map GHCB from guest */
+		vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n",
+			    ghcb_gpa);
+
+		svm->sev_es.receiver_ghcb_map_fail = true;
+		return -EINVAL;
+	}
+
+	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+	svm->sev_es.receiver_ghcb_map_fail = false;
+
+	pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa);
+
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 13cb7b2919c7..7353c45982f6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2946,6 +2946,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_DE_CFG:
 		msr_info->data = svm->msr_decfg;
 		break;
+	case MSR_AMD64_SEV_ES_GHCB:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			/*
+			 * Only support userspace get/set from/to
+			 * vmcb.control.ghcb_gpa
+			 */
+			if (!msr_info->host_initiated ||
+			    !sev_es_guest(svm->vcpu.kvm))
+				return 1;
+
+			msr_info->data = svm->vmcb->control.ghcb_gpa;
+
+			/* Only set status bits when using GHCB page protocol */
+			if (msr_info->data &&
+			    !is_ghcb_msr_protocol(msr_info->data)) {
+				if (svm->sev_es.ghcb)
+					msr_info->data |= GHCB_MSR_MAPPED_MASK;
+
+				if (svm->sev_es.received_first_sipi)
+					msr_info->data |=
+						GHCB_MSR_RECEIVED_FIRST_SIPI_MASK;
+			}
+			break;
+		}
+		return 1;
 	default:
 		return kvm_get_msr_common(vcpu, msr_info);
 	}
@@ -3181,6 +3206,47 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->msr_decfg = data;
 		break;
 	}
+	case MSR_AMD64_SEV_ES_GHCB:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			/*
+			 * Only support userspace get/set from/to
+			 * vmcb.control.ghcb_gpa
+			 */
+			if (!msr->host_initiated ||
+			    !sev_es_guest(svm->vcpu.kvm))
+				return 1;
+
+			/*
+			 * Value 0 means uninitialized userspace MSR data;
+			 * userspace needs to get the initial MSR data
+			 * afterwards.
+			 */
+			if (!data)
+				return 0;
+
+			/* Extract status info when using GHCB page protocol */
+			if (!is_ghcb_msr_protocol(data)) {
+				if (!svm->sev_es.ghcb &&
+				    (data & GHCB_MSR_MAPPED_MASK)) {
+					/*
+					 * This happens on the recipient of
+					 * migration; return an error if we
+					 * cannot map the ghcb page.
+					 */
+					if (sev_es_ghcb_map(to_svm(vcpu),
+							data & ~GHCB_MSR_KVM_STATUS_MASK))
+						return 1;
+				}
+
+				if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK)
+					svm->sev_es.received_first_sipi = true;
+
+				data &= ~GHCB_MSR_KVM_STATUS_MASK;
+			}
+
+			svm->vmcb->control.ghcb_gpa = data;
+			break;
+		}
+		return 1;
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4136,6 +4202,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	trace_kvm_entry(vcpu);
 
+	/*
+	 * On the recipient side of a CSV2 guest, fake the exit code as
+	 * SVM_EXIT_ERR and return directly if we failed to map the
+	 * necessary GHCB page. When the exit code is handled afterwards,
+	 * KVM can exit to userspace and stop the guest.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+	    sev_es_guest(vcpu->kvm) &&
+	    svm->sev_es.receiver_ghcb_map_fail) {
+		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+		return EXIT_FASTPATH_NONE;
+	}
+
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -4310,6 +4389,15 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 		if (kvm && sev_es_guest(kvm))
 			return false;
 		break;
+	case MSR_AMD64_SEV_ES_GHCB:
+		/*
+		 * Only CSV2 guests support exporting this MSR; this should
+		 * be determined after KVM_CREATE_VM.
+		 */
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON ||
+		    (kvm && !sev_es_guest(kvm)))
+			return false;
+		break;
 	default:
 		break;
 	}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index b2249897a7db..82ff3f931dcf 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -202,6 +202,9 @@ struct vcpu_sev_es_state {
 	u32 ghcb_sa_len;
 	bool ghcb_sa_sync;
 	bool ghcb_sa_free;
+
+	/* CSV2 migrated ghcb mapping state support */
+	bool receiver_ghcb_map_fail;
 };
 
 struct vcpu_svm {
@@ -665,6 +668,44 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
 #define GHCB_VERSION_MAX	1ULL
 #define GHCB_VERSION_MIN	1ULL
 
+/*
+ * CSV2 live migration support:
+ * If the migrated MSR_AMD64_SEV_ES_GHCB value does not use the GHCB MSR
+ * protocol, reuse bits [52-63] to indicate the vcpu status. The
+ * following status bits are currently included:
+ *  * ghcb_map: indicates whether the GHCB page was mapped. The mapped
+ *    GHCB page may be filled with GPRs before VMRUN, so we must
+ *    remap the GHCB page on the recipient's side.
+ *  * received_first_sipi: indicates the AP's INIT-SIPI-SIPI stage.
+ *    Reusing these bits for received_first_sipi is acceptable because
+ *    the runtime stage of the guest's Linux only uses the GHCB page
+ *    protocol. It's unlikely that a migration hits other stages of
+ *    the guest's Linux; if it does, AP bringup may fail, which will
+ *    not impact the user payload.
+ * Other bits keep their original meaning. (See GHCB spec 2.3.1 for
+ * details.)
+ */
+#define GHCB_MSR_KVM_STATUS_POS		52
+#define GHCB_MSR_KVM_STATUS_BITS	12
+#define GHCB_MSR_KVM_STATUS_MASK				\
+	((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1)		\
+	 << GHCB_MSR_KVM_STATUS_POS)
+#define GHCB_MSR_MAPPED_POS		63
+#define GHCB_MSR_MAPPED_BITS		1
+#define GHCB_MSR_MAPPED_MASK					\
+	((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1)			\
+	 << GHCB_MSR_MAPPED_POS)
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS	62
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS	1
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK			\
+	((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1)	\
+	 << GHCB_MSR_RECEIVED_FIRST_SIPI_POS)
+
+
+static inline bool is_ghcb_msr_protocol(u64 ghcb_val)
+{
+	return ghcb_val & GHCB_MSR_INFO_MASK;
+}
 
 extern unsigned int max_sev_asid;
@@ -694,6 +735,7 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
 int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len);
+int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa);
 
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9bba5352582c..d9055688df51 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7007,6 +7007,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
 		return nested;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
 	case MSR_AMD64_TSC_RATIO:
+	case MSR_AMD64_SEV_ES_GHCB:
 		/* This is AMD only. */
 		return false;
 	default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 24c9f663472e..e35409d47252 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1564,6 +1564,8 @@ static const u32 emulated_msrs_all[] = {
 
 	MSR_K7_HWCR,
 	MSR_KVM_POLL_CONTROL,
+
+	MSR_AMD64_SEV_ES_GHCB,
 };
 
 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
@@ -4633,6 +4635,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_X86_NOTIFY_VMEXIT:
 		r = kvm_caps.has_notify_vmexit;
 		break;
+	case KVM_CAP_SEV_ES_GHCB:
+		r = 0;
+
+		/* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB,
+		 * but only CSV2 guests support exporting the emulated
+		 * MSR_AMD64_SEV_ES_GHCB.
+		 */
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			r = static_call(kvm_x86_has_emulated_msr)(kvm,
+					MSR_AMD64_SEV_ES_GHCB);
+		break;
 	default:
 		break;
 	}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b8df7672e3bc..0163a275c109 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1201,6 +1201,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 
+#define KVM_CAP_SEV_ES_GHCB 500
+
 #ifdef KVM_CAP_IRQ_ROUTING
 
 struct kvm_irq_routing_irqchip {
-- 
Gitee

From 8386d8900d0c374665475ec65b5e9f4121de8e9e Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Tue, 8 Aug 2023 23:47:22 +0800
Subject: [PATCH 15/17] anolis: KVM: x86: Introduce
 control_{pre,post}_system_reset ioctl interfaces

ANBZ: #8572

In the upcoming patches, we will add support for rebooting CSV2 guests.
In order to support rebooting a CSV2 guest, we will set
vcpu->arch.guest_state_protected to false before VMRUN, so that the VMM
can initialize the vCPU state and VMSA, and then set
vcpu->arch.guest_state_protected back to true to bypass unexpected
behaviour in KVM.

Besides, a cache flush is necessary when rebooting a memory encrypted
guest.

Introduce the control_{pre,post}_system_reset ioctl interfaces to
support rebooting memory encrypted guests correctly.
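
For illustration only (not part of this patch): a VMM's reboot path
might bracket its vCPU re-initialization with the new ioctls roughly as
follows (vm_fd is a placeholder; error handling omitted).

	/* Let KVM drop guest_state_protected so vCPU state can be reset. */
	ioctl(vm_fd, KVM_CONTROL_PRE_SYSTEM_RESET);

	/* ... re-initialize vCPU registers / VMSA via the usual KVM ioctls ... */

	/* Re-protect the guest state and flush caches for the next boot flow. */
	ioctl(vm_fd, KVM_CONTROL_POST_SYSTEM_RESET);
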
Signed-off-by: hanliyang
---
 arch/x86/include/asm/kvm-x86-ops.h |  2 ++
 arch/x86/include/asm/kvm_host.h    |  2 ++
 arch/x86/kvm/svm/sev.c             | 10 ++++++++++
 arch/x86/kvm/svm/svm.c             |  2 ++
 arch/x86/kvm/svm/svm.h             |  3 +++
 arch/x86/kvm/x86.c                 | 14 ++++++++++++++
 include/uapi/linux/kvm.h           |  4 ++++
 7 files changed, 37 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 3ab3e361ea81..b54e72a0100b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -136,6 +136,8 @@ KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(vm_attestation)
+KVM_X86_OP_OPTIONAL(control_pre_system_reset)
+KVM_X86_OP_OPTIONAL(control_post_system_reset)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0dbbe96afb8a..a46465695e0d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1752,6 +1752,8 @@ struct kvm_x86_ops {
 	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
 
 	int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len);
+	int (*control_pre_system_reset)(struct kvm *kvm);
+	int (*control_post_system_reset)(struct kvm *kvm);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 07cfd0958b58..bd87543588f8 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3964,3 +3964,13 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa)
 
 	return 0;
 }
+
+int csv_control_pre_system_reset(struct kvm *kvm)
+{
+	return 0;
+}
+
+int csv_control_post_system_reset(struct kvm *kvm)
+{
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7353c45982f6..e688fcb72181 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5132,6 +5132,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
 
 	.vm_attestation = sev_vm_attestation,
+	.control_pre_system_reset = csv_control_pre_system_reset,
+	.control_post_system_reset = csv_control_post_system_reset,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 82ff3f931dcf..dc318ed71e84 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -737,6 +737,9 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
 int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len);
 int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa);
+int csv_control_pre_system_reset(struct kvm *kvm);
+int csv_control_post_system_reset(struct kvm *kvm);
+
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e35409d47252..ef3bee85c835 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7114,6 +7114,20 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 		r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
 		break;
 	}
+	case KVM_CONTROL_PRE_SYSTEM_RESET:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		    kvm_x86_ops.control_pre_system_reset)
+			r = static_call(kvm_x86_control_pre_system_reset)(kvm);
+		else
+			r = -ENOTTY;
+		break;
+	case KVM_CONTROL_POST_SYSTEM_RESET:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		    kvm_x86_ops.control_post_system_reset)
+			r = static_call(kvm_x86_control_post_system_reset)(kvm);
+		else
+			r = -ENOTTY;
+		break;
 	default:
 		r = -ENOTTY;
 	}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 0163a275c109..347056d56b5e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1582,6 +1582,10 @@ struct kvm_s390_ucas_mapping {
 #define KVM_GET_DEVICE_ATTR	  _IOW(KVMIO,  0xe2, struct kvm_device_attr)
 #define KVM_HAS_DEVICE_ATTR	  _IOW(KVMIO,  0xe3, struct kvm_device_attr)
 
+/* ioctls to control the VM during system reset */
+#define KVM_CONTROL_PRE_SYSTEM_RESET	_IO(KVMIO, 0xe8)
+#define KVM_CONTROL_POST_SYSTEM_RESET	_IO(KVMIO, 0xe9)
+
 /*
  * ioctls for vcpu fds
  */
-- 
Gitee

From f8f34b65ac6c69b42f8f535a881b32bfa18fed3e Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Thu, 15 Apr 2021 07:56:55 -0400
Subject: [PATCH 16/17] anolis: KVM: SVM: Add support for rebooting CSV2 guest

ANBZ: #8572

Currently, rebooting a CSV2 guest is unsupported because the vCPU state
is encrypted and can't be initialized when the guest reboots to execute
OVMF code.

In order to support rebooting a CSV2 guest, make a backup of the
encrypted VMSA before booting the guest, and restore the VMSA from the
backup before rebooting the guest.

Signed-off-by: hanliyang
---
 arch/x86/kvm/svm/sev.c | 59 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c | 10 +++++++
 arch/x86/kvm/svm/svm.h |  2 ++
 3 files changed, 71 insertions(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index bd87543588f8..d9c313100922 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -683,6 +683,18 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
 		return ret;
 
 	vcpu->arch.guest_state_protected = true;
+
+	/*
+	 * Back up the encrypted VMSA to support rebooting the CSV2
+	 * guest. The clflush_cache_range() is necessary to invalidate
+	 * the prefetched memory area pointed to by svm->sev_es.vmsa so
+	 * that we can read fresh memory updated by the PSP.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
+		memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE);
+	}
+
 	return 0;
 }
 
@@ -2617,6 +2629,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 
 	if (svm->sev_es.ghcb_sa_free)
 		kvfree(svm->sev_es.ghcb_sa);
+
+	__free_page(virt_to_page(svm->sev_es.reset_vmsa));
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
@@ -3967,10 +3981,55 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa)
 
 int csv_control_pre_system_reset(struct kvm *kvm)
 {
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+	int ret;
+
+	if (!sev_es_guest(kvm))
+		return 0;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		ret = mutex_lock_killable(&vcpu->mutex);
+		if (ret)
+			return ret;
+
+		vcpu->arch.guest_state_protected = false;
+
+		mutex_unlock(&vcpu->mutex);
+	}
+
 	return 0;
 }
 
 int csv_control_post_system_reset(struct kvm *kvm)
 {
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+	int ret;
+
+	if (!sev_es_guest(kvm))
+		return 0;
+
+	/* Flush both host and guest caches of VMSA */
+	wbinvd_on_all_cpus();
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct vcpu_svm *svm = to_svm(vcpu);
+
+		ret = mutex_lock_killable(&vcpu->mutex);
+		if (ret)
+			return ret;
+
+		memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE);
+
+		/* Flush encrypted vmsa to memory */
+		clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
+
+		svm->vcpu.arch.guest_state_protected = true;
+		svm->sev_es.received_first_sipi = false;
+
+		mutex_unlock(&vcpu->mutex);
+	}
+
 	return 0;
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e688fcb72181..cf497583b841 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1435,6 +1435,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm;
 	struct page *vmcb01_page;
 	struct page *vmsa_page = NULL;
+	struct page *reset_vmsa_page = NULL;
 	int err;
 
 	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1454,6 +1455,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 		if (!vmsa_page)
 			goto error_free_vmcb_page;
 
+		reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+		if (!reset_vmsa_page)
+			goto error_free_vmsa_page;
+
 		/*
 		 * SEV-ES guests maintain an encrypted version of their FPU
 		 * state which is restored and saved on VMRUN and VMEXIT.
@@ -1482,6 +1487,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	if (vmsa_page)
 		svm->sev_es.vmsa = page_address(vmsa_page);
 
+	if (reset_vmsa_page)
+		svm->sev_es.reset_vmsa = page_address(reset_vmsa_page);
+
 	svm->guest_state_loaded = false;
 
 	return 0;
@@ -1489,6 +1497,8 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 error_free_vmsa_page:
 	if (vmsa_page)
 		__free_page(vmsa_page);
+	if (reset_vmsa_page)
+		__free_page(reset_vmsa_page);
 error_free_vmcb_page:
 	__free_page(vmcb01_page);
 out:
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index dc318ed71e84..3be7e827f9b2 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -205,6 +205,8 @@ struct vcpu_sev_es_state {
 
 	/* CSV2 migrated ghcb mapping state support */
 	bool receiver_ghcb_map_fail;
+	/* CSV2 reboot vmsa */
+	struct vmcb_save_area *reset_vmsa;
 };
 
 struct vcpu_svm {
-- 
Gitee

From 898d265800af07f91bf6f583ea32a27af500f376 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Sat, 6 May 2023 16:01:25 +0800
Subject: [PATCH 17/17] anolis: KVM: SVM: Force flush caches before reboot CSV
 guest

ANBZ: #8572

For a memory encrypted guest, its pages' encryption status can change
at runtime. When the user reboots the guest, the pages' encryption
status from the last boot is ignored.
So during the boot flow of a reboot, there may be 2 versions of the
memory data in the cache, as follows:

                 +--------+
                 |        |
                 |        |        +--------------+ --+
                 |        |        |              |    \
                 |________|        |              |     \
cacheline for -> |________| <-+    |              |      \
  pa1(c=0)       |        |    \   |______________|       \
                 |        |     \->|______________|        \_ 64 bytes aligned <- pa1
                 |        |     /->|              |         _
                 |        |    /   |              |        /   4K
cacheline for    |________|   /    |              |       /    page
  pa1(c=1)    -> |________| <-+    |              |      /
                 |        |        |              |     /
                 |        |        +--------------+ --+
                 |        |
                 |        |

If the cache line holding the older version is flushed after the one
holding the newer version, and the guest then reads that memory again,
it will get corrupted data, which may lead to a crash.

In this change, for any memory encrypted guest, the cache is forcibly
flushed to memory before the next boot flow, which ensures that memory
accesses are up-to-date.

Signed-off-by: hanliyang
---
 arch/x86/kvm/svm/sev.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index d9c313100922..f264453f1d42 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4007,12 +4007,12 @@ int csv_control_post_system_reset(struct kvm *kvm)
 	unsigned long i;
 	int ret;
 
+	/* Flush both host and guest caches before next boot flow */
+	wbinvd_on_all_cpus();
+
 	if (!sev_es_guest(kvm))
 		return 0;
 
-	/* Flush both host and guest caches of VMSA */
-	wbinvd_on_all_cpus();
-
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct vcpu_svm *svm = to_svm(vcpu);
-- 
Gitee
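
For illustration of the aliasing above (not part of this patch): with
the C-bit encoded in the physical address, the same 64-byte line of
guest memory is reachable through two different physical addresses and
therefore two different cache lines. The C-bit position below is an
assumption for the sketch; the real position is CPU-specific and is
reported via CPUID 0x8000001F.

	/* Illustration only. */
	#define C_BIT_POS	47		/* assumed, CPU-specific */
	#define C_BIT		(1ULL << C_BIT_POS)

	unsigned long long pa1       = 0x123440;	/* 64-byte aligned */
	unsigned long long pa1_c_set = pa1 | C_BIT;	/* encrypted alias */

	/*
	 * pa1 and pa1_c_set are cached in distinct cache lines even
	 * though they back the same guest data, which is why
	 * wbinvd_on_all_cpus() must flush everything before the next
	 * boot flow.
	 */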