diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 053bf8c17e820dff088d585d8654862fdf2f8a0b..6a73f2543b94f81612289cf1e31a2b1608a3bdeb 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -11,20 +11,23 @@ #include #include #include -#include -#include #include #include "sp-dev.h" #include "psp-dev.h" #include "sev-dev.h" #include "tee-dev.h" +#include "vpsp.h" #ifdef CONFIG_TDM_DEV_HYGON #include "tdm-dev.h" #endif struct psp_device *psp_master; +struct kmem_cache *vpsp_cmd_ctx_slab; +static struct workqueue_struct *vpsp_wq; +static struct work_struct vpsp_work; + struct psp_misc_dev *psp_misc; int is_hygon_psp; @@ -38,33 +41,6 @@ enum HYGON_PSP_OPCODE { HYGON_PSP_OPCODE_MAX_NR, }; -enum VPSP_DEV_CTRL_OPCODE { - VPSP_OP_VID_ADD, - VPSP_OP_VID_DEL, - VPSP_OP_SET_DEFAULT_VID_PERMISSION, - VPSP_OP_GET_DEFAULT_VID_PERMISSION, - VPSP_OP_SET_GPA, -}; - -struct vpsp_dev_ctrl { - unsigned char op; - /** - * To be compatible with old user mode, - * struct vpsp_dev_ctrl must be kept at 132 bytes. 
- */ - unsigned char resv[3]; - union { - unsigned int vid; - // Set or check the permissions for the default VID - unsigned int def_vid_perm; - struct { - u64 gpa_start; - u64 gpa_end; - } gpa; - unsigned char reserved[128]; - } __packed data; -}; - int psp_mutex_enabled; extern struct mutex sev_cmd_mutex; @@ -215,8 +191,12 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) /* Check if it is SEV command completion: */ reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); if (reg & PSP_CMDRESP_RESP) { - sev->int_rcvd = 1; - wake_up(&sev->int_queue); + if (vpsp_in_ringbuffer_mode) { + queue_work(vpsp_wq, &vpsp_work); + } else { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } } } @@ -389,141 +369,6 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } -DEFINE_RWLOCK(vpsp_rwlock); - -/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. - * but, the performance of finding vid is determined by g_vpsp_vid_num, - * so VPSP_VID_MAX_ENTRIES can be set larger. - */ -#define VPSP_VID_MAX_ENTRIES 2048 -#define VPSP_VID_NUM_MAX 64 - -static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; -static uint32_t g_vpsp_vid_num; -static int compare_vid_entries(const void *a, const void *b) -{ - return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; -} -static void swap_vid_entries(void *a, void *b, int size) -{ - struct vpsp_context entry; - - memcpy(&entry, a, size); - memcpy(a, b, size); - memcpy(b, &entry, size); -} - -/** - * When 'allow_default_vid' is set to 1, - * QEMU is allowed to use 'vid 0' by default - * in the absence of a valid 'vid' setting. 
- */ -uint32_t allow_default_vid = 1; -void vpsp_set_default_vid_permission(uint32_t is_allow) -{ - allow_default_vid = is_allow; -} - -int vpsp_get_default_vid_permission(void) -{ - return allow_default_vid; -} -EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); - -/** - * get a vpsp context from pid - */ -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) -{ - struct vpsp_context new_entry = {.pid = pid}; - struct vpsp_context *existing_entry = NULL; - - read_lock(&vpsp_rwlock); - existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, - sizeof(struct vpsp_context), compare_vid_entries); - read_unlock(&vpsp_rwlock); - - if (!existing_entry) - return -ENOENT; - if (ctx) - *ctx = existing_entry; - return 0; -} EXPORT_SYMBOL_GPL(vpsp_get_context); - -/** - * Upon qemu startup, this section checks whether - * the '-device psp,vid' parameter is specified. - * If set, it utilizes the 'vpsp_add_vid' function - * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. - * The insertion is done in ascending order of 'pid'. - */ -static int vpsp_add_vid(uint32_t vid) -{ - pid_t cur_pid = task_pid_nr(current); - struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; - - if (vpsp_get_context(NULL, cur_pid) == 0) - return -EEXIST; - if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) - return -ENOMEM; - if (vid >= VPSP_VID_NUM_MAX) - return -EINVAL; - - write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); - sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), - compare_vid_entries, swap_vid_entries); - pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); - write_unlock(&vpsp_rwlock); - return 0; -} - -/** - * Upon the virtual machine is shut down, - * the 'vpsp_del_vid' function is employed to remove - * the 'vid' associated with the current 'pid'. 
- */ -static int vpsp_del_vid(void) -{ - pid_t cur_pid = task_pid_nr(current); - int i, ret = -ENOENT; - - write_lock(&vpsp_rwlock); - for (i = 0; i < g_vpsp_vid_num; ++i) { - if (g_vpsp_context_array[i].pid == cur_pid) { - --g_vpsp_vid_num; - pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", - g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); - memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], - sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); - ret = 0; - goto end; - } - } - -end: - write_unlock(&vpsp_rwlock); - return ret; -} - -static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) -{ - pid_t cur_pid = task_pid_nr(current); - struct vpsp_context *ctx = NULL; - - vpsp_get_context(&ctx, cur_pid); - if (!ctx) { - pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); - return -ENOENT; - } - - ctx->gpa_start = gpa_start; - ctx->gpa_end = gpa_end; - pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", - gpa_start, gpa_end, cur_pid); - return 0; -} - /** * Try to pin a page * @@ -585,39 +430,6 @@ static int psp_unpin_user_page(u64 vaddr) return 0; } -static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) -{ - int ret = 0; - unsigned char op = ctrl->op; - - switch (op) { - case VPSP_OP_VID_ADD: - ret = vpsp_add_vid(ctrl->data.vid); - break; - - case VPSP_OP_VID_DEL: - ret = vpsp_del_vid(); - break; - - case VPSP_OP_SET_DEFAULT_VID_PERMISSION: - vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); - break; - - case VPSP_OP_GET_DEFAULT_VID_PERMISSION: - ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); - break; - - case VPSP_OP_SET_GPA: - ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); - break; - - default: - ret = -EINVAL; - break; - } - return ret; -} - static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) { unsigned int opcode = 0; @@ -695,6 +507,17 @@ static int hygon_psp_additional_setup(struct sp_device *sp) if 
(!psp_misc) { struct miscdevice *misc; + vpsp_wq = create_singlethread_workqueue("vpsp_workqueue"); + if (!vpsp_wq) + return -ENOMEM; + + INIT_WORK(&vpsp_work, vpsp_worker_handler); + + vpsp_cmd_ctx_slab = kmem_cache_create("vpsp_cmd_ctx", + sizeof(struct vpsp_cmd_ctx), 0, SLAB_HWCACHE_ALIGN, NULL); + if (!vpsp_cmd_ctx_slab) + return -ENOMEM; + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); if (!psp_misc) return -ENOMEM; @@ -733,6 +556,9 @@ static void hygon_psp_exit(struct kref *ref) ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); free_page((unsigned long)misc_dev->data_pg_aligned); psp_misc = NULL; + kmem_cache_destroy(vpsp_cmd_ctx_slab); + flush_workqueue(vpsp_wq); + destroy_workqueue(vpsp_wq); } int psp_dev_init(struct sp_device *sp) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 8510987f0a2addd0d23d2ef16a4ac4dfd68f260d..8723b65194c063a51923dfb474f71506b0fd74e4 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -32,6 +32,7 @@ #include "sev-dev.h" #include "hygon/csv-dev.h" +#include "vpsp.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" @@ -63,17 +64,12 @@ extern int psp_mutex_trylock(struct psp_mutex *mutex); extern int psp_mutex_unlock(struct psp_mutex *mutex); extern int psp_mutex_enabled; -/* defination of variabled used by virtual psp */ -enum VPSP_RB_CHECK_STATUS { - RB_NOT_CHECK = 0, - RB_CHECKING, - RB_CHECKED, - RB_CHECK_MAX -}; -#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) -#define VPSP_CMD_STATUS_RUNNING 0xffff +bool vpsp_in_ringbuffer_mode; +static struct vpsp_cmd_ctx *vpsp_cmd_ctx_array[CSV_COMMAND_PRIORITY_NUM] + [CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE]; static DEFINE_MUTEX(vpsp_rb_mutex); struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +uint8_t vpsp_rb_oc_supported; // support overcommit static uint8_t vpsp_rb_supported; static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); @@ -1673,11 +1669,27 
@@ int csv_ring_buffer_queue_free(void) } EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); -static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +static unsigned int vpsp_queue_cmd_size(int prio) { + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int get_queue_tail(int prio) +{ + struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; } +static int get_queue_overcommit_tail(int prio) +{ + uint32_t que_size = vpsp_queue_cmd_size(prio); + struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; + + if (que_size >= VPSP_RB_OVERCOMMIT_SIZE || que_size == 0 || !vpsp_rb_oc_supported) + return get_queue_tail(prio); + return (ringbuffer->cmd_ptr.head + VPSP_RB_OVERCOMMIT_SIZE) & ringbuffer->cmd_ptr.mask; +} + static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) { return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; @@ -1699,23 +1711,47 @@ static int vpsp_get_cmd_status(int prio, int index) return statval[index].status; } -static unsigned int vpsp_queue_cmd_size(int prio) +static int vpsp_dequeue_and_notify(int prio, struct csv_cmdptr_entry *cmd_ptr) { - return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); + struct vpsp_cmd_ctx *ctx = NULL; + int mask = vpsp_ring_buffer[prio].cmd_ptr.mask; + int index = vpsp_ring_buffer[prio].cmd_ptr.head & mask; + + ctx = vpsp_cmd_ctx_array[prio][index]; + if (ctx) { + /** + * Write the result back to the cmd ctx, + * after which we can safely perform + * the ringbuffer dequeue operation without + * waiting for the Guest to retrieve the result. 
+ */ + ctx->statval = vpsp_get_cmd_status(prio, index); + vpsp_cmd_ctx_obj_put(ctx, false); + } + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + mutex_lock(&vpsp_rb_mutex); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + mutex_unlock(&vpsp_rb_mutex); + return 0; } -static int vpsp_dequeue_cmd(int prio, int index, - struct csv_cmdptr_entry *cmd_ptr) +/** + * Ensure that the 'status' field of cmd statval + * in the range from tail to overcommit tail in the queue is 0. + */ +static void vpsp_queue_overcommit_entry_inactive(int prio) { + int tail = 0, overcommit_tail = 0, i = 0; + mutex_lock(&vpsp_rb_mutex); - /* The status update must be before the head update */ - vpsp_set_cmd_status(prio, index, 0); - csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + tail = get_queue_tail(prio); + overcommit_tail = get_queue_overcommit_tail(prio); + for (i = tail; i < overcommit_tail; ++i) + vpsp_set_cmd_status(prio, i, 0); mutex_unlock(&vpsp_rb_mutex); - - return 0; } /* @@ -1732,20 +1768,34 @@ static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t cmdptr.cmd_flags = flags; mutex_lock(&vpsp_rb_mutex); - index = get_queue_tail(&vpsp_ring_buffer[prio]); + index = get_queue_tail(prio); - /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ - if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { - index = -1; - goto out; - } - - /* The status must be written first, and then the cmd can be enqueued */ - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - index = -1; - goto out; + /** + * If the firmware does not support the overcommit function: + * the firmware may not check the 'status' before executing cmd. 
+ * Therefore, the 'status' must be written before the cmd be enqueued, + * otherwise, X86 may overwrite the result written by the firmware. + * + * If the firmware support the overcommit function: + * The firmware will forcefully check the 'status' + * before executing cmd until the 'status' becomes 0xffff. + * In order to prevent the firmware from getting the cmd to be valid, + * the 'status' must be written after waiting for the cmd to be queued. + */ + if (vpsp_rb_oc_supported) { + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + } else { + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } } out: @@ -1753,11 +1803,13 @@ static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t return index; } -static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, - uint32_t new_head) +static void vpsp_ring_update_head(int prio, uint32_t new_head) { + struct csv_ringbuffer_queue *ring_buffer = &vpsp_ring_buffer[prio]; uint32_t orig_head = get_queue_head(ring_buffer); + struct csv_cmdptr_entry entry; uint32_t comple_num = 0; + int i; if (new_head >= orig_head) comple_num = new_head - orig_head; @@ -1765,7 +1817,8 @@ static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + 1; - ring_buffer->cmd_ptr.head += comple_num; + for (i = 0; i < comple_num; ++i) + vpsp_dequeue_and_notify(prio, &entry); } static int vpsp_ring_buffer_queue_init(void) @@ -1782,6 +1835,28 @@ static int vpsp_ring_buffer_queue_init(void) return 0; } +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) 
+ return psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(&sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return 0; +} + static int __vpsp_ring_buffer_enter_locked(int *error) { int ret; @@ -1818,13 +1893,49 @@ static int __vpsp_ring_buffer_enter_locked(int *error) return ret; } -static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +void vpsp_worker_handler(struct work_struct *unused) +{ + struct sev_user_data_status data; + struct sev_device *sev = psp_master->sev_data; + unsigned int reg; + unsigned int rb_head, rb_tail; + + reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + goto end; + + rb_head = reg; + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + pr_debug("ringbuffer exit rb_head %x, rb_tail %x\n", rb_head, rb_tail); + /* update head */ + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_HIGH, + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_LOW, + reg & PSP_RBHEAD_QLO_HEAD_MASK); +end: + /** + * Before send new mailbox command, set vpsp_in_ringbuffer_mode + * to false to avoid nested triggering of the workqueue. 
+ */ + vpsp_in_ringbuffer_mode = false; + + /* exit ringbuf mode by send CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + vpsp_psp_mutex_unlock(); +} + +static int __vpsp_do_ringbuf_cmds_locked(void) { struct psp_device *psp = psp_master; - unsigned int reg, ret = 0; unsigned int rb_tail, rb_head; unsigned int rb_ctl; struct sev_device *sev; + struct csv_queue *queue; + struct csv_cmdptr_entry *first_cmd; if (!psp) return -ENODEV; @@ -1835,12 +1946,12 @@ static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) sev = psp->sev_data; /* update rb tail */ + vpsp_queue_overcommit_entry_inactive(CSV_COMMAND_PRIORITY_LOW); rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail |= (get_queue_tail(CSV_COMMAND_PRIORITY_HIGH) << PSP_RBTAIL_QHI_TAIL_SHIFT); rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + rb_tail |= get_queue_overcommit_tail(CSV_COMMAND_PRIORITY_LOW); iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); /* update rb head */ @@ -1852,54 +1963,42 @@ static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + /** + * In some PSP firmware, even if the high priority queue is empty, + * it will still try to read the element at the head of the queue and try to process it. + * When the element at the head of the queue happens to be an illegal cmd id, + * PSP returns the PSP_RBHEAD_QPAUSE_INT_STAT error. 
+ * + * Therefore, now we need to manually set the head element of the queue to + * the default tkm cmd id before sending the ringbuffer each time when + * the high priority queue is empty. + * + * The low priority queue has no such bug, and future PSP firmware should fix it. + */ + if (vpsp_queue_cmd_size(CSV_COMMAND_PRIORITY_HIGH) == 0) { + queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr; + first_cmd = (struct csv_cmdptr_entry *)queue->data_align; + first_cmd[queue->head & queue->mask].cmd_id = TKM_PSP_CMDID; + } + pr_debug("ringbuffer launch rb_head %x, rb_tail %x\n", rb_head, rb_tail); + /* update rb ctl to trigger psp irq */ sev->int_rcvd = 0; /* PSP response to x86 only when all queue is empty or error happends */ rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - /* wait for all commands in ring buffer completed */ - ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout*10); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(psp->dev, "sev command in ringbuffer mode timed out, disabling PSP\n"); - psp_dead = true; - return ret; - } - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - ret = -EFAULT; - - /* update head */ - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], - (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], - reg & PSP_RBHEAD_QLO_HEAD_MASK); - - if (psp_ret) - *psp_ret = vpsp_get_cmd_status(prio, index); - - return ret; + vpsp_in_ringbuffer_mode = true; + return 0; } -static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret) { - struct sev_user_data_status data; - int rc; - - rc = __vpsp_ring_buffer_enter_locked(psp_ret); + int rc = __vpsp_ring_buffer_enter_locked(psp_ret); if (rc) goto end; - rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, 
index); - - /* exit ringbuf mode by send CMD in mailbox mode */ - __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - + rc = __vpsp_do_ringbuf_cmds_locked(); end: return rc; } @@ -1972,6 +2071,8 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, goto end; } WRITE_ONCE(vpsp_rb_supported, 1); + if (VPSP_RB_OC_IS_SUPPORTED(status->build)) + WRITE_ONCE(vpsp_rb_oc_supported, 1); } atomic_set(&vpsp_rb_check_status, RB_CHECKED); @@ -1995,33 +2096,36 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) +int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) { int ret = 0; + uint8_t prio = cmd_ctx->rb_prio; + uint16_t statval = VPSP_CMD_STATUS_RUNNING; + uint32_t index = cmd_ctx->rb_index; + phys_addr_t phy_addr = cmd_ctx->psp_cmdbuf_paddr; struct csv_cmdptr_entry cmd = {0}; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); /* Get the retult directly if the command has been executed */ - if (index >= 0 && vpsp_get_cmd_status(prio, index) != - VPSP_CMD_STATUS_RUNNING) { - psp_ret->pret = vpsp_get_cmd_status(prio, index); - psp_ret->status = VPSP_FINISH; - return 0; + if (index >= 0) { + if (cmd_ctx->statval != VPSP_CMD_STATUS_RUNNING) + statval = cmd_ctx->statval; + else + statval = vpsp_get_cmd_status(prio, index); + if (statval != VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = statval; + psp_ret->status = VPSP_FINISH; + return 0; + } } - if (is_hygon_psp && mutex_enabled) - ret = psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); - else - ret = mutex_trylock(&sev_cmd_mutex); - - if (ret) { + if (vpsp_psp_mutex_trylock()) { /* Use mailbox mode to execute a command if there is only one command */ if (vpsp_queue_cmd_size(prio) == 1) { /* dequeue command from queue*/ - 
vpsp_dequeue_cmd(prio, index, &cmd); + vpsp_dequeue_and_notify(prio, &cmd); ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -2033,29 +2137,24 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, } } } else { - ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, - index); - psp_ret->status = VPSP_FINISH; + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", __func__, ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); goto end; } + psp_ret->status = VPSP_RUNNING; } } else { /* Change the command to the running state if getting the mutex fails */ - psp_ret->index = index; psp_ret->status = VPSP_RUNNING; return 0; } end: - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(&sev_cmd_mutex); return ret; } -EXPORT_SYMBOL_GPL(vpsp_try_get_result); int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) { @@ -2089,7 +2188,8 @@ int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -2119,8 +2219,14 @@ int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) goto end; } + cmd_ctx->rb_index = index; + cmd_ctx->rb_prio = prio; + cmd_ctx->psp_cmdbuf_paddr = phy_addr; + vpsp_cmd_ctx_array[prio][index] = cmd_ctx; + vpsp_cmd_ctx_obj_get(cmd_ctx); + /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); + ret = vpsp_try_get_result(cmd_ctx, psp_ret); if (unlikely(ret)) { pr_err("[%s]: 
vpsp_try_get_result failed %d\n", __func__, ret); goto end; @@ -2144,7 +2250,6 @@ int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) end: return ret; } -EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); static void sev_exit(struct kref *ref) { diff --git a/drivers/crypto/ccp/vpsp.c b/drivers/crypto/ccp/vpsp.c index 58976d30542e542adfe145e4064c55a47b339b71..129842b33289e9c38a4431dc34cd500ae3e9b1a3 100644 --- a/drivers/crypto/ccp/vpsp.c +++ b/drivers/crypto/ccp/vpsp.c @@ -13,6 +13,17 @@ #include #include +#include +#include +#include +#include +#include + +#include "psp-ringbuf.h" +#include "psp-dev.h" +#include "sev-dev.h" +#include "vpsp.h" + #ifdef pr_fmt #undef pr_fmt #endif @@ -47,6 +58,127 @@ * kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result * |<=> psp device driver */ +/** + * used to locate the command context, + * when the guest enters the host via vmmcall + */ +DEFINE_HASHTABLE(vpsp_cmd_ctx_table, 11); +DEFINE_RWLOCK(table_rwlock); + +static struct vpsp_cmd_ctx *vpsp_hashtable_find_cmd_ctx(gpa_t key1, pid_t key2) +{ + struct vpsp_cmd_ctx *entry = NULL; + bool is_found = false; + + read_lock(&table_rwlock); + hash_for_each_possible(vpsp_cmd_ctx_table, entry, node, key1) { + if (entry->key1 == key1 && entry->key2 == key2) { + is_found = true; + break; + } + } + read_unlock(&table_rwlock); + if (!is_found) + entry = NULL; + + return entry; +} + +static void vpsp_hashtable_add_cmd_ctx(struct vpsp_cmd_ctx *ctx) +{ + struct vpsp_cmd_ctx *entry = NULL; + + write_lock(&table_rwlock); + hash_for_each_possible(vpsp_cmd_ctx_table, entry, node, ctx->key1) { + if (entry->key1 == ctx->key1 && + entry->key2 == ctx->key2) { + vpsp_cmd_ctx_obj_put(entry, true); + break; + } + } + hash_add(vpsp_cmd_ctx_table, &ctx->node, ctx->key1); + write_unlock(&table_rwlock); + + vpsp_cmd_ctx_obj_get(ctx); +} + +static void vpsp_hashtable_remove_cmd_ctx(struct vpsp_cmd_ctx *ctx) +{ + write_lock(&table_rwlock); + hash_del(&ctx->node); + 
write_unlock(&table_rwlock); + + vpsp_cmd_ctx_obj_put(ctx, false); +} + +/** + * Create a vpsp_cmd_ctx object and insert it into the + * vpsp_cmd_ctx_table hash table. + * + * @hkey: The key value for the hash table vpsp_cmd_ctx_table + * + * Return: the address of the vpsp_cmd_ctx object + * if created successfully, otherwise returns NULL + */ +static struct vpsp_cmd_ctx *vpsp_cmd_ctx_create(gpa_t key1, pid_t key2) +{ + struct vpsp_cmd_ctx *cmd_ctx = kmem_cache_zalloc(vpsp_cmd_ctx_slab, GFP_KERNEL); + + if (cmd_ctx) { + /** + * According to the implementation of refcount, + * the initial value must be greater than 0. + */ + refcount_set(&cmd_ctx->ref, 1); + cmd_ctx->statval = VPSP_CMD_STATUS_RUNNING; + cmd_ctx->key1 = key1; + cmd_ctx->key2 = key2; + vpsp_hashtable_add_cmd_ctx(cmd_ctx); + } + return cmd_ctx; +} + +/** + * Destroys the specified vpsp_cmd_ctx object, + * indicating it will no longer be accessed. + * + * But does not necessarily free the cmd_ctx memory immediately, + * only additional to perform decrement refcount. + * + * Actual memory release occurs when the refcount drops to 0, + * which may happen during the vpsp_worker_handler or + * vpsp_cmd_ctx_destroy process. + * + * @cmd_ctx: the vpsp_cmd_ctx object + */ +static void vpsp_cmd_ctx_destroy(struct vpsp_cmd_ctx *cmd_ctx) +{ + if (!cmd_ctx) + return; + /** + * The initial refcount is 1, + * need to additional decrement a refcount. 
+ */ + vpsp_cmd_ctx_obj_put(cmd_ctx, false); + vpsp_hashtable_remove_cmd_ctx(cmd_ctx); +} + +void vpsp_cmd_ctx_obj_get(struct vpsp_cmd_ctx *cmd_ctx) +{ + refcount_inc(&cmd_ctx->ref); +} + +void vpsp_cmd_ctx_obj_put(struct vpsp_cmd_ctx *cmd_ctx, bool force) +{ + do { + if (refcount_dec_and_test(&cmd_ctx->ref)) { + kfree(cmd_ctx->data); + memset(cmd_ctx, 0, sizeof(*cmd_ctx)); + kmem_cache_free(vpsp_cmd_ctx_slab, cmd_ctx); + force = false; + } + } while (force); +} struct psp_cmdresp_head { uint32_t buf_size; @@ -54,19 +186,7 @@ struct psp_cmdresp_head { uint32_t cmdresp_code; } __packed; -/* save command data for restoring later */ -struct vpsp_hbuf_wrapper { - void *data; - uint32_t data_size; -}; - -/* - * Virtual PSP host memory information maintenance, used in ringbuffer mode - */ -struct vpsp_hbuf_wrapper -g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; - -static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size) +static int check_gpa_range(struct vpsp_dev_ctx *vpsp_ctx, gpa_t addr, uint32_t size) { if (!vpsp_ctx || !addr) return -EFAULT; @@ -76,7 +196,7 @@ static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t s return -EFAULT; } -static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, +static int check_psp_mem_range(struct vpsp_dev_ctx *vpsp_ctx, void *data, uint32_t size) { if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) != @@ -92,19 +212,21 @@ static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, } /** - * Copy the guest data to the host kernel buffer - * and record the host buffer address in 'hbuf'. - * This 'hbuf' is used to restore context information - * during asynchronous processing. + * Copy Guest data to the Host kernel buffer + * and allocate a cmd_ctx to insert into the vpsp_cmd_ctx_table. 
*/ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_cmd_ctx **cmd_ctx) { int ret = 0; void *data = NULL; struct psp_cmdresp_head psp_head; uint32_t data_size; + if (unlikely(!cmd_ctx)) + return -EFAULT; + *cmd_ctx = NULL; + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; @@ -117,28 +239,35 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, if (!data) return -ENOMEM; - if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + *cmd_ctx = vpsp_cmd_ctx_create(data_gpa, vpsp->kvm->userspace_pid); + if (!(*cmd_ctx)) { ret = -EFAULT; goto end; } - hbuf->data = data; - hbuf->data_size = data_size; + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + (*cmd_ctx)->data = data; + (*cmd_ctx)->data_size = data_size; end: - if (ret && data) + if (ret) { + vpsp_cmd_ctx_destroy(*cmd_ctx); kfree(data); + } return ret; } static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_cmd_ctx *cmd_ctx) { int ret = 0; /* restore cmdresp's buffer from context */ - if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, - hbuf->data_size))) { + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, cmd_ctx->data, + cmd_ctx->data_size))) { pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", __func__); ret = -EFAULT; @@ -146,8 +275,7 @@ static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, } end: - kfree(hbuf->data); - memset(hbuf, 0, sizeof(*hbuf)); + vpsp_cmd_ctx_destroy(cmd_ctx); return ret; } @@ -253,7 +381,7 @@ static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) return hpa; } -static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, +static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct 
vpsp_dev_ctx *vpsp_ctx, uint64_t data, uint32_t cmd) { int ret; @@ -299,7 +427,7 @@ static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_co static int check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, - struct vpsp_context *vpsp_ctx, + struct vpsp_dev_ctx *vpsp_ctx, uint64_t data, uint32_t cmd) { int ret = 0; @@ -323,7 +451,7 @@ check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, return ret; } -static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, +static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_dev_ctx *vpsp_ctx, uint32_t cmd, uint32_t *psp_ret) { int ret; @@ -358,58 +486,64 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, gpa_t data_gpa, uint32_t psp_ret) { int ret; - uint64_t data_hpa; - uint32_t index = 0, vid = 0; + uint32_t vid = 0; struct vpsp_ret psp_async = {0}; - struct vpsp_context *vpsp_ctx = NULL; - struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + struct vpsp_dev_ctx *vpsp_dev_ctx = NULL; + struct vpsp_cmd_ctx *cmd_ctx = NULL; phys_addr_t hpa; - vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + vpsp_get_dev_ctx(&vpsp_dev_ctx, vpsp->kvm->userspace_pid); - ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + ret = check_cmd_forward_op_permission(vpsp, vpsp_dev_ctx, data_gpa, cmd); if (unlikely(ret)) { pr_err("directly operation not allowed\n"); goto end; } - ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async); + ret = vpsp_try_bind_vtkm(vpsp, vpsp_dev_ctx, cmd, (uint32_t *)&psp_async); if (unlikely(ret || *(uint32_t *)&psp_async)) { pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n", ret, *(uint32_t *)&psp_async); goto end; } - if (vpsp_ctx) - vid = vpsp_ctx->vid; + if (vpsp_dev_ctx) + vid = vpsp_dev_ctx->vid; *((uint32_t *)&psp_async) = psp_ret; - hpa = gpa_to_hpa(vpsp, data_gpa); - if (unlikely(!hpa)) { - ret = -EFAULT; - goto end; - } - - data_hpa = 
PUT_PSP_VID(hpa, vid); - switch (psp_async.status) { case VPSP_INIT: + cmd_ctx = vpsp_cmd_ctx_create(data_gpa, + vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + ret = -ENOMEM; + goto end; + } + + hpa = gpa_to_hpa(vpsp, data_gpa); + if (unlikely(!hpa)) { + ret = -EFAULT; + goto end; + } /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async); + ret = vpsp_try_do_cmd(cmd, PUT_PSP_VID(hpa, vid), cmd_ctx, &psp_async); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); goto end; } break; case VPSP_RUNNING: - prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - index = psp_async.index; + cmd_ctx = vpsp_hashtable_find_cmd_ctx(data_gpa, vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + pr_err("[%s]: vpsp_hashtable_find_cmd_ctx failed, data_gpa %llx\n", + __func__, data_gpa); + ret = -EFAULT; + goto end; + } /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async); + ret = vpsp_try_get_result(cmd_ctx, &psp_async); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); goto end; @@ -421,6 +555,9 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, break; } + if (psp_async.status == VPSP_FINISH) + vpsp_cmd_ctx_destroy(cmd_ctx); + end: /** * In order to indicate both system errors and PSP errors, @@ -432,6 +569,8 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, if (ret > 0) ret = -ret; psp_async.pret = (uint16_t)ret; + psp_async.status = VPSP_FINISH; + vpsp_cmd_ctx_destroy(cmd_ctx); } return *((int *)&psp_async); } @@ -449,24 +588,21 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g { int ret = 0; struct vpsp_ret psp_ret = {0}; - struct vpsp_hbuf_wrapper hbuf = {0}; - struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - struct vpsp_context *vpsp_ctx = NULL; + struct 
vpsp_cmd_ctx *cmd_ctx = NULL; + struct vpsp_dev_ctx *vpsp_dev_ctx = NULL; phys_addr_t data_paddr = 0; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; - uint32_t index = 0; uint32_t vid = 0; - vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + vpsp_get_dev_ctx(&vpsp_dev_ctx, vpsp->kvm->userspace_pid); - ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_dev_ctx, data_gpa, cmd); if (unlikely(ret)) { pr_err("copy operation not allowed\n"); return -EPERM; } - if (vpsp_ctx) - vid = vpsp_ctx->vid; + if (vpsp_dev_ctx) + vid = vpsp_dev_ctx->vid; if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) @@ -475,32 +611,25 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g switch (psp_ret.status) { case VPSP_INIT: /* copy data from guest */ - ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf); + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &cmd_ctx); if (unlikely(ret)) { - psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", __func__); ret = -EFAULT; goto end; } - data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid); + data_paddr = PUT_PSP_VID(__psp_pa(cmd_ctx->data), vid); /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_do_cmd(cmd, data_paddr, cmd_ctx, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_do_cmd failed\n", __func__); ret = -EFAULT; goto end; } - ret = -EFAULT; - if (psp_ret.status == VPSP_RUNNING) { - prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - g_hbuf_wrap[prio][psp_ret.index] = hbuf; - ret = 0; - } else if (psp_ret.status == VPSP_FINISH) { - ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); + if (psp_ret.status == VPSP_FINISH) { + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, cmd_ctx); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -511,33 +640,38 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g break; case VPSP_RUNNING: - prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - index = psp_ret.index; - data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid); + cmd_ctx = vpsp_hashtable_find_cmd_ctx(data_gpa, vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + pr_err("[%s]: vpsp_hashtable_find_cmd_ctx failed, data_gpa %llx\n", + __func__, data_gpa); + ret = -EFAULT; + goto end; + } + /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, data_paddr, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_get_result(cmd_ctx, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); ret = -EFAULT; goto end; } - ret = -EFAULT; if (psp_ret.status == VPSP_RUNNING) { ret = 0; + goto end; } else if (psp_ret.status == VPSP_FINISH) { /* copy data to guest */ - ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, - &g_hbuf_wrap[prio][index]); + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, cmd_ctx); + cmd_ctx = NULL; if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); ret = -EFAULT; - goto end; } + goto end; } + + ret = -EFAULT; break; default: @@ -546,7 +680,181 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g break; } end: + if (ret) { + psp_ret.status = VPSP_FINISH; + vpsp_cmd_ctx_destroy(cmd_ctx); + } /* return psp_ret to guest */ vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); 
return ret; -} EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); + +DEFINE_RWLOCK(vpsp_dev_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. + */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +static struct vpsp_dev_ctx g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_dev_ctx *)a)->pid - ((struct vpsp_dev_ctx *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_dev_ctx entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} + +/** + * get a vpsp device context from pid + */ +int vpsp_get_dev_ctx(struct vpsp_dev_ctx **ctx, pid_t pid) +{ + struct vpsp_dev_ctx new_entry = {.pid = pid}; + struct vpsp_dev_ctx *existing_entry = NULL; + + read_lock(&vpsp_dev_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, + sizeof(struct vpsp_dev_ctx), compare_vid_entries); + read_unlock(&vpsp_dev_rwlock); + + if (!existing_entry) + return -ENOENT; + + if (ctx) + *ctx = existing_entry; + + return 0; +} + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. + * The insertion is done in ascending order of 'pid'. 
+ */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_dev_ctx new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_dev_ctx(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_dev_rwlock); + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_dev_ctx)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_dev_ctx), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_dev_rwlock); + return 0; +} + +/** + * When the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. + */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_dev_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_context_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); + memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + sizeof(struct vpsp_dev_ctx) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_dev_rwlock); + return ret; +} + +static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_dev_ctx *ctx = NULL; + + vpsp_get_dev_ctx(&ctx, cur_pid); + if (!ctx) { + pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); + return -ENOENT; + } + + ctx->gpa_start = gpa_start; + ctx->gpa_end = gpa_end; + pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", + gpa_start, gpa_end, cur_pid); + return 0; +} + +int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char 
op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + + case VPSP_OP_SET_GPA: + ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/crypto/ccp/vpsp.h b/drivers/crypto/ccp/vpsp.h new file mode 100644 index 0000000000000000000000000000000000000000..7420aedf4d98ddc1b6cf38fe6748fd04ca14c9ea --- /dev/null +++ b/drivers/crypto/ccp/vpsp.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Secure Processor interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Mengbiao Xiong + */ + +#ifndef __CCP_HYGON_VPSP_H__ +#define __CCP_HYGON_VPSP_H__ + + +/* + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) + * @resv2: reserved bits + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 
pret : 16; + u32 resv : 1; + u32 format : 1; + u32 resv2 : 12; + u32 status : 2; +}; +#define VPSP_RET_SYS_FORMAT 1 +#define VPSP_RET_PSP_FORMAT 0 + +#define PSP_2MB_MASK (2*1024*1024 - 1) +#define PSP_HUGEPAGE_2MB (2*1024*1024) +#define PSP_HUGEPAGE_NUM_MAX 128 +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f +#define TKM_PSP_CMDID TKM_CMD_ID_MIN +#define TKM_PSP_CMDID_OFFSET 0x128 +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + +struct vpsp_dev_ctx { + u32 vid; + pid_t pid; + u64 gpa_start; + u64 gpa_end; + + // `vm_is_bound` indicates whether the binding operation has been performed + u32 vm_is_bound; + u32 vm_handle; // only for csv +}; + +struct vpsp_cmd_ctx { + void *data; // copy forward mode only + uint32_t data_size; // copy forward mode only + uint8_t rb_prio; + uint32_t rb_index; + uint32_t statval; + phys_addr_t psp_cmdbuf_paddr; + refcount_t ref; + + /** + * key1 indicates the GPA + * to the data passed by the Guest + * + * key2 indicates the pid of Qemu Process + * + * Serves as the key for the vpsp_cmd_ctx_table. + */ + gpa_t key1; + pid_t key2; + struct hlist_node node; +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, + VPSP_OP_SET_GPA, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + /** + * To be compatible with old user mode, + * struct vpsp_dev_ctrl must be kept at 132 bytes. 
+ */ + unsigned char resv[3]; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + struct { + u64 gpa_start; + u64 gpa_end; + } gpa; + unsigned char reserved[128]; + } __packed data; +}; + +/* definition of variables used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_RB_OC_IS_SUPPORTED(buildid) (buildid >= 2167) +#define VPSP_CMD_STATUS_RUNNING 0xffff +#define VPSP_RB_OVERCOMMIT_SIZE 1024 + +extern bool vpsp_in_ringbuffer_mode; +extern struct kmem_cache *vpsp_cmd_ctx_slab; + +void vpsp_worker_handler(struct work_struct *unused); +int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); +void vpsp_cmd_ctx_obj_get(struct vpsp_cmd_ctx *cmd_ctx); + +void vpsp_cmd_ctx_obj_put(struct vpsp_cmd_ctx *cmd_ctx, bool force); +int vpsp_get_dev_ctx(struct vpsp_dev_ctx **ctx, pid_t pid); +int vpsp_get_default_vid_permission(void); +int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl); + +#endif /* __CCP_HYGON_VPSP_H__ */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 8f4c671c53936ad8eb4b4b7c6814a27c4875ec52..7eb0c657c9736a5e57c7d199be64d26f32b1ecd0 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -629,50 +629,6 @@ int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, ui #endif -/** - * enum VPSP_CMD_STATUS - virtual psp command status - * - * @VPSP_INIT: the initial command from guest - * @VPSP_RUNNING: the middle command to check and run ringbuffer command - * @VPSP_FINISH: inform the guest that the command ran successfully - */ -enum VPSP_CMD_STATUS { - VPSP_INIT = 0, - VPSP_RUNNING, - VPSP_FINISH, - VPSP_MAX -}; - -/** - * struct vpsp_cmd - virtual psp command - * - * @cmd_id: the 
command id is used to distinguish different commands - * @is_high_rb: indicates the ringbuffer level in which the command is placed - */ -struct vpsp_cmd { - u32 cmd_id : 31; - u32 is_high_rb : 1; -}; - -/** - * struct vpsp_ret - virtual psp return result - * - * @pret: the return code from device - * @resv: reserved bits - * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) - * @index: used to distinguish the position of command in the ringbuffer - * @status: indicates the current status of the related command - */ -struct vpsp_ret { - u32 pret : 16; - u32 resv : 1; - u32 format : 1; - u32 index : 12; - u32 status : 2; -}; -#define VPSP_RET_SYS_FORMAT 1 -#define VPSP_RET_PSP_FORMAT 0 - struct kvm_vpsp { struct kvm *kvm; int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); @@ -682,30 +638,6 @@ struct kvm_vpsp { u8 is_csv_guest; }; -#define PSP_2MB_MASK (2*1024*1024 - 1) -#define PSP_HUGEPAGE_2MB (2*1024*1024) -#define PSP_HUGEPAGE_NUM_MAX 128 -#define TKM_CMD_ID_MIN 0x120 -#define TKM_CMD_ID_MAX 0x12f -#define TKM_PSP_CMDID TKM_CMD_ID_MIN -#define TKM_PSP_CMDID_OFFSET 0x128 -#define PSP_VID_MASK 0xff -#define PSP_VID_SHIFT 56 -#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) -#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) -#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) - -struct vpsp_context { - u32 vid; - pid_t pid; - u64 gpa_start; - u64 gpa_end; - - // `vm_is_bound` indicates whether the binding operation has been performed - u32 vm_is_bound; - u32 vm_handle; // only for csv -}; - #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -838,15 +770,6 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint8_t prio, uint32_t index, - phys_addr_t phy_addr, 
struct vpsp_ret *psp_ret); - -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); - -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); - -int vpsp_get_default_vid_permission(void); - int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa); int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, @@ -888,23 +811,6 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } -static inline int -vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_get_context(struct vpsp_context **ctx, pid_t pid); - -static inline int -vpsp_get_default_vid_permission(void) { return -ENODEV; } - -static inline int -kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } static inline int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) { return -ENODEV; }