From 813ac1fff8ef30ed3496eecf7887052d29a4c47d Mon Sep 17 00:00:00 2001 From: duhui Date: Fri, 17 May 2024 04:13:45 +0800 Subject: [PATCH 1/6] anolis: crypto: ccp: Support SM4-GCM for hygon ccp ANBZ: #6244 Add sm4-gcm support for Hygon 4th CPU Signed-off-by: duhui Signed-off-by: yangdepei --- drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/ccp-crypto-main.c | 4 + .../crypto/ccp/ccp-crypto-sm4-galois-hygon.c | 240 ++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 4 + drivers/crypto/ccp/ccp-dev-v5.c | 51 ++++ drivers/crypto/ccp/ccp-dev.c | 23 ++ drivers/crypto/ccp/ccp-dev.h | 13 + drivers/crypto/ccp/ccp-ops.c | 191 ++++++++++++++ include/linux/ccp.h | 56 ++++ 9 files changed, 583 insertions(+) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 032dc121fbcb..c6cde2467f3d 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -34,6 +34,7 @@ $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ ccp-crypto-sm3-hygon.o \ ccp-crypto-sm4-hygon.o \ + ccp-crypto-sm4-galois-hygon.o \ ccp_sm2_sign.asn1.o obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 128e8526a001..83a915ae9e45 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -357,6 +357,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm4_hygon_aeads(&aead_algs); + if (ret) + return ret; + return 0; } #endif diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c new file mode 100644 index 000000000000..a9ec783ea322 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 GCM crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" +#include "ccp-dev.h" + +static int ccp_sm4_gcm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->u.sm4.mode = CCP_SM4_MODE_GCM; + ctx->u.sm4.key_len = key_len; + + memcpy(ctx->u.sm4.key, key, key_len); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, key_len); + + return 0; +} + +static int ccp_sm4_gcm_complete(struct crypto_async_request *async_req, int ret) +{ + return ret; +} + +static int ccp_sm4_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + //16 byte tag only + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ccp_sm4_gcm_crypt(struct aead_request *req, bool encrypt) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = aead_request_ctx(req); + struct scatterlist *iv_sg = NULL; + unsigned int iv_len = 0; + int ret = 0; + + if (!ctx->u.sm4.key_len) + return -EINVAL; + + if (ctx->u.sm4.mode != CCP_SM4_MODE_GCM) + return -EINVAL; + + if (!req->iv) + return -EINVAL; + + /* + * 5 parts: + * plaintext/ciphertext input + * AAD + * key + * IV + * Destination+tag buffer + */ + + /* Prepare the IV (12 byte iv only)*/ + memcpy(rctx->iv, req->iv, HGGON_CCP_SM4GCM_IV_LEN); + /* Set up a scatterlist for the IV */ + iv_sg = &rctx->iv_sg; + iv_len = HGGON_CCP_SM4GCM_IV_LEN; + sg_init_one(iv_sg, rctx->iv, iv_len); + + /* The AAD + plaintext are concatenated in the src buffer */ + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM4_GCM; + rctx->cmd.u.sm4_gcm.authsize = crypto_aead_authsize(tfm); + rctx->cmd.u.sm4_gcm.mode = ctx->u.sm4.mode; + rctx->cmd.u.sm4_gcm.action = encrypt; + rctx->cmd.u.sm4_gcm.key = &ctx->u.sm4.key_sg; + rctx->cmd.u.sm4_gcm.key_len = ctx->u.sm4.key_len; + rctx->cmd.u.sm4_gcm.iv = iv_sg; + rctx->cmd.u.sm4_gcm.iv_len = iv_len; + rctx->cmd.u.sm4_gcm.src = req->src; + rctx->cmd.u.sm4_gcm.src_len = req->cryptlen; + rctx->cmd.u.sm4_gcm.aad_len = req->assoclen; + + /* The cipher text + the tag are in the dst buffer */ + rctx->cmd.u.sm4_gcm.dst = req->dst; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm4_gcm_encrypt(struct aead_request *req) +{ + return ccp_sm4_gcm_crypt(req, CCP_SM4_ACTION_ENCRYPT); +} + +static int ccp_sm4_gcm_decrypt(struct aead_request *req) +{ + return ccp_sm4_gcm_crypt(req, CCP_SM4_ACTION_DECRYPT); +} + +static int ccp_sm4_gcm_cra_init(struct crypto_aead *tfm) +{ + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->complete = ccp_sm4_gcm_complete; + ctx->u.sm4.key_len = 0; + + crypto_aead_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static void ccp_sm4_gcm_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct aead_alg ccp_sm4_gcm_defaults = { + .setkey = ccp_sm4_gcm_setkey, + .setauthsize = ccp_sm4_gcm_setauthsize, + .encrypt = ccp_sm4_gcm_encrypt, + .decrypt = ccp_sm4_gcm_decrypt, + .init = ccp_sm4_gcm_cra_init, + .ivsize = HGGON_CCP_SM4GCM_IV_LEN, + .maxauthsize = SM4_BLOCK_SIZE, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_exit = ccp_sm4_gcm_cra_exit, + .cra_module = 
THIS_MODULE, + }, +}; + +struct ccp_sm4_aead_def { + enum ccp_sm4_aead_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + struct aead_alg *alg_defaults; +}; + +static struct ccp_sm4_aead_def sm4_aead_algs[] = { + { + .mode = CCP_SM4_MODE_GCM, + .version = CCP_VERSION(5, 0), + .name = "gcm(sm4)", + .driver_name = "gcm-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = HGGON_CCP_SM4GCM_IV_LEN, + .alg_defaults = &ccp_sm4_gcm_defaults, + }, +}; + +static int ccp_register_sm4_aead(struct list_head *head, + const struct ccp_sm4_aead_def *def) +{ + struct ccp_crypto_aead *ccp_aead; + struct aead_alg *alg; + int ret; + + ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL); + if (!ccp_aead) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_aead->entry); + + ccp_aead->mode = def->mode; + + /* Copy the defaults and override as necessary */ + alg = &ccp_aead->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + + ret = crypto_register_aead(alg); + if (ret) { + pr_err("%s aead algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_aead); + return ret; + } + + list_add(&ccp_aead->entry, head); + + return 0; +} + +#define RI_SM4GCM_PRESENT_BIT 14 +int ccp_register_sm4_hygon_aeads(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + unsigned int pspccp_version_reg; + + pspccp_version_reg = get_ccp_version_reg_val(); + if (!test_bit(RI_SM4GCM_PRESENT_BIT, (unsigned long *)&pspccp_version_reg)) { + pr_warn("SM4 GCM CCP ENGINE NOT SUPPORTED!\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(sm4_aead_algs); i++) { + if (sm4_aead_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_aead(head, &sm4_aead_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 05a80f11397c..c0bd6f654f50 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -353,6 +353,9 @@ struct ccp_sm4_req_ctx { struct scatterlist iv_sg; u8 iv[SM4_BLOCK_SIZE]; + struct scatterlist tag_sg; + u8 tag[SM4_BLOCK_SIZE]; + struct ccp_cmd cmd; }; @@ -386,5 +389,6 @@ int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); int ccp_register_sm3_hygon_algs(struct list_head *head); int ccp_register_sm4_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_aeads(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index c98062f52a6b..980d2cdb2083 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -161,6 +161,12 @@ union ccp_function { u16 encrypt:1; u16 step:7; } sm4_ctr; + struct { + u16 size:7; + u16 encrypt:1; + u16 rsvd:5; + u16 mode:1; + } sm4_gcm; u16 raw; }; @@ -193,6 +199,9 @@ union ccp_function { #define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) #define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) #define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) +#define CCP_SM4_GCM_SIZE(p) ((p)->sm4_gcm.size) +#define CCP_SM4_GCM_ENCRYPT(p) ((p)->sm4_gcm.encrypt) +#define CCP_SM4_GCM_MODE(p) ((p)->sm4_gcm.mode) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -940,6 +949,47 @@ static int ccp5_perform_sm4_ctr(struct ccp_op *op) return ccp5_do_multi_cmds(&desc, op->cmd_q); } 
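/*
 * A minimal, hypothetical sketch of how a kernel client could drive the
 * "gcm(sm4)" AEAD registered by this series through the generic crypto
 * API. The helper name and buffer layout are illustrative assumptions,
 * not part of the patch; since the algorithm is CRYPTO_ALG_ASYNC, real
 * callers must also handle -EINPROGRESS (e.g. via crypto_wait_req()).
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sm4_gcm_example(const u8 *key, u8 *iv /* 12 bytes */,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int assoclen, unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("gcm(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, 16);		/* SM4_KEY_SIZE */
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);	/* full 16-byte tag */
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* src/dst carry the AAD first, then the text; dst gains the tag */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	ret = crypto_aead_encrypt(req);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}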
+static int ccp5_perform_sm4_gcm(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_key * LSB_ITEM_SIZE; + + op->cmd_q->total_sm4_gcm_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_GCM; + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_GCM_ENCRYPT(&function) = op->u.sm4_gcm.action; + CCP_SM4_GCM_MODE(&function) = op->u.sm4_gcm.mode; + CCP_SM4_GCM_SIZE(&function) = op->u.sm4_gcm.size; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1514,6 +1564,7 @@ static const struct ccp_actions ccp5_actions = { .sm3 = ccp5_perform_sm3, .sm4 = ccp5_perform_sm4, .sm4_ctr = ccp5_perform_sm4_ctr, + .sm4_gcm = ccp5_perform_sm4_gcm, .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 8b00875a0d65..f4b856e8d003 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -270,6 +270,29 @@ unsigned int ccp_version(void) } EXPORT_SYMBOL_GPL(ccp_version); +/** + * get_ccp_version_reg_val - get the PspCcpVersion register value + * + * Returns the PspCcpVersion register value of the first CCP in the list, + * or zero if no CCP device is present + */ +unsigned int get_ccp_version_reg_val(void) +{ + struct ccp_device *dp; + unsigned long flags; + int ret = 0; + + read_lock_irqsave(&ccp_unit_lock, flags); + if (!list_empty(&ccp_units)) { + dp = list_first_entry(&ccp_units, struct ccp_device, entry); + ret = ioread32(dp->io_regs + CMD5_PSP_CCP_VERSION); + } + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(get_ccp_version_reg_val); + /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index cb93a57a75c6..6727e50bf607 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -167,6 +167,10 @@ #define REQ1_ECC_AFFINE_CONVERT 0x00200000 #define REQ1_ECC_FUNCTION_SHIFT 18 +/***** HYGON CCP SM4 GCM related defines *****/ +#define HGGON_CCP_SM4GCM_IV_LEN 12 +#define HGGON_CCP_SM4GCM_TAG_LEN 16 + /****** REQ4 Related Values ******/ #define REQ4_KSB_SHIFT 18 #define REQ4_MEMTYPE_SHIFT 16 @@ -344,6 +348,7 @@ struct ccp_cmd_queue { unsigned long total_sm3_ops; unsigned long total_sm4_ops; unsigned long total_sm4_ctr_ops; + unsigned long total_sm4_gcm_ops; } ____cacheline_aligned; struct ccp_device { @@ -563,6 +568,12 @@ struct ccp_sm4_ctr_op { u32 step; }; +struct ccp_sm4_gcm_op { + enum ccp_sm4_action action; + enum ccp_sm4_aead_mode mode; + u32 size; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -590,6 +601,7 @@ struct ccp_op { struct 
ccp_sm3_op sm3; struct ccp_sm4_op sm4; struct ccp_sm4_ctr_op sm4_ctr; + struct ccp_sm4_gcm_op sm4_gcm; } u; }; @@ -702,6 +714,7 @@ struct ccp_actions { int (*sm3)(struct ccp_op *op); int (*sm4)(struct ccp_op *op); int (*sm4_ctr)(struct ccp_op *op); + int (*sm4_gcm)(struct ccp_op *op); int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 88ed6bc2f129..664bbacb198e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2975,6 +2975,194 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm4_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_gcm_engine *sm4_gcm = &cmd->u.sm4_gcm; + struct ccp_dm_workarea key, ctx; + struct ccp_data src, dst; + struct ccp_op op; + unsigned int authsize; + bool in_place = false; /* Default value */ + int ret; + u8 *pt_dec_data = NULL; + struct scatterlist sg_outp; + + if (sm4_gcm->iv == NULL || sm4_gcm->iv_len != HGGON_CCP_SM4GCM_IV_LEN) + return -EINVAL; + + if (sm4_gcm->key == NULL || sm4_gcm->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sm4_gcm->src == NULL || sm4_gcm->dst == NULL) + return -EINVAL; + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { + pt_dec_data = kmalloc(sm4_gcm->src_len, GFP_KERNEL); + if (!pt_dec_data) { + ret = -ENOMEM; + goto out; + } + memset(pt_dec_data, 0, sm4_gcm->src_len); + sg_init_one(&sg_outp, pt_dec_data, sm4_gcm->src_len); + } + + /* Zero defaults to 16 bytes, the maximum size */ + authsize = sm4_gcm->authsize ? sm4_gcm->authsize : SM4_BLOCK_SIZE; + switch (authsize) { + case 16: + break; + default: + ret = -EINVAL; + goto out; + } + + ret = -EIO; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.sb_key = cmd_q->sb_key; /* Pre-allocated */ + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ + op.u.sm4_gcm.action = sm4_gcm->action; + op.u.sm4_gcm.mode = sm4_gcm->mode; + + if (sg_virt(sm4_gcm->src) == sg_virt(sm4_gcm->dst)) + in_place = true; + + /* Copy the key to the LSB */ + ret = ccp_init_dm_workarea(&key, cmd_q, SM4_KEY_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_key; + + ret = ccp_set_dm_area(&key, 0, sm4_gcm->key, 0, sm4_gcm->key_len); + if (ret) + goto e_key; + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* Copy the context (IV) to the LSB.*/ + ret = ccp_init_dm_workarea(&ctx, cmd_q, HGGON_CCP_SM4GCM_IV_LEN, DMA_BIDIRECTIONAL); + if (ret) + goto e_ctx; + + ret = ccp_set_dm_area(&ctx, 0, sm4_gcm->iv, 0, sm4_gcm->iv_len); + if (ret) + goto e_ctx; + + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) { + ret = ccp_init_data(&src, cmd_q, sm4_gcm->src, + sm4_gcm->aad_len + sm4_gcm->src_len, + SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_src; + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, + sm4_gcm->dst, sm4_gcm->aad_len + sm4_gcm->src_len + authsize, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_dst; + } + } else { + ret = ccp_init_data(&src, cmd_q, sm4_gcm->src, + sm4_gcm->aad_len + sm4_gcm->src_len - authsize, + SM4_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_src; + ret = ccp_init_data(&dst, cmd_q, &sg_outp, sm4_gcm->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_dst; + } + + op.init = 1; + /* send data to the CCP SM4 GCM engine */ + while (src.sg_wa.bytes_left) { + if (op.init == 1) { + ccp_update_sg_workarea(&src.sg_wa, sm4_gcm->aad_len); + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) + ccp_update_sg_workarea(&dst.sg_wa, sm4_gcm->aad_len); + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + op.src.u.dma.offset -= sm4_gcm->aad_len; + op.src.u.dma.length += sm4_gcm->aad_len; + op.u.sm4_gcm.size = sm4_gcm->aad_len; + } else { + op.u.sm4_gcm.size = 0; + if (src.sg_wa.bytes_left != 0) + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + } + + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + } + + ccp_process_data(&src, &dst, &op); + op.init = 0; + } + + /* retrieve the SM4 GCM iv */ + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ccp_get_dm_area(&ctx, 0, sm4_gcm->iv, 0, HGGON_CCP_SM4GCM_IV_LEN); + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { + if (memcmp((u8 *)sg_virt(sm4_gcm->src) + + sm4_gcm->aad_len + sm4_gcm->src_len - authsize, + &pt_dec_data[sm4_gcm->src_len - authsize], HGGON_CCP_SM4GCM_TAG_LEN) != 0) { + pr_err("SM4 GCM Dec error!\n"); + ret = -EINVAL; + } + memcpy((u8 *)sg_virt(sm4_gcm->dst) + sm4_gcm->aad_len, + pt_dec_data, sm4_gcm->src_len - authsize); + } + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); +e_src: + ccp_free_data(&src, cmd_q); +e_ctx: + memset(ctx.address, 0, SM4_BLOCK_SIZE); + ccp_dm_free(&ctx); +e_key: + memset(key.address, 0, SM4_KEY_SIZE); + ccp_dm_free(&key); +out: + kfree(pt_dec_data); + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -3031,6 +3219,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM4_CTR: ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM4_GCM: + ret = ccp_run_sm4_gcm_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 45cc6fedd131..c644f99556e2 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -42,6 +42,13 @@ int ccp_present(void); */ unsigned int ccp_version(void); +/** + * get_ccp_version_reg_val - read the PspCcpVersion register value of the CCP + * + * Returns the PspCcpVersion register value, or zero if no CCP is present + */ +unsigned int get_ccp_version_reg_val(void); + /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * @@ -79,6 +86,11 @@ static inline unsigned int ccp_version(void) return 0; } +static inline unsigned int get_ccp_version_reg_val(void) +{ + return 0; +} + static inline int 
ccp_enqueue_cmd(struct ccp_cmd *cmd) { return -ENODEV; @@ -698,6 +710,16 @@ enum ccp_sm4_mode { CCP_SM4_MODE__LAST, }; +/** + * ccp_sm4_aead_mode - SM4 AEAD operation mode + * + * @CCP_SM4_MODE_GCM: GCM mode + */ +enum ccp_sm4_aead_mode { + CCP_SM4_MODE_GCM = 0, + CCP_SM4_AEAD_MODE__LAST, +}; + /** * ccp_sm4_action - SM4 operation * @@ -784,6 +806,38 @@ struct ccp_sm4_ctr_engine { u64 src_len; /* In bytes */ }; +/** + * struct ccp_sm4_gcm_engine - CCP SM4 GCM operation + * @action: SM4 GCM operation (decrypt/encrypt) + * @mode: SM4 AEAD mode (GCM is the only supported mode) + * @authsize: length in bytes of the authentication tag (zero selects 16) + * @key: key to be used for this SM4 GCM operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 GCM operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @aad_len: length in bytes of additional authenticated data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, mode, key, key_len, iv, iv_len, src, dst, src_len, aad_len + */ +struct ccp_sm4_gcm_engine { + enum ccp_sm4_action action; + enum ccp_sm4_aead_mode mode; + + u32 authsize; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 aad_len; /* In bytes */ +}; + /** * ccp_engine - CCP operation identifiers * @@ -811,6 +865,7 @@ enum ccp_engine { CCP_ENGINE_SM3, CCP_ENGINE_SM4, CCP_ENGINE_SM4_CTR, + CCP_ENGINE_SM4_GCM, CCP_ENGINE__LAST, }; @@ -863,6 +918,7 @@ struct ccp_cmd { struct ccp_sm3_engine sm3; struct ccp_sm4_engine sm4; struct ccp_sm4_ctr_engine sm4_ctr; + struct ccp_sm4_gcm_engine sm4_gcm; } u; /* Completion callback support */ -- Gitee From cdccfdff8aab5e50f32a0c9975e1fec51e569fe3 Mon Sep 17 00:00:00 2001 From: duhui Date: Wed, 10 Jul 2024 22:50:59 +0800 Subject: [PATCH 2/6] anolis: crypto: ccp: Support SM4-XTS for hygon ccp ANBZ: #6244 Add sm4-xts support for Hygon 4th CPU Signed-off-by: duhui Signed-off-by: yangdepei --- drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 22 ++- drivers/crypto/ccp/ccp-crypto.h | 2 +- drivers/crypto/ccp/ccp-dev-v5.c | 2 +- drivers/crypto/ccp/ccp-dev.h | 3 + drivers/crypto/ccp/ccp-ops.c | 156 +++++++++++++++++----- include/linux/ccp.h | 2 + 6 files changed, 150 insertions(+), 37 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c index 2328a9f87218..b7a72aee3a13 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -23,6 +23,7 @@ enum ccp_sm4_alg_mode { CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_XTS = CCP_SM4_MODE_XTS, CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, @@ -57,10 +58,10 @@ static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!key) return -EINVAL; - memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); - sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + memcpy(ctx->u.sm4.key, key, key_len); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, key_len); - ctx->u.sm4.key_len = SM4_KEY_SIZE; + ctx->u.sm4.key_len = key_len; return 0; } @@ -122,7 +123,7 @@ static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) cmd->u.sm4.select = 1; cmd->u.sm4.key = &ctx->u.sm4.key_sg; - cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.key_len = 
ctx->u.sm4.key_len; cmd->u.sm4.iv = iv_sg; cmd->u.sm4.iv_len = iv_sg ? SM4_BLOCK_SIZE : 0; @@ -241,6 +242,15 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = SM4_BLOCK_SIZE, .alg_defaults = &ccp_sm4_defaults, }, + { + .mode = CCP_SM4_ALG_MODE_XTS, + .version = CCP_VERSION(5, 0), + .name = "xts(sm4)", + .driver_name = "xts-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, { .mode = CCP_SM4_ALG_MODE_CTR, .version = CCP_VERSION(5, 0), @@ -275,6 +285,10 @@ static int ccp_register_sm4_hygon_alg(struct list_head *head, def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; + if (def->mode == CCP_SM4_ALG_MODE_XTS) { + alg->min_keysize = SM4_KEY_SIZE * 2; + alg->max_keysize = SM4_KEY_SIZE * 2; + } ret = crypto_register_skcipher(alg); if (ret) { diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index c0bd6f654f50..8e78909f0c6c 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -344,7 +344,7 @@ struct ccp_sm3_exp_ctx { /***** SM4 related defines *****/ struct ccp_sm4_ctx { struct scatterlist key_sg; - u8 key[SM4_KEY_SIZE]; + u8 key[SM4_KEY_SIZE * 2]; u32 key_len; u32 mode; }; diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 980d2cdb2083..f4855e223157 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -869,7 +869,7 @@ static int ccp5_perform_sm4(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + u32 key_addr = op->sb_key * LSB_ITEM_SIZE; op->cmd_q->total_sm4_ops++; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 6727e50bf607..527330d5b0b1 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -204,6 +204,9 @@ #define CCP5_XTS_AES_KEY_SB_COUNT 2 #define CCP_XTS_AES_CTX_SB_COUNT 1 +#define CCP_XTS_SM4_KEY_SB_COUNT 1 +#define CCP_XTS_SM4_CTX_SB_COUNT 1 + #define CCP_DES3_KEY_SB_COUNT 1 #define CCP_DES3_CTX_SB_COUNT 1 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 664bbacb198e..58e331a4a428 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2728,19 +2728,27 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sm4_engine *sm4 = &cmd->u.sm4; - struct ccp_dm_workarea iv_key; + struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; + unsigned int jobid; bool in_place = false; int ret; if (sm4->src == NULL || sm4->dst == NULL) return -EINVAL; - if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + if (sm4->key == NULL) + return -EINVAL; + + switch ((sm4->mode == CCP_SM4_MODE_XTS) ? 
sm4->key_len/2 : sm4->key_len) { + case SM4_KEY_SIZE: + break; + default: return -EINVAL; + } - if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + if (sg_nents_for_len(sm4->key, sm4->key_len) < 0) return -EINVAL; if (sm4->mode != CCP_SM4_MODE_ECB) { @@ -2751,11 +2759,77 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return -EINVAL; } + jobid = CCP_NEW_JOBID(cmd_q->ccp); + ret = -EIO; + memset(&op, 0, sizeof(op)); + if (sm4->mode == CCP_SM4_MODE_XTS) { + op.cmd_q = cmd_q; + op.jobid = jobid; + op.sb_key = cmd_q->sb_key; + op.sb_ctx = cmd_q->sb_ctx; + + op.u.sm4.select = 0; + op.u.sm4.mode = CCP_SM4_MODE_ECB; + op.u.sm4.action = CCP_SM4_ACTION_ENCRYPT; + + /* Copy the tweak key to the LSB */ + + ret = ccp_init_dm_workarea(&key, cmd_q, + CCP_XTS_SM4_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); + if (ret) { + ccp_dm_free(&key); + return ret; + } + + ret = ccp_set_dm_area(&key, 0, sm4->key, sm4->key_len/2, sm4->key_len/2); + if (ret) { + ccp_dm_free(&key); + return ret; + } + + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + ccp_dm_free(&key); + return ret; + } + + ret = ccp_init_data(&src, cmd_q, sm4->iv, + sm4->iv_len, SM4_BLOCK_SIZE, DMA_BIDIRECTIONAL); + if (ret) { + ccp_dm_free(&key); + return ret; + } + + dst = src; + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + + op.ioc = 1; + op.soc = 0; + op.init = 0; + op.eom = 1; + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + ccp_free_data(&src, cmd_q); + ccp_dm_free(&key); + return ret; + } + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + ccp_free_data(&src, cmd_q); + ccp_dm_free(&key); + return ret; + } + } + memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.jobid = jobid; op.ioc = 1; - op.sb_ctx = cmd_q->sb_ctx; + op.sb_key = cmd_q->sb_key; /* Pre-allocated */ + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.u.sm4.action = sm4->action; op.u.sm4.mode = sm4->mode; op.u.sm4.select = sm4->select; @@ -2767,6 +2841,40 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sg_virt(sm4->src) == sg_virt(sm4->dst)) in_place = true; + /* Copy the key to the LSB */ + ret = ccp_init_dm_workarea(&key, cmd_q, + CCP_XTS_SM4_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); + if (ret) + goto e_key; + + ret = ccp_set_dm_area(&key, 0, sm4->key, 0, + (sm4->mode == CCP_SM4_MODE_XTS) ? sm4->key_len/2 : sm4->key_len); + if (ret) + goto e_key; + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* Copy the context (IV) to the LSB.*/ + ret = ccp_init_dm_workarea(&ctx, cmd_q, + CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); + if (ret) + goto e_ctx; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + ret = ccp_set_dm_area(&ctx, 0, sm4->iv, 0, sm4->iv_len); + if (ret) + goto e_ctx; + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) @@ -2781,24 +2889,6 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; } - /* load iv and key */ - ret = ccp_init_dm_workarea(&iv_key, cmd_q, - SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); - if (ret) - goto e_dst; - - if (sm4->mode != CCP_SM4_MODE_ECB) - ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); - - ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); - - ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, - CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - goto e_iv_key; - } - /* send data to the CCP SM4 engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); @@ -2813,14 +2903,14 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ret = cmd_q->ccp->vdata->perform->sm4(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_iv_key; + goto e_dst; } if (!src.sg_wa.bytes_left || op.soc) { ret = cmd_q->ccp->vdata->perform->run_cmd(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_iv_key; + goto e_dst; } } @@ -2829,19 +2919,17 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sm4->mode != CCP_SM4_MODE_ECB) { /* retrieve the SM4 iv */ - ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_iv_key; + goto e_dst; } - ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + ccp_get_dm_area(&ctx, 0, sm4->iv, 0, sm4->iv_len); } -e_iv_key: - memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); - ccp_dm_free(&iv_key); + e_dst: if (!in_place) @@ -2850,6 +2938,12 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) e_src: ccp_free_data(&src, cmd_q); +e_ctx: + ccp_dm_free(&ctx); + +e_key: + ccp_dm_free(&key); + return ret; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index c644f99556e2..9f949d51d7a1 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -699,6 +699,7 @@ struct ccp_sm3_engine { * @CCP_SM4_MODE_CBC: CBC mode * @CCP_SM4_MODE_OFB: OFB mode * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_XTS: XTS mode * @CCP_SM4_MODE_CTR: CTR mode */ enum ccp_sm4_mode { @@ -706,6 +707,7 @@ enum ccp_sm4_mode { CCP_SM4_MODE_CBC, CCP_SM4_MODE_OFB, CCP_SM4_MODE_CFB, + CCP_SM4_MODE_XTS, CCP_SM4_MODE_CTR, CCP_SM4_MODE__LAST, }; -- Gitee From fa5a21dc0d15e6b54923942fbc8eed44a172bde9 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 14 Dec 2024 15:34:53 +0800 Subject: [PATCH 3/6] anolis: crypto: ccp: sm4-xts crypto support data not aligned to 16 bytes ANBZ: #6244 support any data length for sm4-xts Signed-off-by: Yabin Li Signed-off-by: yangdepei --- drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 3 +- drivers/crypto/ccp/ccp-dev-v5.c | 2 +- drivers/crypto/ccp/ccp-dev.h | 3 - drivers/crypto/ccp/ccp-ops.c | 255 ++++++++++++---------- 5 files changed, 139 insertions(+), 125 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 12264442ae72..c822de0fc685 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -15,6 +15,7 @@ config CRYPTO_DEV_SP_CCP select DMA_ENGINE select CRYPTO_SHA1 select CRYPTO_SHA256 + select CRYPTO_SM4 help Provides the support for AMD Cryptographic Coprocessor (CCP) device which can be used to offload encryption operations such as SHA, AES diff --git 
a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c index b7a72aee3a13..f19529d84390 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -84,6 +84,7 @@ static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) if ((mode != CCP_SM4_ALG_MODE_CTR) && (mode != CCP_SM4_ALG_MODE_OFB) && (mode != CCP_SM4_ALG_MODE_CFB) && + (mode != CCP_SM4_ALG_MODE_XTS) && (req->cryptlen & (SM4_BLOCK_SIZE - 1))) return -EINVAL; @@ -245,7 +246,7 @@ static struct ccp_sm4_def sm4_algs[] = { { .mode = CCP_SM4_ALG_MODE_XTS, .version = CCP_VERSION(5, 0), - .name = "xts(sm4)", + .name = "xts_ccp(sm4)", .driver_name = "xts-sm4-ccp", .blocksize = SM4_BLOCK_SIZE, .ivsize = SM4_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index f4855e223157..980d2cdb2083 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -869,7 +869,7 @@ static int ccp5_perform_sm4(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; - u32 key_addr = op->sb_key * LSB_ITEM_SIZE; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; op->cmd_q->total_sm4_ops++; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 527330d5b0b1..6727e50bf607 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -204,9 +204,6 @@ #define CCP5_XTS_AES_KEY_SB_COUNT 2 #define CCP_XTS_AES_CTX_SB_COUNT 1 -#define CCP_XTS_SM4_KEY_SB_COUNT 1 -#define CCP_XTS_SM4_CTX_SB_COUNT 1 - #define CCP_DES3_KEY_SB_COUNT 1 #define CCP_DES3_CTX_SB_COUNT 1 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 58e331a4a428..11a58516f9d7 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "ccp-dev.h" @@ -56,6 +57,35 @@ static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ ccp_gen_jobid(ccp) : 0) +/* + * Complete the encryption operation of a block for sm4 algorithm. 
+ * enc: 1--encrypt 0--decrypt + */ +static inline int sm4_generic_crypt_block(u8 *dst, u8 *src, u8 *key, int enc) +{ + struct crypto_cipher *cipher = NULL; + int ret = 0; + + cipher = crypto_alloc_cipher("sm4", 0, 0); + if (IS_ERR(cipher)) { + pr_err("Allocate sm4 cipher failed, %ld\n", PTR_ERR(cipher)); + return -ENOMEM; + } + + ret = crypto_cipher_setkey(cipher, key, SM4_KEY_SIZE); + if (ret) + goto out; + + if (enc) + crypto_cipher_encrypt_one(cipher, dst, src); + else + crypto_cipher_decrypt_one(cipher, dst, src); + +out: + crypto_free_cipher(cipher); + return ret; +} + static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; @@ -2728,25 +2758,24 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sm4_engine *sm4 = &cmd->u.sm4; - struct ccp_dm_workarea key, ctx; + struct ccp_dm_workarea iv_key; struct ccp_data src, dst; struct ccp_op op; - unsigned int jobid; + int remain = sm4->src_len & (SM4_BLOCK_SIZE - 1); bool in_place = false; int ret; - if (sm4->src == NULL || sm4->dst == NULL) + if (sm4->src == NULL || sm4->dst == NULL || sm4->key == NULL) return -EINVAL; - if (sm4->key == NULL) + if (sm4->mode != CCP_SM4_MODE_XTS && sm4->key_len != SM4_KEY_SIZE) return -EINVAL; - switch ((sm4->mode == CCP_SM4_MODE_XTS) ? sm4->key_len/2 : sm4->key_len) { - case SM4_KEY_SIZE: - break; - default: + if (sm4->mode == CCP_SM4_MODE_XTS && sm4->src_len < SM4_BLOCK_SIZE) + return -EINVAL; + + if (sm4->mode == CCP_SM4_MODE_XTS && sm4->key_len != SM4_KEY_SIZE * 2) return -EINVAL; - } if (sg_nents_for_len(sm4->key, sm4->key_len) < 0) return -EINVAL; @@ -2759,77 +2788,11 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return -EINVAL; } - jobid = CCP_NEW_JOBID(cmd_q->ccp); - ret = -EIO; - memset(&op, 0, sizeof(op)); - if (sm4->mode == CCP_SM4_MODE_XTS) { - op.cmd_q = cmd_q; - op.jobid = jobid; - op.sb_key = cmd_q->sb_key; - op.sb_ctx = cmd_q->sb_ctx; - - op.u.sm4.select = 0; - op.u.sm4.mode = CCP_SM4_MODE_ECB; - op.u.sm4.action = CCP_SM4_ACTION_ENCRYPT; - - /* Copy the tweak key to the LSB */ - - ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_XTS_SM4_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); - if (ret) { - ccp_dm_free(&key); - return ret; - } - - ret = ccp_set_dm_area(&key, 0, sm4->key, sm4->key_len/2, sm4->key_len/2); - if (ret) { - ccp_dm_free(&key); - return ret; - } - - ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - ccp_dm_free(&key); - return ret; - } - - ret = ccp_init_data(&src, cmd_q, sm4->iv, - sm4->iv_len, SM4_BLOCK_SIZE, DMA_BIDIRECTIONAL); - if (ret) { - ccp_dm_free(&key); - return ret; - } - - dst = src; - ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); - - op.ioc = 1; - op.soc = 0; - op.init = 0; - op.eom = 1; - ret = cmd_q->ccp->vdata->perform->sm4(&op); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - ccp_free_data(&src, cmd_q); - ccp_dm_free(&key); - return ret; - } - ret = cmd_q->ccp->vdata->perform->run_cmd(&op); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - ccp_free_data(&src, cmd_q); - ccp_dm_free(&key); - return ret; - } - } - memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = jobid; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.ioc = 1; - op.sb_key = cmd_q->sb_key; /* Pre-allocated */ - op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ + op.sb_ctx = cmd_q->sb_ctx; 
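/*
 * A hedged aside (orientation only, not a spec quote): the XTS tail
 * handling added further below implements standard XTS ciphertext
 * stealing. With r = remain, T_i the i-th tweak and E() one-block SM4
 * under the data key:
 *
 *   encrypt:  CC      = E(P_{n-1} ^ T_{n-1}) ^ T_{n-1}   (CCP output)
 *             C_n     = first r bytes of CC
 *             C_{n-1} = E((P_n || last 16-r bytes of CC) ^ T_n) ^ T_n
 *
 *   decrypt:  mirrors this, but the stolen block C_{n-1} must be
 *             processed with the later tweak T_n first, which is why
 *             the code advances the tweak with gf128mul_x_lle() before
 *             the first software-decrypted block.
 */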
op.u.sm4.action = sm4->action; op.u.sm4.mode = sm4->mode; op.u.sm4.select = sm4->select; @@ -2841,38 +2804,10 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sg_virt(sm4->src) == sg_virt(sm4->dst)) in_place = true; - /* Copy the key to the LSB */ - ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_XTS_SM4_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); - if (ret) - goto e_key; - - ret = ccp_set_dm_area(&key, 0, sm4->key, 0, - (sm4->mode == CCP_SM4_MODE_XTS) ? sm4->key_len/2 : sm4->key_len); - if (ret) - goto e_key; - ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - goto e_key; - } - - /* Copy the context (IV) to the LSB.*/ - ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); - if (ret) - goto e_ctx; - - if (sm4->mode != CCP_SM4_MODE_ECB) { - ret = ccp_set_dm_area(&ctx, 0, sm4->iv, 0, sm4->iv_len); - if (ret) - goto e_ctx; - ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, - CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - goto e_ctx; - } + if (sm4->mode == CCP_SM4_MODE_XTS && remain) { + sm4->src_len -= remain; + if (sm4->action == CCP_SM4_ACTION_DECRYPT) + sm4->src_len -= SM4_BLOCK_SIZE; } ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, @@ -2889,6 +2824,33 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; } + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + if (sm4->mode == CCP_SM4_MODE_XTS) { + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, + SM4_KEY_SIZE, SM4_KEY_SIZE); + ret = sm4_generic_crypt_block(iv_key.address, + iv_key.address, iv_key.address + SM4_BLOCK_SIZE, 1); + if (ret) + goto e_iv_key; + } + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + /* send data to the CCP SM4 engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); @@ -2903,14 +2865,14 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ret = cmd_q->ccp->vdata->perform->sm4(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_dst; + goto e_iv_key; } if (!src.sg_wa.bytes_left || op.soc) { ret = cmd_q->ccp->vdata->perform->run_cmd(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_dst; + goto e_iv_key; } } @@ -2919,17 +2881,76 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sm4->mode != CCP_SM4_MODE_ECB) { /* retrieve the SM4 iv */ - ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; - goto e_dst; + goto e_iv_key; } - ccp_get_dm_area(&ctx, 0, sm4->iv, 0, sm4->iv_len); + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); } + if (sm4->mode == CCP_SM4_MODE_XTS && remain) { + struct ccp_dm_workarea xts_wa; + u8 tweak[SM4_BLOCK_SIZE] = {0}; + u8 temp[SM4_BLOCK_SIZE] = {0}; + u8 key1[SM4_KEY_SIZE] = {0}; + + ret = ccp_init_dm_workarea(&xts_wa, cmd_q, + SM4_BLOCK_SIZE * 2, DMA_BIDIRECTIONAL); + if (ret) + goto e_iv_key; + + 
memcpy(tweak, iv_key.address, SM4_BLOCK_SIZE); + scatterwalk_map_and_copy(key1, sm4->key, 0, SM4_KEY_SIZE, 0); + if (sm4->action == CCP_SM4_ACTION_ENCRYPT) { + ccp_set_dm_area(&xts_wa, 0, sm4->dst, + sm4->src_len - SM4_BLOCK_SIZE, SM4_BLOCK_SIZE); + memcpy(xts_wa.address + SM4_BLOCK_SIZE, xts_wa.address, remain); + ccp_set_dm_area(&xts_wa, 0, sm4->src, sm4->src_len, remain); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 1); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ccp_get_dm_area(&xts_wa, 0, sm4->dst, + sm4->src_len - SM4_BLOCK_SIZE, remain + SM4_BLOCK_SIZE); + } else { + gf128mul_x_lle((be128 *)tweak, (be128 *)tweak); + ccp_set_dm_area(&xts_wa, 0, sm4->src, + sm4->src_len, remain + SM4_BLOCK_SIZE); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 0); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + + memcpy(tweak, iv_key.address, SM4_BLOCK_SIZE); + memcpy(temp, xts_wa.address, remain); + memcpy(xts_wa.address, xts_wa.address + SM4_BLOCK_SIZE, remain); + memcpy(xts_wa.address + SM4_BLOCK_SIZE, temp, remain); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 0); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ccp_get_dm_area(&xts_wa, 0, sm4->dst, sm4->src_len, + remain + SM4_BLOCK_SIZE); + } + + ccp_dm_free(&xts_wa); + } +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); e_dst: if (!in_place) @@ -2938,12 +2959,6 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) e_src: ccp_free_data(&src, cmd_q); -e_ctx: - ccp_dm_free(&ctx); - -e_key: - ccp_dm_free(&key); - return ret; } -- Gitee From 88ed037d32c8fb1983987145e491e113702b0a45 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 13 Feb 2025 20:19:28 +0800 Subject: [PATCH 4/6] anolis: crypto: ccp: Optimize sm4-gcm algorithm for hygon ccp ANBZ: #6244 Optimize the performance of sm4-gcm Signed-off-by: Yabin Li Signed-off-by: yangdepei --- drivers/crypto/ccp/Kconfig | 1 + .../crypto/ccp/ccp-crypto-sm4-galois-hygon.c | 51 ++- drivers/crypto/ccp/ccp-dev.h | 4 +- drivers/crypto/ccp/ccp-ops.c | 375 ++++++++++++++---- 4 files changed, 319 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index c822de0fc685..a9c1ab433e09 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -16,6 +16,7 @@ config CRYPTO_DEV_SP_CCP select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SM4 + select CRYPTO_GF128MUL help Provides the support for AMD Cryptographic Coprocessor (CCP) device which can be used to offload encryption operations such as SHA, AES diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c index a9ec783ea322..ac9a561832e8 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c @@ -2,7 +2,7 @@ /* * Hygon Cryptographic Coprocessor (CCP) SM4 GCM crypto API support * - * Copyright (C) 2022 Hygon Info Technologies Ltd. + * Copyright (C) 2022 Hygon Information Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -20,6 +20,8 @@ #include "ccp-crypto.h" #include "ccp-dev.h" +#define RI_SM4GCM_PRESENT_BIT 14 + static int ccp_sm4_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int key_len) { @@ -43,8 +45,13 @@ static int ccp_sm4_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { - //16 byte tag only case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: break; default: return -EINVAL; @@ -60,7 +67,6 @@ static int ccp_sm4_gcm_crypt(struct aead_request *req, bool encrypt) struct ccp_sm4_req_ctx *rctx = aead_request_ctx(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; - int ret = 0; if (!ctx->u.sm4.key_len) return -EINVAL; @@ -72,22 +78,18 @@ static int ccp_sm4_gcm_crypt(struct aead_request *req, bool encrypt) return -EINVAL; /* - * 5 parts: - * plaintext/ciphertext input - * AAD - * key - * IV - * Destination+tag buffer + * encrypt: + * AAD & PT => AAD, CT & TAG + * decrypt: + * AAD & [CT + TAG] => AAD & PT */ /* Prepare the IV (12 byte iv only)*/ - memcpy(rctx->iv, req->iv, HGGON_CCP_SM4GCM_IV_LEN); - /* Set up a scatterlist for the IV */ + memcpy(rctx->iv, req->iv, HYGON_CCP_SM4GCM_IV_LEN); iv_sg = &rctx->iv_sg; - iv_len = HGGON_CCP_SM4GCM_IV_LEN; + iv_len = HYGON_CCP_SM4GCM_IV_LEN; sg_init_one(iv_sg, rctx->iv, iv_len); - /* The AAD + plaintext are concatenated in the src buffer */ memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_SM4_GCM; @@ -101,13 +103,9 @@ static int ccp_sm4_gcm_crypt(struct aead_request *req, bool encrypt) rctx->cmd.u.sm4_gcm.src = req->src; rctx->cmd.u.sm4_gcm.src_len = req->cryptlen; rctx->cmd.u.sm4_gcm.aad_len = req->assoclen; - - /* The cipher text + the tag are in the dst buffer */ rctx->cmd.u.sm4_gcm.dst = req->dst; - ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); - - return ret; + return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); } static int ccp_sm4_gcm_encrypt(struct aead_request *req) @@ -137,13 +135,13 @@ static void ccp_sm4_gcm_cra_exit(struct crypto_tfm *tfm) } static struct aead_alg ccp_sm4_gcm_defaults = { - .setkey = ccp_sm4_gcm_setkey, - .setauthsize = ccp_sm4_gcm_setauthsize, - .encrypt = ccp_sm4_gcm_encrypt, - .decrypt = ccp_sm4_gcm_decrypt, - .init = ccp_sm4_gcm_cra_init, - .ivsize = HGGON_CCP_SM4GCM_IV_LEN, - .maxauthsize = SM4_BLOCK_SIZE, + .setkey = ccp_sm4_gcm_setkey, + .setauthsize = ccp_sm4_gcm_setauthsize, + .encrypt = ccp_sm4_gcm_encrypt, + .decrypt = ccp_sm4_gcm_decrypt, + .init = ccp_sm4_gcm_cra_init, + .ivsize = HYGON_CCP_SM4GCM_IV_LEN, + .maxauthsize = SM4_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | @@ -174,7 +172,7 @@ static struct ccp_sm4_aead_def sm4_aead_algs[] = { .name = "gcm(sm4)", .driver_name = "gcm-sm4-ccp", .blocksize = SM4_BLOCK_SIZE, - .ivsize = HGGON_CCP_SM4GCM_IV_LEN, + .ivsize = HYGON_CCP_SM4GCM_IV_LEN, .alg_defaults = &ccp_sm4_gcm_defaults, }, }; @@ -215,7 +213,6 @@ static int ccp_register_sm4_aead(struct list_head *head, return 0; } -#define RI_SM4GCM_PRESENT_BIT 14 int ccp_register_sm4_hygon_aeads(struct list_head *head) { int i, ret; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 6727e50bf607..36a35bcf9f25 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -168,8 +168,8 @@ #define REQ1_ECC_FUNCTION_SHIFT 18 /***** HYGON CCP SM4 GCM 
related defines *****/ -#define HGGON_CCP_SM4GCM_IV_LEN 12 -#define HGGON_CCP_SM4GCM_TAG_LEN 16 +#define HYGON_CCP_SM4GCM_IV_LEN 12 +#define HYGON_CCP_SM4GCM_TAG_LEN 16 /****** REQ4 Related Values ******/ #define REQ4_KSB_SHIFT 18 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 11a58516f9d7..584ade2cc682 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -56,6 +56,19 @@ static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ ccp_gen_jobid(ccp) : 0) +#define GHASH_BLOCK_SIZE 16 +#define CCP_AAD_LEN_MAX_HG 128 + +static void ccp_gcm_ghash(be128 *ghash, const be128 *h, const void *s, int l) +{ + while (l > 0) { + crypto_xor((u8 *)ghash, s, min(l, GHASH_BLOCK_SIZE)); + gf128mul_lle(ghash, h); + + s += GHASH_BLOCK_SIZE; + l -= GHASH_BLOCK_SIZE; + } +} /* * Complete the encryption operation of a block for sm4 algorithm. @@ -3084,19 +3097,113 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_sm4_gcm_crypt(struct ccp_cmd_queue *cmd_q, + struct ccp_sm4_gcm_engine *sm4_gcm) +{ + be128 tail = {cpu_to_be64(sm4_gcm->aad_len * 8), 0}; + struct ccp_dm_workarea ikey; + struct ccp_dm_workarea ghash; + u8 H[SM4_BLOCK_SIZE] = {0}; + u8 T[SM4_BLOCK_SIZE] = {0}; + u8 I[SM4_BLOCK_SIZE] = {0}; + u8 C[SM4_BLOCK_SIZE] = {0}; + int ilen = sm4_gcm->aad_len + sm4_gcm->src_len; + int slen = 0, authsize = 0; + int len = 0, process = 0; + int ret = 0; + + authsize = sm4_gcm->authsize ? sm4_gcm->authsize : SM4_BLOCK_SIZE; + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) + slen = sm4_gcm->src_len; + else + slen = sm4_gcm->src_len - authsize; + + ret = ccp_init_dm_workarea(&ikey, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + return ret; + + len = SM4_BLOCK_SIZE + ilen; + ret = ccp_init_dm_workarea(&ghash, cmd_q, len, DMA_BIDIRECTIONAL); + if (ret) + goto e_ikey; + + ccp_set_dm_area(&ikey, 0, sm4_gcm->iv, 0, HYGON_CCP_SM4GCM_IV_LEN); + ccp_set_dm_area(&ikey, SM4_BLOCK_SIZE, sm4_gcm->key, 0, SM4_KEY_SIZE); + if (ilen > 0) + ccp_set_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->src, 0, ilen); + + memcpy(T, ikey.address, HYGON_CCP_SM4GCM_IV_LEN); + T[15] = 1; + memcpy(I, T, SM4_BLOCK_SIZE); + sm4_generic_crypt_block(H, H, ikey.address + SM4_BLOCK_SIZE, 1); + sm4_generic_crypt_block(T, T, ikey.address + SM4_BLOCK_SIZE, 1); + + if (sm4_gcm->aad_len > 0) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, + ghash.address + SM4_BLOCK_SIZE, sm4_gcm->aad_len); + if (slen > 0) { + u8 *src = ghash.address + SM4_BLOCK_SIZE + sm4_gcm->aad_len; + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, src, slen); + + crypto_inc(I, SM4_BLOCK_SIZE); + while (slen > 0) { + len = min(slen, SM4_BLOCK_SIZE); + sm4_generic_crypt_block(C, I, ikey.address + SM4_BLOCK_SIZE, 1); + crypto_xor(src + process, C, len); + crypto_inc(I, SM4_BLOCK_SIZE); + slen -= len; + process += len; + } + + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, src, process); + } + tail.b = cpu_to_be64(process * 8); + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, &tail, sizeof(tail)); + crypto_xor(ghash.address, T, SM4_BLOCK_SIZE); + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { + ret = crypto_memneq(ghash.address, ghash.address + + SM4_BLOCK_SIZE + ilen - authsize, authsize) ? 
-EBADMSG : 0; + if (ret) + goto e_ghash; + + if (ilen - authsize > 0) + ccp_get_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->dst, 0, ilen - authsize); + + } else { + if (ilen > 0) + ccp_get_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->dst, 0, ilen); + ccp_get_dm_area(&ghash, 0, sm4_gcm->dst, ilen, authsize); + } + +e_ghash: + ccp_dm_free(&ghash); +e_ikey: + ccp_dm_free(&ikey); + return ret; +} + static int ccp_run_sm4_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sm4_gcm_engine *sm4_gcm = &cmd->u.sm4_gcm; struct ccp_dm_workarea key, ctx; + struct ccp_dm_workarea aad, tag; struct ccp_data src, dst; struct ccp_op op; - unsigned int authsize; + unsigned int ilen = 0, authsize = 0; + unsigned int dm_offset = 0, dm_remain = 0; bool in_place = false; /* Default value */ int ret; - u8 *pt_dec_data = NULL; - struct scatterlist sg_outp; - if (sm4_gcm->iv == NULL || sm4_gcm->iv_len != HGGON_CCP_SM4GCM_IV_LEN) + struct scatterlist *p_inp, sg_inp[2]; + struct scatterlist *p_outp, sg_outp[2]; + struct scatterlist *p_tag, sg_tag[2]; + + if (sm4_gcm->iv == NULL || sm4_gcm->iv_len != HYGON_CCP_SM4GCM_IV_LEN) return -EINVAL; if (sm4_gcm->key == NULL || sm4_gcm->key_len != SM4_KEY_SIZE) @@ -3105,26 +3212,38 @@ static int ccp_run_sm4_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sm4_gcm->src == NULL || sm4_gcm->dst == NULL) return -EINVAL; - if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { - pt_dec_data = kmalloc(sm4_gcm->src_len, GFP_KERNEL); - if (!pt_dec_data) { - ret = -ENOMEM; - goto out; - } - memset(pt_dec_data, 0, sm4_gcm->src_len); - sg_init_one(&sg_outp, pt_dec_data, sm4_gcm->src_len); - } - /* Zero defaults to 16 bytes, the maximum size */ authsize = sm4_gcm->authsize ? sm4_gcm->authsize : SM4_BLOCK_SIZE; switch (authsize) { case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: break; default: - ret = -EINVAL; - goto out; + return -EINVAL; + } + + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) { + ilen = sm4_gcm->src_len; + if (sg_nents_for_len(sm4_gcm->dst, sm4_gcm->aad_len + ilen + authsize) < 0) + return -EINVAL; + } else { + /* Input length for decryption includes tag */ + ilen = sm4_gcm->src_len - authsize; + if (sg_nents_for_len(sm4_gcm->dst, sm4_gcm->aad_len + ilen) < 0) + return -EINVAL; } + /* When the data length is 0, ccp cannot run. When the aad length is + * greater than 127, the tag result generated by ccp is incorrect. + */ + if (ilen == 0 || sm4_gcm->aad_len >= CCP_AAD_LEN_MAX_HG) + return ccp_sm4_gcm_crypt(cmd_q, sm4_gcm); + ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; @@ -3152,123 +3271,213 @@ static int ccp_run_sm4_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } /* Copy the context (IV) to the LSB.*/ - ret = ccp_init_dm_workarea(&ctx, cmd_q, HGGON_CCP_SM4GCM_IV_LEN, DMA_BIDIRECTIONAL); + ret = ccp_init_dm_workarea(&ctx, cmd_q, HYGON_CCP_SM4GCM_IV_LEN, DMA_BIDIRECTIONAL); if (ret) goto e_ctx; ret = ccp_set_dm_area(&ctx, 0, sm4_gcm->iv, 0, sm4_gcm->iv_len); if (ret) goto e_ctx; - ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } - if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) { - ret = ccp_init_data(&src, cmd_q, sm4_gcm->src, - sm4_gcm->aad_len + sm4_gcm->src_len, - SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + op.init = 1; + if (sm4_gcm->aad_len > 0) { + dm_offset = ilen <= SM4_BLOCK_SIZE ? 
ilen : SM4_BLOCK_SIZE; + ilen -= dm_offset; + if (ilen == 0) + op.eom = 1; + + dm_offset += sm4_gcm->aad_len; + ret = ccp_init_dm_workarea(&aad, cmd_q, + /* If ilen is equal to 0, the output data contains tag. */ + dm_offset + (ilen == 0 ? SM4_BLOCK_SIZE : 0), + DMA_BIDIRECTIONAL); if (ret) - goto e_src; + goto e_ctx; + + ret = ccp_set_dm_area(&aad, 0, sm4_gcm->src, 0, dm_offset); + if (ret) + goto e_aad; + + ccp_get_dm_area(&aad, 0, sm4_gcm->dst, 0, sm4_gcm->aad_len); + op.u.sm4_gcm.size = sm4_gcm->aad_len; + op.src.u.dma.address = aad.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = dm_offset; + op.dst.u.dma.address = aad.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dm_offset; + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_aad; + } + + op.u.sm4_gcm.size = 0; + op.init = 0; + } + + dm_remain = ilen % SM4_BLOCK_SIZE; + if (ilen > 0) { + if (dm_remain == 0) + dm_remain = SM4_BLOCK_SIZE; + ilen -= dm_remain; + } + if (ilen > 0) { + p_inp = scatterwalk_ffwd(sg_inp, sm4_gcm->src, dm_offset); + p_outp = scatterwalk_ffwd(sg_outp, sm4_gcm->dst, dm_offset); + + ret = ccp_init_data(&src, cmd_q, p_inp, + ilen, SM4_BLOCK_SIZE, + in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_aad; + if (in_place) { dst = src; } else { - ret = ccp_init_data(&dst, cmd_q, - sm4_gcm->dst, sm4_gcm->aad_len + sm4_gcm->src_len + authsize, - SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + ret = ccp_init_data(&dst, cmd_q, p_outp, + ilen, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) - goto e_dst; + goto e_src; } - } else { - ret = ccp_init_data(&src, cmd_q, sm4_gcm->src, - sm4_gcm->aad_len + sm4_gcm->src_len - authsize, - SM4_BLOCK_SIZE, DMA_TO_DEVICE); - if (ret) - goto e_src; - ret = ccp_init_data(&dst, cmd_q, &sg_outp, sm4_gcm->src_len, - SM4_BLOCK_SIZE, DMA_FROM_DEVICE); - if (ret) - goto e_dst; - } - op.init = 1; - /* send data to the CCP SM4 GCM engine */ - while (src.sg_wa.bytes_left) { - if (op.init == 1) { - ccp_update_sg_workarea(&src.sg_wa, sm4_gcm->aad_len); - if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) - ccp_update_sg_workarea(&dst.sg_wa, sm4_gcm->aad_len); + while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); - op.src.u.dma.offset -= sm4_gcm->aad_len; - op.src.u.dma.length += sm4_gcm->aad_len; - op.u.sm4_gcm.size = sm4_gcm->aad_len; - } else { - op.u.sm4_gcm.size = 0; - if (src.sg_wa.bytes_left != 0) - ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + + if (op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + } + + ccp_process_data(&src, &dst, &op); + op.init = 0; } + } - if (!src.sg_wa.bytes_left) - op.eom = 1; + if (dm_remain > 0) { + p_tag = scatterwalk_ffwd(sg_tag, sm4_gcm->src, dm_offset + ilen); - if (!src.sg_wa.bytes_left || op.soc) - op.ioc = 1; - else - op.ioc = 0; + ret = ccp_init_dm_workarea(&tag, cmd_q, + dm_remain + SM4_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + ret = ccp_set_dm_area(&tag, 0, p_tag, 0, dm_remain); + if (ret) + goto e_tag; + + op.eom = 1; + op.src.u.dma.address = tag.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = dm_remain; + op.dst.u.dma.address = tag.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dm_remain + SM4_BLOCK_SIZE; ret = 
 		ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
-			goto e_dst;
-		}
-
-		if (!src.sg_wa.bytes_left || op.soc) {
-			ret = cmd_q->ccp->vdata->perform->run_cmd(&op);
-			if (ret) {
-				cmd->engine_error = cmd_q->cmd_error;
-				goto e_dst;
-			}
+			goto e_tag;
 		}
+	}
 
-		ccp_process_data(&src, &dst, &op);
-		op.init = 0;
+	/* run ccp to process data */
+	ret = cmd_q->ccp->vdata->perform->run_cmd(&op);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_tag;
 	}
 
 	/* retrieve the SM4 GCM iv */
 	ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
-		goto e_dst;
+		goto e_tag;
 	}
-
-	ccp_get_dm_area(&ctx, 0, sm4_gcm->iv, 0, HGGON_CCP_SM4GCM_IV_LEN);
+	ccp_get_dm_area(&ctx, 0, sm4_gcm->iv, 0, HYGON_CCP_SM4GCM_IV_LEN);
 
 	if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) {
-		if (memcmp((u8 *)sg_virt(sm4_gcm->src) +
-				sm4_gcm->aad_len + sm4_gcm->src_len - authsize,
-				&pt_dec_data[sm4_gcm->src_len - authsize], HGGON_CCP_SM4GCM_TAG_LEN) != 0) {
-			pr_err("SM4 GCM Dec error!\n");
-			ret = -EINVAL;
+		struct ccp_dm_workarea tag_wa;
+
+		ret = ccp_init_dm_workarea(&tag_wa, cmd_q, authsize, DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_tag;
+
+		ret = ccp_set_dm_area(&tag_wa, 0, sm4_gcm->src,
+				sm4_gcm->aad_len + sm4_gcm->src_len - authsize,
+				authsize);
+		if (ret) {
+			ccp_dm_free(&tag_wa);
+			goto e_tag;
 		}
-		memcpy((u8 *)sg_virt(sm4_gcm->dst) + sm4_gcm->aad_len,
-			pt_dec_data, sm4_gcm->src_len - authsize);
+
+		if (sm4_gcm->aad_len > 0 && dm_remain == 0)
+			ret = crypto_memneq(aad.address + dm_offset - sm4_gcm->aad_len,
+					tag_wa.address, authsize) ? -EBADMSG : 0;
+		else
+			ret = crypto_memneq(tag.address + dm_remain,
					tag_wa.address, authsize) ? -EBADMSG : 0;
+
+		ccp_dm_free(&tag_wa);
+		if (ret)
+			goto e_tag;
+
+		if (sm4_gcm->aad_len > 0)
+			ccp_get_dm_area(&aad, 0, sm4_gcm->dst, sm4_gcm->aad_len,
+					dm_offset - sm4_gcm->aad_len);
+
+		if (dm_remain > 0)
+			ccp_get_dm_area(&tag, 0, sm4_gcm->dst,
+					dm_offset + ilen, dm_remain);
+	} else {
+		if (sm4_gcm->aad_len > 0)
+			ccp_get_dm_area(&aad, 0, sm4_gcm->dst, sm4_gcm->aad_len,
+					dm_offset - sm4_gcm->aad_len +
+					(dm_remain == 0 ? authsize : 0));
+
+		if (dm_remain > 0)
+			ccp_get_dm_area(&tag, 0, sm4_gcm->dst,
+					dm_offset + ilen, dm_remain + authsize);
 	}
 
+e_tag:
+	if (dm_remain > 0)
+		ccp_dm_free(&tag);
 e_dst:
-	if (!in_place)
+	if (ilen > 0 && !in_place)
 		ccp_free_data(&dst, cmd_q);
 e_src:
-	ccp_free_data(&src, cmd_q);
+	if (ilen > 0)
+		ccp_free_data(&src, cmd_q);
+e_aad:
+	if (sm4_gcm->aad_len > 0)
+		ccp_dm_free(&aad);
 e_ctx:
 	memset(ctx.address, 0, SM4_BLOCK_SIZE);
 	ccp_dm_free(&ctx);
 e_key:
 	memset(key.address, 0, SM4_KEY_SIZE);
 	ccp_dm_free(&key);
-out:
-	kfree(pt_dec_data);
+
 	return ret;
 }
--
Gitee
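The reworked ccp_run_sm4_gcm_cmd() above drives each request in three stages, an AAD pass, block-aligned bulk passes, and a final partial-block pass that also produces the tag, and checks the decryption tag in constant time with crypto_memneq(). Callers never see that split; they reach the engine through the regular kernel AEAD API. The sketch below is a minimal, hypothetical in-kernel test, not part of the patch; it assumes the transform is registered under the conventional "gcm(sm4)" name and uses the 16-byte key, 12-byte IV, and 16-byte tag that the driver enforces:

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* buf holds ptlen bytes of plaintext and must leave room for the 16-byte tag */
static int sm4_gcm_encrypt_once(const u8 *key, u8 *iv /* 12 bytes */,
				u8 *buf, unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("gcm(sm4)", 0, 0);	/* assumed cra_name */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, 16);		/* SM4_KEY_SIZE */
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);	/* only 16 is accepted */
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* in-place request: ciphertext plus tag land back in buf */
	sg_init_one(&sg, buf, ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_ad(req, 0);			/* no AAD in this sketch */

	/* wait synchronously for the async CCP completion */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}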
From 4e0b13d8d371f6a9fdea94ed15d63a92d2bc285d Mon Sep 17 00:00:00 2001
From: liulanyi
Date: Tue, 1 Apr 2025 09:40:21 +0000
Subject: [PATCH 5/6] anolis: crypto: ccp: check if ccp supports sm4-xts/sm4-gcm

ANBZ: #6244

Only platforms that support the sm4-xts/sm4-gcm algorithms can register
the related interfaces.

Signed-off-by: liulanyi
Signed-off-by: yangdepei
---
 .../crypto/ccp/ccp-crypto-sm4-galois-hygon.c |  8 +++----
 drivers/crypto/ccp/ccp-crypto-sm4-hygon.c    |  9 ++++++++
 drivers/crypto/ccp/ccp-dev.c                 | 22 +++++++++++++++++++
 drivers/crypto/ccp/ccp-dev.h                 |  9 +++++++-
 include/linux/ccp.h                          | 12 ++++++++++
 5 files changed, 54 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c
index ac9a561832e8..279dd3237f63 100644
--- a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c
+++ b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c
@@ -20,8 +20,6 @@
 #include "ccp-crypto.h"
 #include "ccp-dev.h"
 
-#define RI_SM4GCM_PRESENT_BIT 14
-
 static int ccp_sm4_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
 			      unsigned int key_len)
 {
@@ -217,11 +215,11 @@ int ccp_register_sm4_hygon_aeads(struct list_head *head)
 {
 	int i, ret;
 	unsigned int ccpversion = ccp_version();
-	unsigned int pspccp_version_reg;
+	unsigned int pspccp_version_reg = 0;
 
 	pspccp_version_reg = get_ccp_version_reg_val();
-	if (!test_bit(RI_SM4GCM_PRESENT_BIT, (unsigned long *)&pspccp_version_reg)) {
-		pr_warn("SM4 GCM CCP ENGINE NOT SUPPORTED!\n");
+	if (!(pspccp_version_reg & RI_SM4GCM_PRESENT)) {
+		pr_info("SM4 GCM CCP ENGINE NOT SUPPORTED.\n");
 		return 0;
 	}
 
diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
index f19529d84390..3fd08daf4c6b 100644
--- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
+++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
@@ -17,6 +17,7 @@
 #include
 
 #include "ccp-crypto.h"
+#include "ccp-dev.h"
 
 enum ccp_sm4_alg_mode {
 	CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB,
@@ -308,10 +309,18 @@ int ccp_register_sm4_hygon_algs(struct list_head *head)
 {
 	int i, ret;
 	unsigned int ccpversion = ccp_version();
+	unsigned int ccp_engine_version_reg = 0;
 
 	for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) {
 		if (sm4_algs[i].version > ccpversion)
 			continue;
+		if (sm4_algs[i].mode == CCP_SM4_ALG_MODE_XTS) {
+			ccp_engine_version_reg = get_ccp_engine_version_reg_val();
+			if (!(ccp_engine_version_reg & RI_SM4VersionNum)) {
+				pr_info("SM4 XTS CCP ENGINE NOT SUPPORTED.\n");
+				continue;
+			}
+		}
 		ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]);
 		if (ret)
 			return ret;
 
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f4b856e8d003..528a427c9d7a 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -270,6 +270,28 @@ unsigned int ccp_version(void)
 }
 EXPORT_SYMBOL_GPL(ccp_version);
 
+/**
+ * get_ccp_engine_version_reg_val - get the ccp engine version register of the CCP device
+ *
+ * Returns the ccp engine version register value of the first unit on the list.
+ */
+unsigned int get_ccp_engine_version_reg_val(void)
+{
+	struct ccp_device *dp;
+	unsigned long flags;
+	unsigned int ret = 0;
+
+	read_lock_irqsave(&ccp_unit_lock, flags);
+	if (!list_empty(&ccp_units)) {
+		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+		ret = ioread32(dp->io_regs + CMD5_PSP_CCP_ENG_VERSION);
+	}
+	read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(get_ccp_engine_version_reg_val);
+
 /**
  * get_ccp_version_reg_val - get PspCcpVersion register value
  *
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 36a35bcf9f25..c53d8f652432 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -70,6 +70,7 @@
 #define LSB_PRIVATE_MASK_LO_OFFSET	0x20
 #define LSB_PRIVATE_MASK_HI_OFFSET	0x24
 #define CMD5_PSP_CCP_VERSION		0x100
+#define CMD5_PSP_CCP_ENG_VERSION	0x104
 
 #define CMD5_Q_CONTROL_BASE		0x0000
 #define CMD5_Q_TAIL_LO_BASE		0x0004
@@ -125,8 +126,14 @@
 
 #define LSB_ENTRY_NUMBER(LSB_ADDR)	(LSB_ADDR / LSB_ITEM_SIZE)
 
-/* indicates whether there is ECC engine for Hygon CCP */
+/* Hygon ccp crypto engine mask */
 #define RI_ECC_PRESENT			0x0400
+#define RI_AES_PRESENT			0x0800
+#define RI_SHA2_PRESENT			0x01000
+#define RI_SM4GCM_PRESENT		0x04000
+
+/* Hygon ccp sm4 engine version mask */
+#define RI_SM4VersionNum		(0x7 << 6)
 
 /* ------------------------ CCP Version 3 Specifics ------------------------ */
 #define REQ0_WAIT_FOR_WRITE		0x00000004
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 9f949d51d7a1..4be54bcfa1c0 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -42,6 +42,13 @@ int ccp_present(void);
  */
 unsigned int ccp_version(void);
 
+/**
+ * get_ccp_engine_version_reg_val - get the ccp engine version register of the CCP device
+ *
+ * Returns the ccp engine version register value of the first unit on the list.
+ */
+unsigned int get_ccp_engine_version_reg_val(void);
+
 /**
  * ccp_read_version - read PspCcpVersion register value of CCP
  *
@@ -86,6 +93,11 @@ static inline unsigned int ccp_version(void)
 	return 0;
 }
 
+static inline unsigned int get_ccp_engine_version_reg_val(void)
+{
+	return 0;
+}
+
 static inline unsigned int get_ccp_version_reg_val(void)
 {
 	return 0;
--
Gitee
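The gating added in this patch reads two MMIO registers once at registration time and tests feature fields: bit 14 (RI_SM4GCM_PRESENT) of PspCcpVersion decides whether the SM4-GCM AEAD is registered, and the three-bit SM4 version field (RI_SM4VersionNum, bits 8:6) of the new CMD5_PSP_CCP_ENG_VERSION register decides whether the SM4-XTS mode is registered. A condensed sketch of the two predicates follows; the helper names are hypothetical and not part of the patch:

#include <linux/types.h>

/* masks as defined in ccp-dev.h by this series */
#define RI_SM4GCM_PRESENT	0x04000		/* PspCcpVersion, bit 14 */
#define RI_SM4VersionNum	(0x7 << 6)	/* engine version, bits 8:6 */

/* hypothetical helpers illustrating the registration checks */
static bool hygon_ccp_has_sm4_gcm(u32 pspccp_version_reg)
{
	return !!(pspccp_version_reg & RI_SM4GCM_PRESENT);
}

static bool hygon_ccp_has_sm4_xts(u32 ccp_engine_version_reg)
{
	/* a non-zero SM4 version number implies XTS support */
	return !!(ccp_engine_version_reg & RI_SM4VersionNum);
}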
From 7779e5b6f301fb620d4aba14790f77b0c647516f Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Mon, 17 Mar 2025 09:37:16 +0800
Subject: [PATCH 6/6] anolis: crypto: ccp: fix SM2 ccp encryption failure on
 D3 reporting ILLEGAL_BUFFER_LENGTH

ANBZ: #6244

D3 does not support the ecc algorithm, but its ccp hardware descriptor
adopts the new function definition, which allows selecting either sm2
or ecc mode.

Signed-off-by: Yabin Li
Signed-off-by: yangdepei
---
 drivers/crypto/ccp/ccp-dev-v5.c | 12 +++++++++---
 drivers/crypto/ccp/ccp-dev.h    |  5 +++++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 980d2cdb2083..9a875e7d34c4 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1209,6 +1209,7 @@ static int ccp5_init(struct ccp_device *ccp)
 	unsigned int qmr, i;
 	u64 status;
 	u32 status_lo, status_hi, command_per_q, queue_size_val;
+	int ecc_support = 0, is_trng2 = 0;
 	int ret;
 
 	/* Find available queues */
@@ -1226,10 +1227,15 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 #ifdef CONFIG_HYGON_GM
-	/* check if ccp support both sm2 and ecc. */
+	/* check if the ccp supports both sm2 and ecc, or does not support
+	 * ecc but uses the new function structure.
+	 */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-		ccp->support_sm2_ecc =
-			!!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) & RI_ECC_PRESENT);
+		ecc_support = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) & RI_ECC_PRESENT);
+		is_trng2 = !!(((ioread32(ccp->io_regs + CMD5_PSP_CCP_ENG_VERSION)
+				>> RI_TRNGVersionOffset) & RI_TRNGVersionMask)
+				== RI_TRNGVersion_002);
+		ccp->support_sm2_ecc = ecc_support || is_trng2;
 	}
 #endif
 
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index c53d8f652432..c833d5105b9d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -135,6 +135,11 @@
 /* Hygon ccp sm4 engine version mask */
 #define RI_SM4VersionNum		(0x7 << 6)
 
+/* Hygon ccp TRNG version mask */
+#define RI_TRNGVersionOffset	21
+#define RI_TRNGVersionMask	0x03
+#define RI_TRNGVersion_002	2
+
 /* ------------------------ CCP Version 3 Specifics ------------------------ */
 #define REQ0_WAIT_FOR_WRITE	0x00000004
 #define REQ0_INT_ON_COMPLETE	0x00000002
--
Gitee
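The D3 fix above reduces to a single predicate: SM2 stays usable when either the ECC-present bit of PspCcpVersion is set, or the TRNG version field (bits 22:21 of CMD5_PSP_CCP_ENG_VERSION) reads 002, which identifies parts that lack an ECC engine but already use the new function-descriptor layout. A condensed sketch with a hypothetical helper name, not part of the patch:

#include <linux/types.h>

/* masks as defined in ccp-dev.h by this series */
#define RI_ECC_PRESENT		0x0400	/* PspCcpVersion, bit 10 */
#define RI_TRNGVersionOffset	21	/* CMD5_PSP_CCP_ENG_VERSION */
#define RI_TRNGVersionMask	0x03
#define RI_TRNGVersion_002	2

/* hypothetical helper mirroring the ccp5_init() logic */
static bool hygon_ccp_supports_sm2(u32 pspccp_version, u32 eng_version)
{
	bool ecc_support = !!(pspccp_version & RI_ECC_PRESENT);
	bool is_trng2 = ((eng_version >> RI_TRNGVersionOffset) &
			 RI_TRNGVersionMask) == RI_TRNGVersion_002;

	return ecc_support || is_trng2;
}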