diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 12264442ae72139ea16bfa71606781f9e87a3a14..a9c1ab433e091dd5382e11261b872bacbadd70be 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -15,6 +15,8 @@ config CRYPTO_DEV_SP_CCP select DMA_ENGINE select CRYPTO_SHA1 select CRYPTO_SHA256 + select CRYPTO_SM4 + select CRYPTO_GF128MUL help Provides the support for AMD Cryptographic Coprocessor (CCP) device which can be used to offload encryption operations such as SHA, AES diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 032dc121fbcb378e34bbfde3931bcf98a8d49671..c6cde2467f3d6077dfbbd08ea575650fd2bca762 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -34,6 +34,7 @@ $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ ccp-crypto-sm3-hygon.o \ ccp-crypto-sm4-hygon.o \ + ccp-crypto-sm4-galois-hygon.o \ ccp_sm2_sign.asn1.o obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 128e8526a001439bc5106ad2492e5318b83bed3d..83a915ae9e45f38d2791aa0efb92eb93b32aa032 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -357,6 +357,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm4_hygon_aeads(&aead_algs); + if (ret) + return ret; + return 0; } #endif diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..279dd3237f630fa7d516a4b5619df543a381f634 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-galois-hygon.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 GCM crypto API support + * + * Copyright (C) 2022 Hygon Information Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" +#include "ccp-dev.h" + +static int ccp_sm4_gcm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->u.sm4.mode = CCP_SM4_MODE_GCM; + ctx->u.sm4.key_len = key_len; + + memcpy(ctx->u.sm4.key, key, key_len); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, key_len); + + return 0; +} + +static int ccp_sm4_gcm_complete(struct crypto_async_request *async_req, int ret) +{ + return ret; +} + +static int ccp_sm4_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ccp_sm4_gcm_crypt(struct aead_request *req, bool encrypt) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = aead_request_ctx(req); + struct scatterlist *iv_sg = NULL; + unsigned int iv_len = 0; + + if (!ctx->u.sm4.key_len) + return -EINVAL; + + if (ctx->u.sm4.mode != CCP_SM4_MODE_GCM) + return -EINVAL; + + if (!req->iv) + return -EINVAL; + + /* + * encrypt: + * AAD & PT => AAD, CT & TAG + * decrypt: + * AAD & [CT + TAG] => AAD & PT + */ + + /* Prepare the IV (12 byte iv only)*/ + memcpy(rctx->iv, req->iv, HYGON_CCP_SM4GCM_IV_LEN); + iv_sg = &rctx->iv_sg; + iv_len = HYGON_CCP_SM4GCM_IV_LEN; + sg_init_one(iv_sg, rctx->iv, iv_len); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM4_GCM; + rctx->cmd.u.sm4_gcm.authsize = crypto_aead_authsize(tfm); + rctx->cmd.u.sm4_gcm.mode = ctx->u.sm4.mode; + rctx->cmd.u.sm4_gcm.action = encrypt; + rctx->cmd.u.sm4_gcm.key = &ctx->u.sm4.key_sg; + rctx->cmd.u.sm4_gcm.key_len = ctx->u.sm4.key_len; + rctx->cmd.u.sm4_gcm.iv = iv_sg; + rctx->cmd.u.sm4_gcm.iv_len = iv_len; + rctx->cmd.u.sm4_gcm.src = req->src; + rctx->cmd.u.sm4_gcm.src_len = req->cryptlen; + rctx->cmd.u.sm4_gcm.aad_len = req->assoclen; + rctx->cmd.u.sm4_gcm.dst = req->dst; + + return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); +} + +static int ccp_sm4_gcm_encrypt(struct aead_request *req) +{ + return ccp_sm4_gcm_crypt(req, CCP_SM4_ACTION_ENCRYPT); +} + +static int ccp_sm4_gcm_decrypt(struct aead_request *req) +{ + return ccp_sm4_gcm_crypt(req, CCP_SM4_ACTION_DECRYPT); +} + +static int ccp_sm4_gcm_cra_init(struct crypto_aead *tfm) +{ + struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->complete = ccp_sm4_gcm_complete; + ctx->u.sm4.key_len = 0; + + crypto_aead_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static void ccp_sm4_gcm_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct aead_alg ccp_sm4_gcm_defaults = { + .setkey = ccp_sm4_gcm_setkey, + .setauthsize = ccp_sm4_gcm_setauthsize, + .encrypt = ccp_sm4_gcm_encrypt, + .decrypt = ccp_sm4_gcm_decrypt, + .init = ccp_sm4_gcm_cra_init, + .ivsize = HYGON_CCP_SM4GCM_IV_LEN, + .maxauthsize = SM4_BLOCK_SIZE, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_exit = ccp_sm4_gcm_cra_exit, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm4_aead_def { + enum ccp_sm4_aead_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + 
unsigned int blocksize; + unsigned int ivsize; + struct aead_alg *alg_defaults; +}; + +static struct ccp_sm4_aead_def sm4_aead_algs[] = { + { + .mode = CCP_SM4_MODE_GCM, + .version = CCP_VERSION(5, 0), + .name = "gcm(sm4)", + .driver_name = "gcm-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = HYGON_CCP_SM4GCM_IV_LEN, + .alg_defaults = &ccp_sm4_gcm_defaults, + }, +}; + +static int ccp_register_sm4_aead(struct list_head *head, + const struct ccp_sm4_aead_def *def) +{ + struct ccp_crypto_aead *ccp_aead; + struct aead_alg *alg; + int ret; + + ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL); + if (!ccp_aead) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_aead->entry); + + ccp_aead->mode = def->mode; + + /* Copy the defaults and override as necessary */ + alg = &ccp_aead->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + + ret = crypto_register_aead(alg); + if (ret) { + pr_err("%s aead algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_aead); + return ret; + } + + list_add(&ccp_aead->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_aeads(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + unsigned int pspccp_version_reg = 0; + + pspccp_version_reg = get_ccp_version_reg_val(); + if (!(pspccp_version_reg & RI_SM4GCM_PRESENT)) { + pr_info("SM4 GCM CCP ENGINE NOT SUPPORTED.\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(sm4_aead_algs); i++) { + if (sm4_aead_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_aead(head, &sm4_aead_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c index 2328a9f87218419522ae52b8189bbb1b572c3532..3fd08daf4c6b41c27410d2f63db7f2dcd4573ab7 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -17,12 +17,14 @@ #include #include "ccp-crypto.h" +#include "ccp-dev.h" enum ccp_sm4_alg_mode { CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB, CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_XTS = CCP_SM4_MODE_XTS, CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, @@ -57,10 +59,10 @@ static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!key) return -EINVAL; - memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); - sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + memcpy(ctx->u.sm4.key, key, key_len); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, key_len); - ctx->u.sm4.key_len = SM4_KEY_SIZE; + ctx->u.sm4.key_len = key_len; return 0; } @@ -83,6 +85,7 @@ static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) if ((mode != CCP_SM4_ALG_MODE_CTR) && (mode != CCP_SM4_ALG_MODE_OFB) && (mode != CCP_SM4_ALG_MODE_CFB) && + (mode != CCP_SM4_ALG_MODE_XTS) && (req->cryptlen & (SM4_BLOCK_SIZE - 1))) return -EINVAL; @@ -122,7 +125,7 @@ static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) cmd->u.sm4.select = 1; cmd->u.sm4.key = &ctx->u.sm4.key_sg; - cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.key_len = ctx->u.sm4.key_len; cmd->u.sm4.iv = iv_sg; cmd->u.sm4.iv_len = iv_sg ? 
SM4_BLOCK_SIZE : 0; @@ -241,6 +244,15 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = SM4_BLOCK_SIZE, .alg_defaults = &ccp_sm4_defaults, }, + { + .mode = CCP_SM4_ALG_MODE_XTS, + .version = CCP_VERSION(5, 0), + .name = "xts_ccp(sm4)", + .driver_name = "xts-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, { .mode = CCP_SM4_ALG_MODE_CTR, .version = CCP_VERSION(5, 0), @@ -275,6 +287,10 @@ static int ccp_register_sm4_hygon_alg(struct list_head *head, def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; + if (def->mode == CCP_SM4_ALG_MODE_XTS) { + alg->min_keysize = SM4_KEY_SIZE * 2; + alg->max_keysize = SM4_KEY_SIZE * 2; + } ret = crypto_register_skcipher(alg); if (ret) { @@ -293,10 +309,18 @@ int ccp_register_sm4_hygon_algs(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); + unsigned int ccp_engine_version_reg = 0; for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { if (sm4_algs[i].version > ccpversion) continue; + if (sm4_algs[i].mode == CCP_SM4_ALG_MODE_XTS) { + ccp_engine_version_reg = get_ccp_engine_version_reg_val(); + if (!(ccp_engine_version_reg & RI_SM4VersionNum)) { + pr_info("SM4 XTS CCP ENGINE NOT SUPPORTED.\n"); + continue; + } + } ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); if (ret) return ret; diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 05a80f11397c07e9cafdfad0518440c07e233327..8e78909f0c6cb6eae256de60e972cf880e359e11 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -344,7 +344,7 @@ struct ccp_sm3_exp_ctx { /***** SM4 related defines *****/ struct ccp_sm4_ctx { struct scatterlist key_sg; - u8 key[SM4_KEY_SIZE]; + u8 key[SM4_KEY_SIZE * 2]; u32 key_len; u32 mode; }; @@ -353,6 +353,9 @@ struct ccp_sm4_req_ctx { struct scatterlist iv_sg; u8 iv[SM4_BLOCK_SIZE]; + struct scatterlist tag_sg; + u8 tag[SM4_BLOCK_SIZE]; + struct ccp_cmd cmd; }; @@ -386,5 +389,6 @@ int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); int ccp_register_sm3_hygon_algs(struct list_head *head); int ccp_register_sm4_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_aeads(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index c98062f52a6b75e619a4cc6ae214e2baea7d6791..9a875e7d34c4c83d38531677c399cac74dd0da4d 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -161,6 +161,12 @@ union ccp_function { u16 encrypt:1; u16 step:7; } sm4_ctr; + struct { + u16 size:7; + u16 encrypt:1; + u16 rsvd:5; + u16 mode:1; + } sm4_gcm; u16 raw; }; @@ -193,6 +199,9 @@ union ccp_function { #define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) #define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) #define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) +#define CCP_SM4_GCM_SIZE(p) ((p)->sm4_gcm.size) +#define CCP_SM4_GCM_ENCRYPT(p) ((p)->sm4_gcm.encrypt) +#define CCP_SM4_GCM_MODE(p) ((p)->sm4_gcm.mode) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -940,6 +949,47 @@ static int ccp5_perform_sm4_ctr(struct ccp_op *op) return ccp5_do_multi_cmds(&desc, op->cmd_q); } +static int ccp5_perform_sm4_gcm(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_key * LSB_ITEM_SIZE; + + op->cmd_q->total_sm4_gcm_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_GCM; + CCP5_CMD_SOC(&desc) = op->soc; + 
CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_GCM_ENCRYPT(&function) = op->u.sm4_gcm.action; + CCP_SM4_GCM_MODE(&function) = op->u.sm4_gcm.mode; + CCP_SM4_GCM_SIZE(&function) = op->u.sm4_gcm.size; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1159,6 +1209,7 @@ static int ccp5_init(struct ccp_device *ccp) unsigned int qmr, i; u64 status; u32 status_lo, status_hi, command_per_q, queue_size_val; + int ecc_support = 0, is_trng2 = 0; int ret; /* Find available queues */ @@ -1176,10 +1227,15 @@ static int ccp5_init(struct ccp_device *ccp) } #ifdef CONFIG_HYGON_GM - /* check if ccp support both sm2 and ecc. */ + /* check if ccp support both sm2 and ecc, or not support ecc + * but use new function structure. + */ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - ccp->support_sm2_ecc = - !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) & RI_ECC_PRESENT); + ecc_support = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) & RI_ECC_PRESENT); + is_trng2 = !!(((ioread32(ccp->io_regs + CMD5_PSP_CCP_ENG_VERSION) + >> RI_TRNGVersionOffset) & RI_TRNGVersionMask) + == RI_TRNGVersion_002); + ccp->support_sm2_ecc = ecc_support || is_trng2; } #endif @@ -1514,6 +1570,7 @@ static const struct ccp_actions ccp5_actions = { .sm3 = ccp5_perform_sm3, .sm4 = ccp5_perform_sm4, .sm4_ctr = ccp5_perform_sm4_ctr, + .sm4_gcm = ccp5_perform_sm4_gcm, .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 8b00875a0d65ccc7852654102c9ffcd4134a4afd..528a427c9d7af4dcc9b647cbfd64362c565b49a4 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -270,6 +270,51 @@ unsigned int ccp_version(void) } EXPORT_SYMBOL_GPL(ccp_version); +/** + * get_ccp_engine_version_reg_val - get the ccp engine version register of the CCP device + * + * Returns the ccp engine version register value of the first unit on the list; + */ +unsigned int get_ccp_engine_version_reg_val(void) +{ + struct ccp_device *dp; + unsigned long flags; + unsigned int ret = 0; + + read_lock_irqsave(&ccp_unit_lock, flags); + if (!list_empty(&ccp_units)) { + dp = list_first_entry(&ccp_units, struct ccp_device, entry); + ret = ioread32(dp->io_regs + CMD5_PSP_CCP_ENG_VERSION); + } + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(get_ccp_engine_version_reg_val); + +/** + * get_ccp_version_reg_val - get PspCcpVersion register value + * + * Returns the PspCcpVersion register value of the fist CCP on list; + * otherwise a zero if no CCP device is present + */ +unsigned int get_ccp_version_reg_val(void) +{ + struct ccp_device *dp; + unsigned long flags; + int ret = 0; + + read_lock_irqsave(&ccp_unit_lock, flags); + if 
(!list_empty(&ccp_units)) { + dp = list_first_entry(&ccp_units, struct ccp_device, entry); + ret = ioread32(dp->io_regs + CMD5_PSP_CCP_VERSION); + } + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(get_ccp_version_reg_val); + /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index cb93a57a75c6e26af7e6ea4a1d6ed447d5d3207c..c833d5105b9ded775163500daf9442e3baffbe5c 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -70,6 +70,7 @@ #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 #define LSB_PRIVATE_MASK_HI_OFFSET 0x24 #define CMD5_PSP_CCP_VERSION 0x100 +#define CMD5_PSP_CCP_ENG_VERSION 0x104 #define CMD5_Q_CONTROL_BASE 0x0000 #define CMD5_Q_TAIL_LO_BASE 0x0004 @@ -125,8 +126,19 @@ #define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE) -/* indicates whether there is ECC engine for Hygon CCP */ +/* Hygon ccp crypto engine mask */ #define RI_ECC_PRESENT 0x0400 +#define RI_AES_PRESENT 0x0800 +#define RI_SHA2_PRESENT 0x01000 +#define RI_SM4GCM_PRESENT 0x04000 + +/* Hygon ccp sm4 engine version mask */ +#define RI_SM4VersionNum (0x7 << 6) + +/* Hygon ccp TRNG version mask */ +#define RI_TRNGVersionOffset 21 +#define RI_TRNGVersionMask 0x03 +#define RI_TRNGVersion_002 2 /* ------------------------ CCP Version 3 Specifics ------------------------ */ #define REQ0_WAIT_FOR_WRITE 0x00000004 @@ -167,6 +179,10 @@ #define REQ1_ECC_AFFINE_CONVERT 0x00200000 #define REQ1_ECC_FUNCTION_SHIFT 18 +/***** HYGON CCP SM4 GCM related defines *****/ +#define HYGON_CCP_SM4GCM_IV_LEN 12 +#define HYGON_CCP_SM4GCM_TAG_LEN 16 + /****** REQ4 Related Values ******/ #define REQ4_KSB_SHIFT 18 #define REQ4_MEMTYPE_SHIFT 16 @@ -344,6 +360,7 @@ struct ccp_cmd_queue { unsigned long total_sm3_ops; unsigned long total_sm4_ops; unsigned long total_sm4_ctr_ops; + unsigned long total_sm4_gcm_ops; } ____cacheline_aligned; struct ccp_device { @@ -563,6 +580,12 @@ struct ccp_sm4_ctr_op { u32 step; }; +struct ccp_sm4_gcm_op { + enum ccp_sm4_action action; + enum ccp_sm4_aead_mode mode; + u32 size; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -590,6 +613,7 @@ struct ccp_op { struct ccp_sm3_op sm3; struct ccp_sm4_op sm4; struct ccp_sm4_ctr_op sm4_ctr; + struct ccp_sm4_gcm_op sm4_gcm; } u; }; @@ -702,6 +726,7 @@ struct ccp_actions { int (*sm3)(struct ccp_op *op); int (*sm4)(struct ccp_op *op); int (*sm4_ctr)(struct ccp_op *op); + int (*sm4_gcm)(struct ccp_op *op); int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 88ed6bc2f129a723bfaabbba110c12ed6aeb434f..584ade2cc682dc42eb79fd153bcd4261dee189ac 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "ccp-dev.h" @@ -55,6 +56,48 @@ static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? 
\ ccp_gen_jobid(ccp) : 0) +#define GHASH_BLOCK_SIZE 16 +#define CCP_AAD_LEN_MAX_HG 128 + +static void ccp_gcm_ghash(be128 *ghash, const be128 *h, const void *s, int l) +{ + while (l > 0) { + crypto_xor((u8 *)ghash, s, min(l, GHASH_BLOCK_SIZE)); + gf128mul_lle(ghash, h); + + s += GHASH_BLOCK_SIZE; + l -= GHASH_BLOCK_SIZE; + } +} + +/* + * Complete the encryption operation of a block for sm4 algorithm. + * enc: 1--encrypt 0--decrypt + */ +static inline int sm4_generic_crypt_block(u8 *dst, u8 *src, u8 *key, int enc) +{ + struct crypto_cipher *cipher = NULL; + int ret = 0; + + cipher = crypto_alloc_cipher("sm4", 0, 0); + if (IS_ERR(cipher)) { + pr_err("Allocate sm4 cipher failed, %ld\n", PTR_ERR(cipher)); + return -ENOMEM; + } + + ret = crypto_cipher_setkey(cipher, key, SM4_KEY_SIZE); + if (ret) + goto out; + + if (enc) + crypto_cipher_encrypt_one(cipher, dst, src); + else + crypto_cipher_decrypt_one(cipher, dst, src); + +out: + crypto_free_cipher(cipher); + return ret; +} static u32 ccp_gen_jobid(struct ccp_device *ccp) { @@ -2731,16 +2774,23 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) struct ccp_dm_workarea iv_key; struct ccp_data src, dst; struct ccp_op op; + int remain = sm4->src_len & (SM4_BLOCK_SIZE - 1); bool in_place = false; int ret; - if (sm4->src == NULL || sm4->dst == NULL) + if (sm4->src == NULL || sm4->dst == NULL || sm4->key == NULL) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_XTS && sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sm4->mode == CCP_SM4_MODE_XTS && sm4->src_len < SM4_BLOCK_SIZE) return -EINVAL; - if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + if (sm4->mode == CCP_SM4_MODE_XTS && sm4->key_len != SM4_KEY_SIZE * 2) return -EINVAL; - if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + if (sg_nents_for_len(sm4->key, sm4->key_len) < 0) return -EINVAL; if (sm4->mode != CCP_SM4_MODE_ECB) { @@ -2767,6 +2817,12 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sg_virt(sm4->src) == sg_virt(sm4->dst)) in_place = true; + if (sm4->mode == CCP_SM4_MODE_XTS && remain) { + sm4->src_len -= remain; + if (sm4->action == CCP_SM4_ACTION_DECRYPT) + sm4->src_len -= SM4_BLOCK_SIZE; + } + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) @@ -2790,6 +2846,15 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sm4->mode != CCP_SM4_MODE_ECB) ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + if (sm4->mode == CCP_SM4_MODE_XTS) { + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, + SM4_KEY_SIZE, SM4_KEY_SIZE); + ret = sm4_generic_crypt_block(iv_key.address, + iv_key.address, iv_key.address + SM4_BLOCK_SIZE, 1); + if (ret) + goto e_iv_key; + } + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, @@ -2839,6 +2904,63 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); } + if (sm4->mode == CCP_SM4_MODE_XTS && remain) { + struct ccp_dm_workarea xts_wa; + u8 tweak[SM4_BLOCK_SIZE] = {0}; + u8 temp[SM4_BLOCK_SIZE] = {0}; + u8 key1[SM4_KEY_SIZE] = {0}; + + ret = ccp_init_dm_workarea(&xts_wa, cmd_q, + SM4_BLOCK_SIZE * 2, DMA_BIDIRECTIONAL); + if (ret) + goto e_iv_key; + + memcpy(tweak, iv_key.address, SM4_BLOCK_SIZE); + scatterwalk_map_and_copy(key1, sm4->key, 0, SM4_KEY_SIZE, 0); + if (sm4->action == CCP_SM4_ACTION_ENCRYPT) { + ccp_set_dm_area(&xts_wa, 0, sm4->dst, + sm4->src_len - SM4_BLOCK_SIZE, SM4_BLOCK_SIZE); + memcpy(xts_wa.address + SM4_BLOCK_SIZE, xts_wa.address, remain); + ccp_set_dm_area(&xts_wa, 0, sm4->src, sm4->src_len, remain); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 1); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ccp_get_dm_area(&xts_wa, 0, sm4->dst, + sm4->src_len - SM4_BLOCK_SIZE, remain + SM4_BLOCK_SIZE); + } else { + gf128mul_x_lle((be128 *)tweak, (be128 *)tweak); + ccp_set_dm_area(&xts_wa, 0, sm4->src, + sm4->src_len, remain + SM4_BLOCK_SIZE); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 0); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + + memcpy(tweak, iv_key.address, SM4_BLOCK_SIZE); + memcpy(temp, xts_wa.address, remain); + memcpy(xts_wa.address, xts_wa.address + SM4_BLOCK_SIZE, remain); + memcpy(xts_wa.address + SM4_BLOCK_SIZE, temp, remain); + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ret = sm4_generic_crypt_block(xts_wa.address, xts_wa.address, key1, 0); + if (ret) { + ccp_dm_free(&xts_wa); + goto e_iv_key; + } + crypto_xor(xts_wa.address, tweak, SM4_BLOCK_SIZE); + ccp_get_dm_area(&xts_wa, 0, sm4->dst, sm4->src_len, + remain + SM4_BLOCK_SIZE); + } + + ccp_dm_free(&xts_wa); + } + e_iv_key: memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); ccp_dm_free(&iv_key); @@ -2975,6 +3097,390 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_sm4_gcm_crypt(struct ccp_cmd_queue *cmd_q, + struct ccp_sm4_gcm_engine *sm4_gcm) +{ + be128 tail = {cpu_to_be64(sm4_gcm->aad_len * 8), 0}; + struct ccp_dm_workarea ikey; + struct ccp_dm_workarea ghash; + u8 H[SM4_BLOCK_SIZE] = {0}; + u8 T[SM4_BLOCK_SIZE] = {0}; + u8 I[SM4_BLOCK_SIZE] = {0}; + u8 C[SM4_BLOCK_SIZE] = {0}; + int ilen = sm4_gcm->aad_len + sm4_gcm->src_len; + int slen = 0, authsize = 0; + int len = 0, process = 0; + int ret = 0; + + authsize = sm4_gcm->authsize ? 
sm4_gcm->authsize : SM4_BLOCK_SIZE; + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) + slen = sm4_gcm->src_len; + else + slen = sm4_gcm->src_len - authsize; + + ret = ccp_init_dm_workarea(&ikey, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + return ret; + + len = SM4_BLOCK_SIZE + ilen; + ret = ccp_init_dm_workarea(&ghash, cmd_q, len, DMA_BIDIRECTIONAL); + if (ret) + goto e_ikey; + + ccp_set_dm_area(&ikey, 0, sm4_gcm->iv, 0, HYGON_CCP_SM4GCM_IV_LEN); + ccp_set_dm_area(&ikey, SM4_BLOCK_SIZE, sm4_gcm->key, 0, SM4_KEY_SIZE); + if (ilen > 0) + ccp_set_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->src, 0, ilen); + + memcpy(T, ikey.address, HYGON_CCP_SM4GCM_IV_LEN); + T[15] = 1; + memcpy(I, T, SM4_BLOCK_SIZE); + sm4_generic_crypt_block(H, H, ikey.address + SM4_BLOCK_SIZE, 1); + sm4_generic_crypt_block(T, T, ikey.address + SM4_BLOCK_SIZE, 1); + + if (sm4_gcm->aad_len > 0) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, + ghash.address + SM4_BLOCK_SIZE, sm4_gcm->aad_len); + if (slen > 0) { + u8 *src = ghash.address + SM4_BLOCK_SIZE + sm4_gcm->aad_len; + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, src, slen); + + crypto_inc(I, SM4_BLOCK_SIZE); + while (slen > 0) { + len = min(slen, SM4_BLOCK_SIZE); + sm4_generic_crypt_block(C, I, ikey.address + SM4_BLOCK_SIZE, 1); + crypto_xor(src + process, C, len); + crypto_inc(I, SM4_BLOCK_SIZE); + slen -= len; + process += len; + } + + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, src, process); + } + tail.b = cpu_to_be64(process * 8); + ccp_gcm_ghash((be128 *)ghash.address, (be128 *)H, &tail, sizeof(tail)); + crypto_xor(ghash.address, T, SM4_BLOCK_SIZE); + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { + ret = crypto_memneq(ghash.address, ghash.address + + SM4_BLOCK_SIZE + ilen - authsize, authsize) ? -EBADMSG : 0; + if (ret) + goto e_ghash; + + if (ilen - authsize > 0) + ccp_get_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->dst, 0, ilen - authsize); + + } else { + if (ilen > 0) + ccp_get_dm_area(&ghash, SM4_BLOCK_SIZE, sm4_gcm->dst, 0, ilen); + ccp_get_dm_area(&ghash, 0, sm4_gcm->dst, ilen, authsize); + } + +e_ghash: + ccp_dm_free(&ghash); +e_ikey: + ccp_dm_free(&ikey); + return ret; +} + +static int ccp_run_sm4_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_gcm_engine *sm4_gcm = &cmd->u.sm4_gcm; + struct ccp_dm_workarea key, ctx; + struct ccp_dm_workarea aad, tag; + struct ccp_data src, dst; + struct ccp_op op; + unsigned int ilen = 0, authsize = 0; + unsigned int dm_offset = 0, dm_remain = 0; + bool in_place = false; /* Default value */ + int ret; + + struct scatterlist *p_inp, sg_inp[2]; + struct scatterlist *p_outp, sg_outp[2]; + struct scatterlist *p_tag, sg_tag[2]; + + if (sm4_gcm->iv == NULL || sm4_gcm->iv_len != HYGON_CCP_SM4GCM_IV_LEN) + return -EINVAL; + + if (sm4_gcm->key == NULL || sm4_gcm->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sm4_gcm->src == NULL || sm4_gcm->dst == NULL) + return -EINVAL; + + /* Zero defaults to 16 bytes, the maximum size */ + authsize = sm4_gcm->authsize ? 
sm4_gcm->authsize : SM4_BLOCK_SIZE; + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + + if (sm4_gcm->action == CCP_SM4_ACTION_ENCRYPT) { + ilen = sm4_gcm->src_len; + if (sg_nents_for_len(sm4_gcm->dst, sm4_gcm->aad_len + ilen + authsize) < 0) + return -EINVAL; + } else { + /* Input length for decryption includes tag */ + ilen = sm4_gcm->src_len - authsize; + if (sg_nents_for_len(sm4_gcm->dst, sm4_gcm->aad_len + ilen) < 0) + return -EINVAL; + } + + /* When the data length is 0, ccp cannot run. When the aad length is + * greater than 127, the tag result generated by ccp is incorrect. + */ + if (ilen == 0 || sm4_gcm->aad_len >= CCP_AAD_LEN_MAX_HG) + return ccp_sm4_gcm_crypt(cmd_q, sm4_gcm); + + ret = -EIO; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.sb_key = cmd_q->sb_key; /* Pre-allocated */ + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ + op.u.sm4_gcm.action = sm4_gcm->action; + op.u.sm4_gcm.mode = sm4_gcm->mode; + + if (sg_virt(sm4_gcm->src) == sg_virt(sm4_gcm->dst)) + in_place = true; + + /* Copy the key to the LSB */ + ret = ccp_init_dm_workarea(&key, cmd_q, SM4_KEY_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_key; + + ret = ccp_set_dm_area(&key, 0, sm4_gcm->key, 0, sm4_gcm->key_len); + if (ret) + goto e_key; + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* Copy the context (IV) to the LSB.*/ + ret = ccp_init_dm_workarea(&ctx, cmd_q, HYGON_CCP_SM4GCM_IV_LEN, DMA_BIDIRECTIONAL); + if (ret) + goto e_ctx; + + ret = ccp_set_dm_area(&ctx, 0, sm4_gcm->iv, 0, sm4_gcm->iv_len); + if (ret) + goto e_ctx; + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + + op.init = 1; + if (sm4_gcm->aad_len > 0) { + dm_offset = ilen <= SM4_BLOCK_SIZE ? ilen : SM4_BLOCK_SIZE; + ilen -= dm_offset; + if (ilen == 0) + op.eom = 1; + + dm_offset += sm4_gcm->aad_len; + ret = ccp_init_dm_workarea(&aad, cmd_q, + /* If ilen is equal to 0, the output data contains tag. */ + dm_offset + (ilen == 0 ? SM4_BLOCK_SIZE : 0), + DMA_BIDIRECTIONAL); + if (ret) + goto e_ctx; + + ret = ccp_set_dm_area(&aad, 0, sm4_gcm->src, 0, dm_offset); + if (ret) + goto e_aad; + + ccp_get_dm_area(&aad, 0, sm4_gcm->dst, 0, sm4_gcm->aad_len); + op.u.sm4_gcm.size = sm4_gcm->aad_len; + op.src.u.dma.address = aad.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = dm_offset; + op.dst.u.dma.address = aad.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dm_offset; + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_aad; + } + + op.u.sm4_gcm.size = 0; + op.init = 0; + } + + dm_remain = ilen % SM4_BLOCK_SIZE; + if (ilen > 0) { + if (dm_remain == 0) + dm_remain = SM4_BLOCK_SIZE; + ilen -= dm_remain; + } + if (ilen > 0) { + p_inp = scatterwalk_ffwd(sg_inp, sm4_gcm->src, dm_offset); + p_outp = scatterwalk_ffwd(sg_outp, sm4_gcm->dst, dm_offset); + + ret = ccp_init_data(&src, cmd_q, p_inp, + ilen, SM4_BLOCK_SIZE, + in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_aad; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, p_outp, + ilen, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + + if (op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + } + + ccp_process_data(&src, &dst, &op); + op.init = 0; + } + } + + if (dm_remain > 0) { + p_tag = scatterwalk_ffwd(sg_tag, sm4_gcm->src, dm_offset + ilen); + + ret = ccp_init_dm_workarea(&tag, cmd_q, + dm_remain + SM4_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ret = ccp_set_dm_area(&tag, 0, p_tag, 0, dm_remain); + if (ret) + goto e_tag; + + op.eom = 1; + op.src.u.dma.address = tag.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = dm_remain; + op.dst.u.dma.address = tag.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dm_remain + SM4_BLOCK_SIZE; + ret = cmd_q->ccp->vdata->perform->sm4_gcm(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_tag; + } + } + + /* run ccp to process data */ + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_tag; + } + + /* retrieve the SM4 GCM iv */ + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_tag; + } + ccp_get_dm_area(&ctx, 0, sm4_gcm->iv, 0, HYGON_CCP_SM4GCM_IV_LEN); + + if (sm4_gcm->action == CCP_SM4_ACTION_DECRYPT) { + struct ccp_dm_workarea tag_wa; + + ret = ccp_init_dm_workarea(&tag_wa, cmd_q, authsize, DMA_BIDIRECTIONAL); + if (ret) + goto e_tag; + + ret = ccp_set_dm_area(&tag_wa, 0, sm4_gcm->src, + sm4_gcm->aad_len + sm4_gcm->src_len - authsize, + authsize); + if (ret) { + ccp_dm_free(&tag_wa); + goto e_tag; + } + + if (sm4_gcm->aad_len > 0 && dm_remain == 0) + ret = crypto_memneq(aad.address + dm_offset - sm4_gcm->aad_len, + tag_wa.address, authsize) ? -EBADMSG : 0; + else + ret = crypto_memneq(tag.address + dm_remain, + tag_wa.address, authsize) ? -EBADMSG : 0; + + ccp_dm_free(&tag_wa); + if (ret) + goto e_tag; + + if (sm4_gcm->aad_len > 0) + ccp_get_dm_area(&aad, 0, sm4_gcm->dst, sm4_gcm->aad_len, + dm_offset - sm4_gcm->aad_len); + + if (dm_remain > 0) + ccp_get_dm_area(&tag, 0, sm4_gcm->dst, + dm_offset + ilen, dm_remain); + } else { + if (sm4_gcm->aad_len > 0) + ccp_get_dm_area(&aad, 0, sm4_gcm->dst, sm4_gcm->aad_len, + dm_offset - sm4_gcm->aad_len + + (dm_remain == 0 ? 
authsize : 0)); + + if (dm_remain > 0) + ccp_get_dm_area(&tag, 0, sm4_gcm->dst, + dm_offset + ilen, dm_remain + authsize); + } + +e_tag: + if (dm_remain > 0) + ccp_dm_free(&tag); +e_dst: + if (ilen > 0 && !in_place) + ccp_free_data(&dst, cmd_q); +e_src: + if (ilen > 0) + ccp_free_data(&src, cmd_q); +e_aad: + if (sm4_gcm->aad_len > 0) + ccp_dm_free(&aad); +e_ctx: + memset(ctx.address, 0, SM4_BLOCK_SIZE); + ccp_dm_free(&ctx); +e_key: + memset(key.address, 0, SM4_KEY_SIZE); + ccp_dm_free(&key); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -3031,6 +3537,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM4_CTR: ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM4_GCM: + ret = ccp_run_sm4_gcm_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 45cc6fedd131d38996de629c4b8ea7d16528f41c..4be54bcfa1c06fe31768a588c0676d76e810796b 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -42,6 +42,20 @@ int ccp_present(void); */ unsigned int ccp_version(void); +/** + * get_ccp_engine_version_reg_val - get the ccp engine version register of the CCP device + * + * Returns the ccp engine version register value of the first unit on the list; + */ +unsigned int get_ccp_engine_version_reg_val(void); + +/** + * ccp_read_version - read PspCcpVersion register value of CCP + * + * Returns PspCcpVersion register value , or zero if no CCP + */ +unsigned int get_ccp_version_reg_val(void); + /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * @@ -79,6 +93,16 @@ static inline unsigned int ccp_version(void) return 0; } +static inline unsigned int get_ccp_engine_version_reg_val(void) +{ + return 0; +} + +static inline unsigned int get_ccp_version_reg_val(void) +{ + return 0; +} + static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) { return -ENODEV; @@ -687,6 +711,7 @@ struct ccp_sm3_engine { * @CCP_SM4_MODE_CBC: CBC mode * @CCP_SM4_MODE_OFB: OFB mode * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_XTS: XTS mode * @CCP_SM4_MODE_CTR: CTR mode */ enum ccp_sm4_mode { @@ -694,10 +719,21 @@ enum ccp_sm4_mode { CCP_SM4_MODE_CBC, CCP_SM4_MODE_OFB, CCP_SM4_MODE_CFB, + CCP_SM4_MODE_XTS, CCP_SM4_MODE_CTR, CCP_SM4_MODE__LAST, }; +/** + * ccp_sm4_aead_mode - SM4 AEAD operation mode + * + * @CCP_SM4_MODE_GCM: GCM mode + */ +enum ccp_sm4_aead_mode { + CCP_SM4_MODE_GCM = 0, + CCP_SM4_AEAD_MODE__LAST, +}; + /** * ccp_sm4_action - SM4 operation * @@ -784,6 +820,38 @@ struct ccp_sm4_ctr_engine { u64 src_len; /* In bytes */ }; +/** + * struct ccp_sm4_gcm_engine - CCP SM4 GCM operation + * @action: SM4 GCM operation (decrypt/encrypt) + * @key: key to be used for this SM4 GCM operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 GCM operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @aad_len: length in bytes of additional authenticated date used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + */ +struct ccp_sm4_gcm_engine { + enum ccp_sm4_action action; + enum ccp_sm4_aead_mode mode; + + u32 authsize; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 aad_len; /* In bytes */ +}; + /** * ccp_engine 
- CCP operation identifiers * @@ -811,6 +879,7 @@ enum ccp_engine { CCP_ENGINE_SM3, CCP_ENGINE_SM4, CCP_ENGINE_SM4_CTR, + CCP_ENGINE_SM4_GCM, CCP_ENGINE__LAST, }; @@ -863,6 +932,7 @@ struct ccp_cmd { struct ccp_sm3_engine sm3; struct ccp_sm4_engine sm4; struct ccp_sm4_ctr_engine sm4_ctr; + struct ccp_sm4_gcm_engine sm4_gcm; } u; /* Completion callback support */
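
The patch registers the new AEAD as "gcm(sm4)" (driver name "gcm-sm4-ccp") with a 12-byte IV and a tag of up to SM4_BLOCK_SIZE bytes. Below is a minimal in-kernel usage sketch, not part of the patch: the function name is hypothetical, the buffer is assumed to be kmalloc()-allocated (DMA-able) and laid out as assoc || plaintext with 16 spare bytes for the tag, and the operation runs in place.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int sm4_gcm_ccp_encrypt_demo(const u8 *key,	/* 16 bytes */
				    u8 *iv,		/* 12 bytes */
				    u8 *buf,		/* assoc || plaintext, +16 spare bytes */
				    unsigned int assoclen, unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("gcm(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, 16);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* In place: dst receives assoc || ciphertext || 16-byte tag */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}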
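
The skcipher side now also exposes SM4-XTS, registered as "xts_ccp(sm4)" ("xts-sm4-ccp") rather than the generic "xts(sm4)" name, with a double-length 32-byte key (key1 || key2), a 16-byte tweak, and a minimum of one full block of data; a trailing partial block is handled with ciphertext stealing in ccp_run_sm4_cmd(). A caller sketch under the same assumptions as above (hypothetical function name, in-place DMA-able buffer):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int sm4_xts_ccp_encrypt_demo(const u8 *key2x,	/* 32 bytes: key1 || key2 */
				    u8 *iv,		/* 16-byte tweak */
				    u8 *buf, unsigned int len)	/* len >= 16 */
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("xts_ccp(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key2x, 32);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}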
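
Both registrations are gated on capability registers rather than on the CCP version number alone: PspCcpVersion (offset 0x100) carries per-engine presence bits (RI_ECC_PRESENT, RI_AES_PRESENT, RI_SHA2_PRESENT, RI_SM4GCM_PRESENT), while the new PspCcpEngVersion register (offset 0x104) carries per-engine revision fields (RI_SM4VersionNum, RI_TRNGVersion*). A small sketch of how the two exported helpers can be used for such gating; the wrapper names are hypothetical, and since the masks live in ccp-dev.h this only compiles inside the ccp driver:

#include "ccp-dev.h"

/* SM4-GCM engine present bit in PspCcpVersion */
static bool hygon_ccp_has_sm4_gcm(void)
{
	return !!(get_ccp_version_reg_val() & RI_SM4GCM_PRESENT);
}

/* Per this patch, a non-zero SM4 revision field in PspCcpEngVersion
 * implies the SM4 engine understands the XTS function encoding.
 */
static bool hygon_ccp_has_sm4_xts(void)
{
	return !!(get_ccp_engine_version_reg_val() & RI_SM4VersionNum);
}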
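
ccp_run_sm4_gcm_cmd() falls back to a pure software path, ccp_sm4_gcm_crypt(), when the payload length is zero or the AAD is CCP_AAD_LEN_MAX_HG (128) bytes or longer, since the engine cannot handle those cases. The fallback builds GCM from single-block SM4 (via a transient "sm4" cipher tfm) and gf128mul_lle() (hence the new CRYPTO_SM4 and CRYPTO_GF128MUL selects in Kconfig): H = E_K(0^128), the keystream comes from E_K over an incrementing counter seeded with IV || 1, and the tag is T = GHASH_H(pad(A) || pad(C) || len64(A) || len64(C)) xor E_K(IV || 1). The sketch below restates only the tag computation, assuming the two static helpers added in ccp-ops.c (sm4_generic_crypt_block() and ccp_gcm_ghash()) are in scope; it is illustrative, not a drop-in replacement for the fallback.

/* Tag-only restatement of the software GCM fallback (contiguous buffers). */
static int sm4_gcm_soft_tag_sketch(u8 *key,			/* 16 bytes */
				   const u8 *iv,		/* 12 bytes */
				   const u8 *aad, unsigned int alen,
				   const u8 *ct, unsigned int clen,
				   u8 *tag)			/* 16 bytes out */
{
	be128 ghash = {};
	be128 lens;
	u8 h[SM4_BLOCK_SIZE] = {};	/* H = E_K(0^128) */
	u8 ek_y0[SM4_BLOCK_SIZE] = {};	/* E_K(IV || 0x00000001) */
	int ret;

	ret = sm4_generic_crypt_block(h, h, key, 1);
	if (ret)
		return ret;

	memcpy(ek_y0, iv, HYGON_CCP_SM4GCM_IV_LEN);
	ek_y0[15] = 1;
	ret = sm4_generic_crypt_block(ek_y0, ek_y0, key, 1);
	if (ret)
		return ret;

	/* ccp_gcm_ghash() zero-pads partial trailing blocks */
	ccp_gcm_ghash(&ghash, (be128 *)h, aad, alen);
	ccp_gcm_ghash(&ghash, (be128 *)h, ct, clen);
	lens.a = cpu_to_be64((u64)alen * 8);
	lens.b = cpu_to_be64((u64)clen * 8);
	ccp_gcm_ghash(&ghash, (be128 *)h, &lens, sizeof(lens));

	memcpy(tag, &ghash, SM4_BLOCK_SIZE);
	crypto_xor(tag, ek_y0, SM4_BLOCK_SIZE);
	return 0;
}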