diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf15e49c9749e3fd5e9640ea09056a0..9d5d3312f8e3041e952b71cc1dd819b587033d7b 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -46,6 +46,13 @@ config CRYPTO_DEV_SP_PSP
	 along with software-based Trusted Execution Environment (TEE) to
	 enable third-party trusted applications.

+config HYGON_GM
+	bool "Hygon GM (SM2/SM3/SM4) Interface"
+	default y
+	depends on CRYPTO_DEV_CCP_CRYPTO && X86_64
+	help
+	  Hygon GM (SM2/SM3/SM4) acceleration through the CCP driver.
+
 config CRYPTO_DEV_CCP_DEBUGFS
	bool "Enable CCP Internals in DebugFS"
	default n
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index aa0ba2d17e1e2202bf6115e0774bf63a802115cb..17fb4f80dae6883417a31c1dbaec41c956b7ac0f 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -23,3 +23,11 @@ ccp-crypto-objs := ccp-crypto-main.o \
	    ccp-crypto-des3.o \
	    ccp-crypto-rsa.o \
	    ccp-crypto-sha.o
+
+$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h
+$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h
+
+ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \
+	    ccp-crypto-sm3-hygon.o \
+	    ccp-crypto-sm4-hygon.o \
+	    ccp_sm2_sign.asn1.o
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index ecd58b38c46eed8fa79e9028f7e2be097f85463b..3d22fbabc815a5d452bcf782617ec58e1db81866 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -39,6 +39,10 @@ static unsigned int rsa_disable;
 module_param(rsa_disable, uint, 0444);
 MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

+static unsigned int sm_disable;
+module_param(sm_disable, uint, 0444);
+MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value");
+
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(skcipher_algs);
@@ -322,6 +326,25 @@ static int ccp_register_algs(void)
 {
	int ret;

+#ifdef CONFIG_HYGON_GM
+	if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		ret = ccp_register_sm2_hygon_algs(&akcipher_algs);
+		if (ret)
+			return ret;
+
+		ret = ccp_register_sm3_hygon_algs(&hash_algs);
+		if (ret)
+			return ret;
+
+		ret = ccp_register_sm4_hygon_algs(&skcipher_algs);
+		if (ret)
+			return ret;
+
+		/* on Hygon platforms only the GM algorithms are registered */
+		return 0;
+	}
+#endif
+
	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c
new file mode 100644
index 0000000000000000000000000000000000000000..25c9a49f7d2236059517a5b62f18375dc6a53f8b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c
@@ -0,0 +1,1155 @@
+/*
+ * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+#include "ccp_sm2_sign.asn1.h"
+
+static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = {
+	0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = {
+	0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC,
+};
+
+static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = {
+	0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34,
+	0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7,
+	0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92,
+	0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93,
+};
+
+static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = {
+	0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B,
+	0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22,
+};
+
+static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = {
+	0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19,
+	0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94,
+	0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1,
+	0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7,
+};
+
+static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = {
+	0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C,
+	0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53,
+	0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40,
+	0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0,
+};
+
+struct ccp_sm2_verify_src {
+	u8 operand_e[CCP_SM2_OPERAND_LEN];	/* compressed message */
+	u8 operand_d[CCP_SM2_OPERAND_LEN];	/* input data r */
+	u8 operand_k[CCP_SM2_OPERAND_LEN];	/* input data s */
+	u8 operand_px[CCP_SM2_OPERAND_LEN];	/* x of public key */
+	u8 operand_py[CCP_SM2_OPERAND_LEN];	/* y of public key */
+};
+
+struct ccp_sm2_lp_src {
+	u8 operand_k[CCP_SM2_OPERAND_LEN];	/* random number */
+	u8 operand_px[CCP_SM2_OPERAND_LEN];	/* x of public key */
+	u8 operand_py[CCP_SM2_OPERAND_LEN];	/* y of public key */
+};
+
+struct ccp_sm2_kg_src {
+	u8 operand_k[CCP_SM2_OPERAND_LEN];	/* random number */
+};
+
+struct ccp_sm2_sign_src {
+	u8 operand_e[CCP_SM2_OPERAND_LEN];	/* compressed message */
+	u8 operand_d[CCP_SM2_OPERAND_LEN];	/* private key */
+	u8 operand_k[CCP_SM2_OPERAND_LEN];	/* random number */
+};
+
+struct ccp_sm2_mmul_src {
+	u8 operand_e[CCP_SM2_OPERAND_LEN];	/* multiplicand */
+	u8 operand_d[CCP_SM2_OPERAND_LEN];	/* multiplier */
+};
+
+struct ccp_sm2_dst {
+	union {
+		u8 result[CCP_SM2_OPERAND_LEN];
+		u32 status;
+	} u;
+	u8 result_r[CCP_SM2_OPERAND_LEN];
+	u8 result_s[CCP_SM2_OPERAND_LEN];
+	u8 result_t[CCP_SM2_OPERAND_LEN];
+};
+
+struct sm2_signature_ctx {
+	const u8 *sig_r;
+	const u8 *sig_s;
+	size_t r_len;
+	size_t s_len;
+};
+
+int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+			    const void *value, size_t vlen)
+{
+	struct sm2_signature_ctx *sig = context;
+
+	if (!value || !vlen)
+		return -EINVAL;
+
+	sig->sig_r = value;
+	sig->r_len = vlen;
+
+	if (!sig->sig_r)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+			    const void *value, size_t vlen)
+{
+	struct sm2_signature_ctx *sig = context;
+
+	if (!value || !vlen)
+		return -EINVAL;
+
+	sig->sig_s = value;
+	sig->s_len = vlen;
+
+	if (!sig->sig_s)
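+	/* Both callbacks above are invoked by the asn1_ber_decoder() call in
+	 * ccp_sm2_verify() below.  The generated ccp_sm2_sign_decoder comes
+	 * from ccp_sm2_sign.asn1, which this hunk does not show; a minimal
+	 * grammar consistent with these callbacks would be:
+	 *
+	 *	Sm2Signature ::= SEQUENCE {
+	 *		sig_r	INTEGER ({ ccp_sm2_get_signature_r }),
+	 *		sig_s	INTEGER ({ ccp_sm2_get_signature_s })
+	 *	}
+	 */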
+ return -ENOMEM; + + return 0; +} + +static bool ccp_sm2_is_zero(const u64 *data, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++) { + if (data[i]) + return false; + } + + return true; +} + +/* Return: + * 1: a > b + * -1: a < b + * 0: a = b + */ +static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu; + u32 i; + + for (i = 0; i < count; i++) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + if (a_cpu > b_cpu) + return 1; + else if (a_cpu < b_cpu) + return -1; + } + + return 0; +} + +/* a = a + b */ +static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu, c_cpu, d_cpu; + u32 carry = 0; + s32 i; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + c_cpu = a_cpu + b_cpu; + d_cpu = c_cpu + carry; + a[i] = cpu_to_be64(d_cpu); + + if (c_cpu < a_cpu) + carry = 1; + else if (carry && !d_cpu) + carry = 1; + else + carry = 0; + } +} + +/* a = -a */ +static void ccp_sm2_fp_neg(u64 *a, u32 count) +{ + u64 a_cpu, c_cpu; + s32 i; + + for (i = 0; i <= count - 1; i++) + a[i] = ~a[i]; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + c_cpu = a_cpu + 1; + a[i] = cpu_to_be64(c_cpu); + + if (a_cpu < c_cpu) + break; + } +} + +/* a = a - b */ +static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count) +{ + ccp_sm2_fp_neg(b, count); + ccp_sm2_fp_add(a, b, count); +} + +/* a and tmp must be 64B, b and c must be 32B + * a = b * c + */ +static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp) +{ + u64 b_cpu, c_cpu, m_cpu; + u32 rem_cpu; + u32 *base, *m_cur; + int i, j, iter; + + memset(a, 0, CCP_SM2_MMUL_LEN); + + iter = 7; + base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32)); + for (i = iter; i >= 0; i--) { + b_cpu = be32_to_cpu(b[i]); + memset(tmp, 0, CCP_SM2_MMUL_LEN); + + rem_cpu = 0; + m_cur = base; + for (j = iter; j >= 0; j--) { + c_cpu = be32_to_cpu(c[j]); + + m_cpu = b_cpu * c_cpu + rem_cpu; + rem_cpu = (u32)(m_cpu >> 32); + *m_cur = cpu_to_be32((u32)(m_cpu)); + m_cur--; + } + *m_cur = cpu_to_be32(rem_cpu); + ccp_sm2_fp_add((u64 *)a, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + + base--; + } +} + +/* mmul, dst, tmp must be 64B, remainder in mmul[32-63] + * high:low mod p + * = high*2^256+low mod p + * = high*(p+h)+low mod p + * = high*h+low mod p + * = high*(2^224+2^96-2^64+1)+low mod p + * iterating 8 times + */ +static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp) +{ + u8 *mmul_high, *mmul_low; + u32 count; + int i, iter, ret; + + mmul_high = mmul; + mmul_low = mmul + CCP_SM2_OPERAND_LEN; + count = CCP_SM2_MMUL_LEN / sizeof(u64); + + iter = 8; + for (i = 0; i < iter; i++) { + /* dst = high * 2^224 */ + memset(dst, 0, CCP_SM2_MMUL_LEN); + memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN); + + /* dst += high * 2^96 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 2^64 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 1 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += low */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* copy dst to mmul */ + memcpy(mmul, dst, CCP_SM2_MMUL_LEN); + } + + do { + memset(tmp, 0, CCP_SM2_MMUL_LEN); 
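+		/* The folding above leaves a value that may still exceed p
+		 * (p occupies bits 0-255 of the 64-byte buffer), so p is
+		 * subtracted until a proper residue remains.  The identity
+		 * used by the folding is 2^256 = 2^224 + 2^96 - 2^64 + 1
+		 * (mod p), i.e. h = 2^256 - p for the SM2 prime
+		 * p = 2^256 - 2^224 - 2^96 + 2^64 - 1.
+		 */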
+		memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN);
+		ret = ccp_sm2_fp_cmp(
+			(u64 *)mmul, (u64 *)tmp,
+			CCP_SM2_MMUL_LEN / sizeof(u64));
+		if (ret < 0)
+			break;
+
+		ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count);
+	} while (1);
+}
+
+static int ccp_sm2_is_privkey_valid(const u8 *priv_key)
+{
+	u64 last, last_cpu;
+	bool zero;
+	int ret;
+
+	/* the private key must lie in the open interval (1, n-1) */
+	zero = ccp_sm2_is_zero((const u64 *)priv_key,
+			CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1);
+	if (zero) {
+		last = *(const u64 *)
+			(priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64));
+		last_cpu = be64_to_cpu(last);
+		if (last_cpu <= 1)
+			return -EINVAL;
+	}
+
+	ret = ccp_sm2_fp_cmp((const u64 *)priv_key,
+			(const u64 *)sm2_ecc_n_sub_1,
+			CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64));
+	if (ret >= 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm,
+			      const void *key, unsigned int keylen)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct ccp_sm2_ctx *sm2 = &ctx->u.sm2;
+	int ret;
+
+	if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN)
+		return -EINVAL;
+
+	ret = ccp_sm2_is_privkey_valid(key);
+	if (ret < 0)
+		return ret;
+
+	memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN);
+	sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN;
+
+	return 0;
+}
+
+static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx,
+			    u32 src_size, enum ccp_sm2_mode mode, u32 rand)
+{
+	struct akcipher_request *req = rctx->req;
+	struct ccp_sm2_engine *sm2 = NULL;
+	int ret;
+
+	sg_init_one(&rctx->src_sg, rctx->src, src_size);
+	memset(rctx->dst, 0, CCP_SM2_DST_SIZE);
+	sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE);
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_SM2;
+
+	sm2 = &rctx->cmd.u.sm2;
+	sm2->mode = mode;
+	sm2->rand = rand;	/* whether operand_k is drawn from the TRNG */
+	sm2->src = &rctx->src_sg;
+	sm2->src_len = src_size;
+	sm2->dst = &rctx->dst_sg;
+	sm2->dst_len = CCP_SM2_DST_SIZE;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py)
+{
+	u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)];
+	int ret1, ret2;
+
+	/* a private key of 1 makes the corresponding public key invalid */
+	ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN);
+	ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN);
+	if (!ret1 && !ret2)
+		return -EINVAL;
+
+	/* a private key of n - 1 makes the corresponding public key invalid */
+	memcpy(buf, py, CCP_SM2_OPERAND_LEN);
+	ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy,
+			CCP_SM2_OPERAND_LEN / sizeof(u64));
+	ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN);
+	if (!ret1 && !ret2)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict)
+{
+	const u8 *px, *py;
+	u8 *tmp;
+	bool zero;
+	int ret;
+
+	px = rctx->src + CCP_SM2_LP_SRC_SIZE;
+	py = px + CCP_SM2_OPERAND_LEN;
+
+	zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64));
+	if (zero)
+		return -EINVAL;
+
+	/* x < p */
+	ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p,
+			CCP_SM2_OPERAND_LEN / sizeof(u64));
+	if (ret >= 0)
+		return -EINVAL;
+
+	/* y < p */
+	ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p,
+			CCP_SM2_OPERAND_LEN / sizeof(u64));
+	if (ret >= 0)
+		return -EINVAL;
+
+	if (strict) {
+		ret = ccp_sm2_pubkey_strict_valid(px, py);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* check whether y^2 = x^3 + ax + b */
+	tmp = rctx->dst + CCP_SM2_MMUL_LEN;
+	/* y * y */
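+	/* The curve check below uses only the two request-context buffers as
+	 * scratch: rctx->dst receives each 512-bit product, rctx->src stages
+	 * the operand to add, and the reduced y^2 is parked at
+	 * rctx->src + CCP_SM2_MMUL_LEN for the final comparison against
+	 * (x^2 + a)x + b mod p.
+	 */
+	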
ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. + */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; + int ret; + + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. + */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + 
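+		/* zero words of t are counted rather than XORed so the
+		 * caller can detect an all-zero KDF output t, which the SM2
+		 * scheme (GB/T 32918) treats as invalid: encryption retries
+		 * with a fresh k (-EAGAIN), decryption fails with -EBADMSG.
+		 */
+		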
msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(req->base.data, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct 
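+	/* Encryption runs as a three-phase state machine: a KG command
+	 * generates C1 = [k]G with k drawn from the TRNG, ccp_sm2_enc_lp()
+	 * then posts the LP command [k]P against the public key, and
+	 * ccp_sm2_enc_compute() finally derives C2 and C3 on the CPU.
+	 */
+	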
ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if (req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, 
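+	/* req->src carries the 32-byte SM3 digest e = SM3(Z_A || M); the
+	 * engine signs a caller-supplied digest, it never hashes the
+	 * message itself.
+	 */
+			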
CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; + int nents; + int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); + + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + +error: + kfree(buffer); + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, 
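+	/* scrub the private-key copy that was staged in the DMA-visible
+	 * source buffer for this command
+	 */
+	       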
CCP_SM2_PRIVATE_KEY_LEN); + + return 0; +} + +static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + /* random operand_k is not satisfied with[1, n-1], try again */ + if (unlikely(dst->u.status)) + return -EAGAIN; + + INIT_WORK(&rctx->work, ccp_sm2_enc_lp); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_enc_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_dec_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = + container_of(async_req, struct akcipher_request, base); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + switch (rctx->phase) { + case CCP_SM2_SIGN_PH_SIGN: + ret = ccp_sm2_sign_handle(rctx); + break; + case CCP_SM2_VERIFY_PH_VERIFY: + ret = ccp_sm2_verify_handle(rctx); + break; + case CCP_SM2_ENC_PH_KG: + ret = ccp_sm2_enc_kg_handle(rctx); + break; + case CCP_SM2_ENC_PH_LP: + ret = ccp_sm2_enc_lp_handle(rctx); + break; + case CCP_SM2_DEC_PH_LP: + ret = ccp_sm2_dec_lp_handle(rctx); + break; + } + + return ret; +} + +static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx)); + ctx->complete = ccp_sm2_complete; + + return 0; +} + +static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ +} + +static struct akcipher_alg ccp_sm2_defaults = { + .sign = ccp_sm2_sign, + .verify = ccp_sm2_verify, + .encrypt = ccp_sm2_encrypt, + .decrypt = ccp_sm2_decrypt, + .set_pub_key = ccp_sm2_setpubkey, + .set_priv_key = ccp_sm2_setprivkey, + .max_size = ccp_sm2_maxsize, + .init = ccp_sm2_init_tfm, + .exit = ccp_sm2_exit_tfm, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm2_def { + unsigned int version; + const char *name; + const char *driver_name; + struct akcipher_alg *alg_defaults; +}; + +static struct ccp_sm2_def sm2_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm2", + .driver_name = "sm2-ccp", + .alg_defaults = &ccp_sm2_defaults, + } +}; + +static int ccp_register_sm2_hygon_alg(struct list_head *head, + const struct ccp_sm2_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm2_hygon_algs(struct 
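+/* A minimal in-kernel consumer of the "sm2" akcipher registered here might
+ * look like the sketch below (illustrative only; error handling elided, and
+ * the key/sg variable names are hypothetical):
+ *
+ *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("sm2", 0, 0);
+ *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_akcipher_set_priv_key(tfm, key, CCP_SM2_PRIVATE_KEY_LEN);
+ *	akcipher_request_set_crypt(req, digest_sg, sig_sg,
+ *				   CCP_SM2_OPERAND_LEN, CCP_SM2_DST_SIZE);
+ *	crypto_akcipher_sign(req);
+ */
+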
list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) {
+		if (sm2_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
new file mode 100644
index 0000000000000000000000000000000000000000..46ddbc1f14a8ec48df3b4e447d99a9c4788272db
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
@@ -0,0 +1,488 @@
+/*
+ * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sm3.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+
+	if (ret)
+		goto e_free;
+
+	rctx->msg_bits += (rctx->hash_cnt << 3);
+	if (rctx->hash_rem) {
+		/* save remaining data to buffer */
+		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
+		scatterwalk_map_and_copy(rctx->buf, rctx->src,
+					 offset, rctx->hash_rem, 0);
+		rctx->buf_count = rctx->hash_rem;
+	} else {
+		rctx->buf_count = 0;
+	}
+
+	if (rctx->final) {
+		if (req->result)
+			memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE);
+
+		memset(rctx->ctx, 0, SM3_DIGEST_SIZE);
+	}
+
+e_free:
+	sg_free_table(&rctx->data_sg);
+
+	return ret;
+}
+
+static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes,
+			     unsigned int final)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+	struct scatterlist *sg = req->src;
+	struct ccp_sm3_engine *sm3 = NULL;
+	unsigned int sg_count;
+	gfp_t gfp;
+	u64 len, msg_bits = 0;
+	int nents;
+	int ret;
+
+	/* the length of src must be checked here, otherwise
+	 * ccp_sm3_complete() would hit a NULL pointer dereference
+	 */
+	if (nbytes) {
+		nents = sg_nents_for_len(req->src, nbytes);
+		if (nents < 0)
+			return -EINVAL;
+	}
+
+	len = (u64)rctx->buf_count + (u64)nbytes;
+	if (len <= SM3_BLOCK_SIZE) {
+		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+					 0, nbytes, 0);
+		rctx->buf_count += nbytes;
+		if (!final)
+			return 0;
+
+		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+		sg = &rctx->buf_sg;
+	} else {
+		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
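+		/* data now exceeds one SM3 block: stitch the buffered
+		 * partial block and req->src into a single sg table so the
+		 * engine consumes whole blocks; any tail left over is
+		 * re-buffered for the next update in ccp_sm3_complete()
+		 */
+		      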
+ GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = 
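+	/* rebuild the minimal resume state saved by ccp_sm3_export(): the
+	 * running digest, the processed-bit count and the buffered partial
+	 * block are all that is needed to continue hashing
+	 */
+	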
state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, 
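+	/* advertise the composite under the generic template name, i.e.
+	 * "hmac(sm3)", so lookups of the template resolve to this driver;
+	 * the ipad is preloaded into the first block by ccp_sm3_init() and
+	 * the opad pass is handed to the engine via sm3->opad at
+	 * finalization
+	 */
+		 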
CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..0d1c750ff118d99546b3d345465b0354dc3fadfa --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,324 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+
+#include "ccp-crypto.h"
+
+enum ccp_sm4_alg_mode {
+	CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB,
+	CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC,
+	CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB,
+	CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB,
+	CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR,
+	CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB,
+	CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC,
+	CCP_SM4_ALG_MODE__LAST,
+};
+
+static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async_req);
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
+		memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE);
+		memset(rctx->iv, 0, SM4_BLOCK_SIZE);
+	}
+
+	return 0;
+}
+
+static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			  unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	/* key_len has already been validated by the skcipher core,
+	 * but key itself has not
+	 */
+	if (!key)
+		return -EINVAL;
+
+	memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE);
+	sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE);
+
+	ctx->u.sm4.key_len = SM4_KEY_SIZE;
+
+	return 0;
+}
+
+static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	struct ccp_cmd *cmd = NULL;
+	enum ccp_sm4_alg_mode mode;
+	enum ccp_sm4_action action;
+	int ret;
+
+	if (!ctx->u.sm4.key_len)
+		return -ENOKEY;
+
+	mode = ctx->u.sm4.mode;
+	if ((mode != CCP_SM4_ALG_MODE_CTR) &&
+	    (mode != CCP_SM4_ALG_MODE_OFB) &&
+	    (mode != CCP_SM4_ALG_MODE_CFB) &&
+	    (req->cryptlen & (SM4_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
+		if (!req->iv)
+			return -EINVAL;
+
+		memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE);
+		iv_sg = &rctx->iv_sg;
+		sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE);
+	}
+
+	cmd = &rctx->cmd;
+	memset(cmd, 0, sizeof(*cmd));
+	INIT_LIST_HEAD(&cmd->entry);
+	action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT;
+	if (mode == CCP_SM4_ALG_MODE_CTR) {
+		cmd->engine = CCP_ENGINE_SM4_CTR;
+		cmd->u.sm4_ctr.action = action;
+		cmd->u.sm4_ctr.size = 63;
+		cmd->u.sm4_ctr.step = 1;
+
+		cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg;
+		cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE;
+		cmd->u.sm4_ctr.iv = iv_sg;
+		cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE;
+
+		cmd->u.sm4_ctr.src = req->src;
+		cmd->u.sm4_ctr.dst = req->dst;
+		cmd->u.sm4_ctr.src_len = req->cryptlen;
+
+	} else {
+		cmd->engine = CCP_ENGINE_SM4;
+		cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK;
+		cmd->u.sm4.action = action;
+		if (mode & CCP_SM4_MODE_HS_SEL)
+			cmd->u.sm4.select = 1;
+
+		cmd->u.sm4.key = &ctx->u.sm4.key_sg;
+		cmd->u.sm4.key_len = SM4_KEY_SIZE;
+		cmd->u.sm4.iv = iv_sg;
+		cmd->u.sm4.iv_len = iv_sg ?
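+		/* iv_sg is NULL only for the ECB modes, which carry no IV;
+		 * every other SM4 mode hands the engine one block of IV
+		 * state
+		 */
+				     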
SM4_BLOCK_SIZE : 0;
+
+		cmd->u.sm4.src = req->src;
+		cmd->u.sm4.dst = req->dst;
+		cmd->u.sm4.src_len = req->cryptlen;
+	}
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_sm4_encrypt(struct skcipher_request *req)
+{
+	return ccp_sm4_crypt(req, true);
+}
+
+static int ccp_sm4_decrypt(struct skcipher_request *req)
+{
+	return ccp_sm4_crypt(req, false);
+}
+
+static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->complete = ccp_sm4_complete;
+	ctx->u.sm4.mode = alg->mode;
+
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx));
+
+	return 0;
+}
+
+static const struct skcipher_alg ccp_sm4_defaults = {
+	.setkey = ccp_sm4_setkey,
+	.encrypt = ccp_sm4_encrypt,
+	.decrypt = ccp_sm4_decrypt,
+	.min_keysize = SM4_KEY_SIZE,
+	.max_keysize = SM4_KEY_SIZE,
+	.init = ccp_sm4_init_tfm,
+
+	.base.cra_flags = CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_blocksize = SM4_BLOCK_SIZE,
+	.base.cra_ctxsize = sizeof(struct ccp_ctx),
+	.base.cra_priority = CCP_CRA_PRIORITY,
+	.base.cra_module = THIS_MODULE,
+};
+
+struct ccp_sm4_def {
+	enum ccp_sm4_alg_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	const struct skcipher_alg *alg_defaults;
+};
+
+static struct ccp_sm4_def sm4_algs[] = {
+	{
+		.mode = CCP_SM4_ALG_MODE_ECB,
+		.version = CCP_VERSION(5, 0),
+		.name = "ecb(sm4)",
+		.driver_name = "ecb-sm4-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = 0,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_ECB_HS,
+		.version = CCP_VERSION(5, 0),
+		.name = "ecb(sm4)",
+		.driver_name = "ecb-sm4-hs-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = 0,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_CBC,
+		.version = CCP_VERSION(5, 0),
+		.name = "cbc(sm4)",
+		.driver_name = "cbc-sm4-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = SM4_BLOCK_SIZE,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_CBC_HS,
+		.version = CCP_VERSION(5, 0),
+		.name = "cbc(sm4)",
+		.driver_name = "cbc-sm4-hs-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = SM4_BLOCK_SIZE,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_OFB,
+		.version = CCP_VERSION(5, 0),
+		.name = "ofb(sm4)",
+		.driver_name = "ofb-sm4-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = SM4_BLOCK_SIZE,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_CFB,
+		.version = CCP_VERSION(5, 0),
+		.name = "cfb(sm4)",
+		.driver_name = "cfb-sm4-ccp",
+		.blocksize = SM4_BLOCK_SIZE,
+		.ivsize = SM4_BLOCK_SIZE,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+	{
+		.mode = CCP_SM4_ALG_MODE_CTR,
+		.version = CCP_VERSION(5, 0),
+		.name = "ctr(sm4)",
+		.driver_name = "ctr-sm4-ccp",
+		.blocksize = 1,
+		.ivsize = SM4_BLOCK_SIZE,
+		.alg_defaults = &ccp_sm4_defaults,
+	},
+};
+
+static int ccp_register_sm4_hygon_alg(struct list_head
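+/* For reference, a minimal in-kernel user of one of these algorithms
+ * (illustrative sketch only; error handling elided and the key/sg/iv
+ * variable names are hypothetical):
+ *
+ *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(sm4)", 0, 0);
+ *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_skcipher_setkey(tfm, key, SM4_KEY_SIZE);
+ *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
+ *	crypto_skcipher_encrypt(req);
+ */
+		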
*head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d071680e2ef0432ce704bf4a48f1c35962..58b2950f91001a6b12cfa5c5bae8add6663e7c67 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,105 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +366,9 @@ struct ccp_ctx { 
struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -282,5 +384,8 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); +int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7b73332d6aa1abcbe3794d75a6b66522d4031b88..e5c129c3e0497f96ccbb2d454b00aca675e99d7c 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,6 +131,28 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; u16 raw; }; @@ -151,6 +173,15 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -186,6 +217,8 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -194,6 +227,17 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) +static inline unsigned int command_per_queue(void) +{ +#ifdef CONFIG_HYGON_GM + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
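+	/* Hygon parts use their own queue depth (HYGON_COMMANDS_PER_QUEUE),
+	 * which is why the ring arithmetic below is parameterised through
+	 * command_per_queue() rather than using COMMANDS_PER_QUEUE directly.
+	 */
+	       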
+		HYGON_COMMANDS_PER_QUEUE :
+		COMMANDS_PER_QUEUE;
+#else
+	return COMMANDS_PER_QUEUE;
+#endif
+}
+
 static inline u32 low_address(unsigned long addr)
 {
 	return (u64)addr & 0x0ffffffff;
@@ -207,15 +251,86 @@ static inline u32 high_address(unsigned long addr)
 static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
 {
 	unsigned int head_idx, n;
-	u32 head_lo, queue_start;
+	u32 head_lo, queue_start, command_per_q;
 
+	command_per_q = command_per_queue();
 	queue_start = low_address(cmd_q->qdma_tail);
 	head_lo = ioread32(cmd_q->reg_head_lo);
 	head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);
 
-	n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;
+	n = head_idx + command_per_q - cmd_q->qidx - 1;
 
-	return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
+	return n % command_per_q; /* Always one unused spot */
+}
+
+static int ccp5_do_multi_cmds(struct ccp5_desc *desc,
+			      struct ccp_cmd_queue *cmd_q)
+{
+	__le32 *mP;
+	u32 *dP;
+	int i;
+	u32 command_per_q;
+
+	command_per_q = command_per_queue();
+
+	cmd_q->total_ops++;
+
+	if (CCP5_CMD_SOC(desc)) {
+		CCP5_CMD_IOC(desc) = 1;
+		CCP5_CMD_SOC(desc) = 0;
+	}
+
+	mutex_lock(&cmd_q->q_mutex);
+
+	mP = (__le32 *) &cmd_q->qbase[cmd_q->qidx];
+	dP = (u32 *) desc;
+	for (i = 0; i < 8; i++)
+		mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q;
+
+	mutex_unlock(&cmd_q->q_mutex);
+
+	return 0;
+}
+
+static int ccp5_do_run_cmd(struct ccp_op *op)
+{
+	struct ccp_cmd_queue *cmd_q = op->cmd_q;
+	u32 tail;
+	int ret = 0;
+
+	mutex_lock(&cmd_q->q_mutex);
+
+	/* The data used by this command must be flushed to memory */
+	wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+	iowrite32(tail, cmd_q->reg_tail_lo);
+
+	/* Turn the queue back on using our cached control register */
+	iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
+	mutex_unlock(&cmd_q->q_mutex);
+
+	if (op->ioc) {
+		/* Wait for the job to complete */
+		ret = wait_event_interruptible(cmd_q->int_queue,
+					       cmd_q->int_rcvd);
+		if (ret || cmd_q->cmd_error) {
+			/* Log the error and flush the queue by
+			 * moving the head pointer
+			 */
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp, cmd_q->cmd_error);
+			iowrite32(tail, cmd_q->reg_head_lo);
+			if (!ret)
+				ret = -EIO;
+		}
+		cmd_q->int_rcvd = 0;
+	}
+
+	return ret;
+}
+
 static int ccp5_do_cmd(struct ccp5_desc *desc,
@@ -223,10 +338,11 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
 {
 	__le32 *mP;
 	u32 *dP;
-	u32 tail;
+	u32 tail, command_per_q;
 	int i;
 	int ret = 0;
 
+	command_per_q = command_per_queue();
 	cmd_q->total_ops++;
 
 	if (CCP5_CMD_SOC(desc)) {
@@ -240,7 +356,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
 	for (i = 0; i < 8; i++)
 		mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
 
-	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q;
 
 	/* The data used by this command must be flushed to memory */
 	wmb();
@@ -584,6 +700,163 @@ static int ccp5_perform_ecc(struct ccp_op *op)
 	return ccp5_do_cmd(&desc, op->cmd_q);
 }
 
+static int ccp5_perform_sm2(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	struct ccp_dma_info *saddr = &op->src.u.dma;
+	struct ccp_dma_info *daddr = &op->dst.u.dma;
+
+	op->cmd_q->total_sm2_ops++;
+
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 1;
+	CCP5_CMD_EOM(&desc) = 1;
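+	/* A single descriptor carries the whole SM2 operation: INIT and
+	 * EOM are both set, and IOC requests a completion interrupt for
+	 * this job.
+	 */
+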
CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = saddr->length; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = 
ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+
+	return ccp5_do_multi_cmds(&desc, op->cmd_q);
+}
+
 static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
 {
 	int q_mask = 1 << cmd_q->id;
@@ -593,6 +866,7 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
 	/* Build a bit mask to know which LSBs this queue has access to.
 	 * Don't bother with segment 0 as it has special privileges.
 	 */
+	status >>= LSB_REGION_WIDTH;
 	for (j = 1; j < MAX_LSB_CNT; j++) {
 		if (status & q_mask)
 			bitmap_set(cmd_q->lsbmask, j, 1);
@@ -744,7 +1018,7 @@ static void ccp5_irq_bh(unsigned long data)
 
 		status = ioread32(cmd_q->reg_interrupt_status);
 
-		if (status) {
+		if (status & SUPPORTED_INTERRUPTS) {
 			cmd_q->int_status = status;
 			cmd_q->q_status = ioread32(cmd_q->reg_status);
 			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
@@ -753,10 +1027,9 @@ static void ccp5_irq_bh(unsigned long data)
 			if ((status & INT_ERROR) && !cmd_q->cmd_error)
 				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
 
-			cmd_q->int_rcvd = 1;
-
 			/* Acknowledge the interrupt and wake the kthread */
 			iowrite32(status, cmd_q->reg_interrupt_status);
+			cmd_q->int_rcvd = 1;
 			wake_up_interruptible(&cmd_q->int_queue);
 		}
 	}
@@ -784,7 +1057,7 @@ static int ccp5_init(struct ccp_device *ccp)
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
 	unsigned int qmr, i;
 	u64 status;
-	u32 status_lo, status_hi;
+	u32 status_lo, status_hi, command_per_q, queue_size_val;
 	int ret;
 
 	/* Find available queues */
@@ -801,6 +1074,9 @@ static int ccp5_init(struct ccp_device *ccp)
 		return 1;
 	}
 
+	command_per_q = command_per_queue();
+	queue_size_val = QUEUE_SIZE_VAL(command_per_q);
+
 	for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
 		if (!(qmr & (1 << i)))
 			continue;
@@ -827,7 +1103,7 @@ static int ccp5_init(struct ccp_device *ccp)
 		/* Page alignment satisfies our needs for N <= 128 */
 		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
 
-		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+		cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE);
 		cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
 						   &cmd_q->qbase_dma,
 						   GFP_KERNEL);
@@ -914,7 +1190,7 @@ static int ccp5_init(struct ccp_device *ccp)
 		cmd_q = &ccp->cmd_q[i];
 
 		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
-		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;
+		cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT;
 
 		cmd_q->qdma_tail = cmd_q->qbase_dma;
 		dma_addr_lo = low_address(cmd_q->qdma_tail);
@@ -1062,6 +1338,26 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	}
 }
 
+static int ccp5_get_trng_mask_param(void)
+{
+	/* According to the spec, the SM4 high-secure module needs 64
+	 * bytes of mask data, so the TRNG mask register must be written
+	 * 16 times (or a multiple of 16) at initialization.
+	 *
+	 * The AES algorithm needs 48 bytes, so 12 writes (or a multiple
+	 * of 12) are required.
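+	 *
+	 * Each TRNG read below returns a 32-bit word, so 16 writes
+	 * provide 16 * 4 = 64 bytes and 12 writes provide 12 * 4 = 48
+	 * bytes.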
+ */ + +#ifdef CONFIG_HYGON_GM + /* for sm4 HS */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 16; +#endif + + /* for AES HS */ + return 12; +} + static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1072,12 +1368,13 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; + int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < 12; i++) { + for (i = 0; i < len; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1103,6 +1400,11 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821e5fd8d76dc4eb26bddf4aa9fc812..e1aa68f4044c84f3f207f966356f61afa7a55386 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,12 +99,15 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 -#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ - CMD5_Q_SIZE) -#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) +#define HYGON_COMMANDS_PER_QUEUE 8192 + #define Q_DESC_SIZE sizeof(struct ccp5_desc) -#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) +#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) +#define Q_SIZE(c, n) ((c)*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 @@ -334,6 +337,10 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; + unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -528,6 +535,28 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +580,10 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -599,6 +632,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -609,6 +643,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -657,6 +692,11 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); + int (*sm4)(struct ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, 
unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index aa4e1a5006919da78f1e5392c686479da2d579b9..684aa6c55cf169dd66f2a7ab8040325be17acad4 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2460,6 +2460,520 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + 
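+	/* The running digest lives in the LSB slot reserved for this
+	 * queue (sb_ctx): it is preloaded from sm3->ctx for non-first
+	 * operations and read back into sm3->ctx after the data has
+	 * been processed.
+	 */
+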
op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + /* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || 
sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; 
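+	/* size gives the width in bits of the counter field and step
+	 * the per-block counter increment; both are programmed into the
+	 * SM4_CTR function field of the descriptor by
+	 * ccp5_perform_sm4_ctr().
+	 */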
+ op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2504,6 +3018,18 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 0000000000000000000000000000000000000000..7e83e6799cb47574430dbecb06415ea179767adc --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE { + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ ccp_sm2_get_signature_s }) +} diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a17ba353d6a0e67af8a033fbcf67ff..8e34f05bc6b1681fa11cc89384cd92e555064afb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,202 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define 
CCP_SM2_KG_SRC_SIZE	32
+#define CCP_SM2_SIGN_SRC_SIZE	96
+#define CCP_SM2_MMUL_SRC_SIZE	64
+#define CCP_SM2_DST_SIZE	128
+
+/**
+ * ccp_sm2_mode - SM2 operation mode
+ *
+ * @CCP_SM2_MODE_VERIFY: Verify mode
+ * @CCP_SM2_MODE_LP: LP mode
+ * @CCP_SM2_MODE_KG: KG mode
+ * @CCP_SM2_MODE_SIGN: SIGN mode
+ * @CCP_SM2_MODE_MMUL: MMUL mode
+ */
+enum ccp_sm2_mode {
+	CCP_SM2_MODE_VERIFY,
+	CCP_SM2_MODE_LP,
+	CCP_SM2_MODE_KG,
+	CCP_SM2_MODE_SIGN,
+	CCP_SM2_MODE_MMUL,
+	CCP_SM2_MODE__LAST,
+};
+
+/**
+ * struct ccp_sm2_engine - CCP SM2 operation
+ * @mode: SM2 operation mode
+ * @rand: indicates whether operand_k is supplied by the TRNG
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @dst_len: length in bytes of data produced by this operation
+ */
+struct ccp_sm2_engine {
+	enum ccp_sm2_mode mode;
+	u32 rand;
+
+	struct scatterlist *src;
+	u32 src_len;
+
+	struct scatterlist *dst;
+	u32 dst_len;
+};
+
+/***** SM3 engine *****/
+/**
+ * ccp_sm3_type - type of SM3 operation
+ *
+ * @CCP_SM3_TYPE_256: SM3 operation
+ */
+enum ccp_sm3_type {
+	CCP_SM3_TYPE_256 = 2,
+	CCP_SM3_TYPE__LAST,
+};
+
+/**
+ * struct ccp_sm3_engine - CCP SM3 operation
+ * @type: Type of SM3 operation
+ * @ctx: current hash value
+ * @ctx_len: length in bytes of hash value
+ * @src: data to be used for this operation
+ * @src_len: length in bytes of data used for this operation
+ * @opad: data to be used for final HMAC operation
+ * @opad_len: length in bytes of data used for final HMAC operation
+ * @first: indicates first SM3 operation
+ * @final: indicates final SM3 operation
+ * @msg_bits: total length of the message in bits, used in the final
+ *	      SM3 operation
+ */
+struct ccp_sm3_engine {
+	enum ccp_sm3_type type;
+
+	struct scatterlist *ctx;
+	u32 ctx_len;
+
+	struct scatterlist *src;
+	u64 src_len;
+
+	struct scatterlist *opad;
+	u32 opad_len;
+
+	u32 first;
+	u32 final;
+	u64 msg_bits;
+};
+
+/***** SM4 engine *****/
+#define SM4_BLOCK_SIZE		16
+#define SM4_KEY_SIZE		16
+#define CCP_SM4_MODE_MASK	0x0F
+#define CCP_SM4_MODE_HS_SEL	0x10
+
+/**
+ * ccp_sm4_mode - SM4 operation mode
+ *
+ * @CCP_SM4_MODE_ECB: ECB mode
+ * @CCP_SM4_MODE_CBC: CBC mode
+ * @CCP_SM4_MODE_OFB: OFB mode
+ * @CCP_SM4_MODE_CFB: CFB mode
+ * @CCP_SM4_MODE_CTR: CTR mode
+ */
+enum ccp_sm4_mode {
+	CCP_SM4_MODE_ECB = 0,
+	CCP_SM4_MODE_CBC,
+	CCP_SM4_MODE_OFB,
+	CCP_SM4_MODE_CFB,
+	CCP_SM4_MODE_CTR,
+	CCP_SM4_MODE__LAST,
+};
+
+/**
+ * ccp_sm4_action - SM4 operation
+ *
+ * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation
+ * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation
+ */
+enum ccp_sm4_action {
+	CCP_SM4_ACTION_DECRYPT = 0,
+	CCP_SM4_ACTION_ENCRYPT,
+	CCP_SM4_ACTION__LAST,
+};
+
+/**
+ * struct ccp_sm4_engine - CCP SM4 operation
+ * @mode: SM4 operation mode
+ * @action: SM4 operation (decrypt/encrypt)
+ * @select: indicates whether the high-secure engine is selected
+ * @key: key to be used for this SM4 operation
+ * @key_len: length in bytes of key
+ * @iv: IV to be used for this SM4 operation
+ * @iv_len: length in bytes of iv
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - mode, action, select, key, key_len, src, dst, src_len
+ * - iv, iv_len for any mode other than ECB
+ * - key_len and iv_len must be 16 bytes
+ * - src_len must be a multiple of 16 bytes
+ * - the high-secure engine supports only ECB and CBC mode
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * SM4 operation the new IV overwrites the old IV.
+ */
+struct ccp_sm4_engine {
+	enum ccp_sm4_mode mode;
+	enum ccp_sm4_action action;
+	u32 select;	/* nonzero to select the high-secure engine */
+
+	struct scatterlist *key;
+	u32 key_len;		/* In bytes */
+
+	struct scatterlist *iv;
+	u32 iv_len;		/* In bytes */
+
+	struct scatterlist *src, *dst;
+	u64 src_len;		/* In bytes */
+};
+
+/***** SM4_CTR engine *****/
+/**
+ * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation
+ * @action: SM4_CTR operation (decrypt/encrypt)
+ * @size: counter bit size
+ * @step: counter increment step
+ * @key: key to be used for this SM4 operation
+ * @key_len: length in bytes of key
+ * @iv: IV to be used for this SM4 operation
+ * @iv_len: length in bytes of iv
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len
+ * - key_len and iv_len must be 16 bytes
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * SM4_CTR operation the new IV overwrites the old IV.
+ */
+struct ccp_sm4_ctr_engine {
+	enum ccp_sm4_action action;
+	u32 size;
+	u32 step;
+
+	struct scatterlist *key;
+	u32 key_len;		/* In bytes */
+
+	struct scatterlist *iv;
+	u32 iv_len;		/* In bytes */
+
+	struct scatterlist *src, *dst;
+	u64 src_len;		/* In bytes */
+};
 
 /**
  * ccp_engine - CCP operation identifiers
@@ -599,6 +796,10 @@ struct ccp_ecc_engine {
  * @CCP_ENGINE_PASSTHRU: pass-through operation
  * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
  * @CCP_ENGINE_ECC: ECC operation
+ * @CCP_ENGINE_SM2: SM2 operation
+ * @CCP_ENGINE_SM3: SM3 operation
+ * @CCP_ENGINE_SM4: SM4 operation
+ * @CCP_ENGINE_SM4_CTR: SM4 CTR-mode operation
  */
 enum ccp_engine {
 	CCP_ENGINE_AES = 0,
@@ -609,6 +808,10 @@ enum ccp_engine {
 	CCP_ENGINE_PASSTHRU,
 	CCP_ENGINE_ZLIB_DECOMPRESS,
 	CCP_ENGINE_ECC,
+	CCP_ENGINE_SM2 = 8,	/* fixed value */
+	CCP_ENGINE_SM3,
+	CCP_ENGINE_SM4,
+	CCP_ENGINE_SM4_CTR,
 	CCP_ENGINE__LAST,
 };
@@ -657,6 +860,10 @@ struct ccp_cmd {
 		struct ccp_passthru_engine passthru;
 		struct ccp_passthru_nomap_engine passthru_nomap;
 		struct ccp_ecc_engine ecc;
+		struct ccp_sm2_engine sm2;
+		struct ccp_sm3_engine sm3;
+		struct ccp_sm4_engine sm4;
+		struct ccp_sm4_ctr_engine sm4_ctr;
 	} u;
 
 	/* Completion callback support */
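
For reference, a minimal sketch (not part of the patch) of how a caller could submit an SM4 request through the existing ccp_enqueue_cmd() interface once these engines are registered, assuming <linux/ccp.h> is included. The helper name example_sm4_cbc_encrypt and its parameters are hypothetical; note that the ccp_cmd must stay allocated until the completion callback runs, which is why the driver embeds it in its request contexts (see ccp_sm4_req_ctx above).

static int example_sm4_cbc_encrypt(struct ccp_cmd *cmd,
				   struct scatterlist *key,
				   struct scatterlist *iv,
				   struct scatterlist *src,
				   struct scatterlist *dst,
				   u64 len,
				   void (*done)(void *data, int err),
				   void *data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_SM4;
	cmd->u.sm4.mode = CCP_SM4_MODE_CBC;
	cmd->u.sm4.action = CCP_SM4_ACTION_ENCRYPT;
	cmd->u.sm4.select = 0;			/* standard engine */
	cmd->u.sm4.key = key;
	cmd->u.sm4.key_len = SM4_KEY_SIZE;	/* must be 16 bytes */
	cmd->u.sm4.iv = iv;
	cmd->u.sm4.iv_len = SM4_BLOCK_SIZE;	/* must be 16 bytes */
	cmd->u.sm4.src = src;
	cmd->u.sm4.dst = dst;
	cmd->u.sm4.src_len = len;	/* must be a multiple of 16 bytes */
	cmd->callback = done;
	cmd->data = data;

	/* Queues the command; returns -EINPROGRESS (or -EBUSY when
	 * backlogged) on success and invokes done() on completion.
	 */
	return ccp_enqueue_cmd(cmd);
}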