diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f0cb3362f3560b963e056df3af3d0..257c64809b48d245ff8fa86a09afd2ddc5357408 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -80,5 +80,6 @@ config CRYPTO_DEV_HISI_TRNG
 	depends on ARM64 && ACPI
 	select HW_RANDOM
 	select CRYPTO_RNG
+	select CRYPTO_DRBG_CTR
 	help
 	  Support for HiSilicon TRNG Driver.
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index b3bd4aece611eba5ae99752a701857587d043f4a..ed5a99d9493609f2923cf79725bb8fc59da509fe 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -4670,22 +4670,30 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
 	u32 delay = 0;
 	int ret;
 
-	hisi_qm_dev_err_uninit(pf_qm);
-
-	/*
-	 * Check whether there is an ECC mbit error, If it occurs, need to
-	 * wait for soft reset to fix it.
-	 */
-	while (qm_check_dev_error(pf_qm)) {
-		msleep(++delay);
-		if (delay > QM_RESET_WAIT_TIMEOUT)
+	while (true) {
+		ret = qm_reset_prepare_ready(qm);
+		if (ret) {
+			pci_err(pdev, "FLR not ready!\n");
 			return;
-	}
+		}
 
-	ret = qm_reset_prepare_ready(qm);
-	if (ret) {
-		pci_err(pdev, "FLR not ready!\n");
-		return;
+		hisi_qm_dev_err_uninit(pf_qm);
+		/*
+		 * Check whether there is an ECC mbit error,
+		 * If it occurs, need to wait for soft reset
+		 * to fix it.
+		 */
+		if (qm_check_dev_error(qm)) {
+			qm_reset_bit_clear(qm);
+			if (delay > QM_RESET_WAIT_TIMEOUT) {
+				pci_err(pdev, "the hardware error was not recovered!\n");
+				return;
+			}
+
+			msleep(++delay);
+		} else {
+			break;
+		}
 	}
 
 	/* PF obtains the information of VF by querying the register. */
@@ -4699,16 +4707,23 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
 	ret = hisi_qm_stop(qm, QM_DOWN);
 	if (ret) {
 		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
-		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
-		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
-		return;
+		goto err_prepare;
 	}
 
 	ret = qm_wait_vf_prepare_finish(qm);
 	if (ret)
 		pci_err(pdev, "failed to stop by vfs in FLR!\n");
 
+	hisi_qm_cache_wb(qm);
 	pci_info(pdev, "FLR resetting...\n");
+	return;
+
+err_prepare:
+	pci_info(pdev, "FLR resetting prepare failed!\n");
+	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
+	atomic_set(&qm->status.flags, QM_STOP);
+	hisi_qm_cache_wb(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 02a377f55291c3e9ec7e0a7b2ef365bf72cc7557..ef6d1e011403f5b4ade39ca1dae639361415102b 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
 	u8 *a_ivin;
 	dma_addr_t a_ivin_dma;
 	struct aead_request *aead_req;
+	bool fallback;
 };
 
 /* SEC request of Crypto */
@@ -89,9 +90,7 @@ struct sec_auth_ctx {
 	dma_addr_t a_key_dma;
 	u8 *a_key;
 	u8 a_key_len;
-	u8 mac_len;
 	u8 a_alg;
-	bool fallback;
 	struct crypto_shash *hash_tfm;
 	struct crypto_aead *fallback_aead_tfm;
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index faeb63b4c0cc0e118e9f4eadde7ebb6eae907b7a..3e96bcff05ddc471caf09664be4f30bbe969cab1 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -947,15 +947,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
 	struct aead_request *aead_req = req->aead_req;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
 	size_t authsize = crypto_aead_authsize(tfm);
-	u8 *mac_out = req->out_mac;
 	struct scatterlist *sgl = aead_req->src;
+	u8 *mac_out = req->out_mac;
 	size_t copy_size;
 	off_t skip_size;
 
 	/* Copy input mac */
 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
-	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
-				       authsize, skip_size);
+	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
 	if (unlikely(copy_size != authsize))
 		return -EINVAL;
@@ -1119,10 +1118,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
 
-	if (unlikely(a_ctx->fallback_aead_tfm))
-		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
-	return 0;
+	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
 }
 
 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1138,7 +1134,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			   const u32 keylen, const enum sec_hash_alg a_alg,
 			   const enum sec_calg c_alg,
-			   const enum sec_mac_len mac_len,
 			   const enum sec_cmode c_mode)
 {
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1150,7 +1145,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	ctx->a_ctx.a_alg = a_alg;
 	ctx->c_ctx.c_alg = c_alg;
-	ctx->a_ctx.mac_len = mac_len;
 	c_ctx->c_mode = c_mode;
 
 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1161,13 +1155,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		}
 		memcpy(c_ctx->c_key, key, keylen);
 
-		if (unlikely(a_ctx->fallback_aead_tfm)) {
-			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
-			if (ret)
-				return ret;
-		}
-
-		return 0;
+		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
 	}
 
 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1186,10 +1174,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		goto bad_key;
 	}
 
-	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
-	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
 		ret = -EINVAL;
-		dev_err(dev, "MAC or AUTH key length error!\n");
+		dev_err(dev, "AUTH key length error!\n");
+		goto bad_key;
+	}
+
+	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+	if (ret) {
+		dev_err(dev, "set sec fallback key err!\n");
 		goto bad_key;
 	}
@@ -1201,27 +1194,19 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 }
 
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
-	u32 keylen)							\
-{									\
-	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
-			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
-			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
-			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)		\
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{									\
+	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);	\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
 
 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
 {
@@ -1443,9 +1428,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
 	struct aead_request *aead_req = req->aead_req.aead_req;
-	struct sec_cipher_req *c_req = &req->c_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+	size_t authsize = crypto_aead_authsize(tfm);
 	struct sec_aead_req *a_req = &req->aead_req;
-	size_t authsize = ctx->a_ctx.mac_len;
+	struct sec_cipher_req *c_req = &req->c_req;
 	u32 data_size = aead_req->cryptlen;
 	u8 flage = 0;
 	u8 cm, cl;
@@ -1486,10 +1472,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
 	struct aead_request *aead_req = req->aead_req.aead_req;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
-	size_t authsize = crypto_aead_authsize(tfm);
-	struct sec_cipher_req *c_req = &req->c_req;
 	struct sec_aead_req *a_req = &req->aead_req;
+	struct sec_cipher_req *c_req = &req->c_req;
 
 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1497,15 +1481,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
 		/*
 		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
 		 * the counter must set to 0x01
+		 * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
 		 */
-		ctx->a_ctx.mac_len = authsize;
-		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
 		set_aead_auth_iv(ctx, req);
-	}
-
-	/* GCM 12Byte Cipher_IV == Auth_IV */
-	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
-		ctx->a_ctx.mac_len = authsize;
+	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+		/* GCM 12Byte Cipher_IV == Auth_IV */
 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
 	}
 }
@@ -1515,9 +1495,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
 {
 	struct sec_aead_req *a_req = &req->aead_req;
 	struct aead_request *aq = a_req->aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+	size_t authsize = crypto_aead_authsize(tfm);
 
 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
-	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
 
 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1541,9 +1523,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
 {
 	struct sec_aead_req *a_req = &req->aead_req;
 	struct aead_request *aq = a_req->aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+	size_t authsize = crypto_aead_authsize(tfm);
 
 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
-	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
 
 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
 	sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1567,11 +1551,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
 	struct sec_aead_req *a_req = &req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
 	struct aead_request *aq = a_req->aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+	size_t authsize = crypto_aead_authsize(tfm);
 
 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
 
-	sec_sqe->type2.mac_key_alg =
-			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
 
 	sec_sqe->type2.mac_key_alg |=
 			cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1621,11 +1606,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
 	struct sec_aead_req *a_req = &req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
 	struct aead_request *aq = a_req->aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+	size_t authsize = crypto_aead_authsize(tfm);
 
 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
 
 	sqe3->auth_mac_key |=
-			cpu_to_le32((u32)(ctx->mac_len /
+			cpu_to_le32((u32)(authsize /
 			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
 
 	sqe3->auth_mac_key |=
@@ -1676,9 +1663,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 {
 	struct aead_request *a_req = req->aead_req.aead_req;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+	size_t authsize = crypto_aead_authsize(tfm);
 	struct sec_aead_req *aead_req = &req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
-	size_t authsize = crypto_aead_authsize(tfm);
 	size_t sz;
 
 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
@@ -1688,10 +1675,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 	if (!err && c_req->encrypt) {
 		struct scatterlist *sgl = a_req->dst;
 
-		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
-					  aead_req->out_mac,
-					  authsize, a_req->cryptlen +
-					  a_req->assoclen);
+		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+					  authsize, a_req->cryptlen + a_req->assoclen);
 		if (unlikely(sz != authsize)) {
 			dev_err(c->dev, "copy out mac err!\n");
 			err = -EINVAL;
@@ -1894,8 +1879,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
 
 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
 {
+	struct aead_alg *alg = crypto_aead_alg(tfm);
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
-	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+	const char *aead_name = alg->base.cra_name;
 	int ret;
 
 	ret = sec_aead_init(tfm);
@@ -1904,11 +1891,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
 		return ret;
 	}
 
-	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
-	if (IS_ERR(auth_ctx->hash_tfm)) {
+	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+	if (IS_ERR(a_ctx->hash_tfm)) {
 		dev_err(ctx->dev, "aead alloc shash error!\n");
 		sec_aead_exit(tfm);
-		return PTR_ERR(auth_ctx->hash_tfm);
+		return PTR_ERR(a_ctx->hash_tfm);
+	}
+
+	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+		crypto_free_shash(ctx->a_ctx.hash_tfm);
+		sec_aead_exit(tfm);
+		return PTR_ERR(a_ctx->fallback_aead_tfm);
 	}
 
 	return 0;
@@ -1918,6 +1914,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
 {
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
 
+	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
 	crypto_free_shash(ctx->a_ctx.hash_tfm);
 	sec_aead_exit(tfm);
 }
@@ -1944,7 +1941,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
 		sec_aead_exit(tfm);
 		return PTR_ERR(a_ctx->fallback_aead_tfm);
 	}
-	a_ctx->fallback = false;
 
 	return 0;
 }
@@ -2199,21 +2195,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
 	struct aead_request *req = sreq->aead_req.aead_req;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	size_t authsize = crypto_aead_authsize(tfm);
+	size_t sz = crypto_aead_authsize(tfm);
 	u8 c_mode = ctx->c_ctx.c_mode;
 	struct device *dev = ctx->dev;
 	int ret;
 
-	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
-	    req->assoclen > SEC_MAX_AAD_LEN)) {
-		dev_err(dev, "aead input spec error!\n");
+	/* Hardware does not handle cases where authsize is less than 4 bytes */
+	if (unlikely(sz < MIN_MAC_LEN)) {
+		sreq->aead_req.fallback = true;
 		return -EINVAL;
 	}
 
-	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
-	    (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
-	    authsize & MAC_LEN_MASK)))) {
-		dev_err(dev, "aead input mac length error!\n");
+	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+	    req->assoclen > SEC_MAX_AAD_LEN)) {
+		dev_err(dev, "aead input spec error!\n");
 		return -EINVAL;
 	}
@@ -2232,7 +2227,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	if (sreq->c_req.encrypt)
 		sreq->c_req.c_len = req->cryptlen;
 	else
-		sreq->c_req.c_len = req->cryptlen - authsize;
+		sreq->c_req.c_len = req->cryptlen - sz;
 	if (c_mode == SEC_CMODE_CBC) {
 		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
 			dev_err(dev, "aead crypto length error!\n");
@@ -2258,8 +2253,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	if (ctx->sec->qm.ver == QM_HW_V2) {
 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
-		    req->cryptlen <= authsize))) {
-			ctx->a_ctx.fallback = true;
+			     req->cryptlen <= authsize))) {
+			sreq->aead_req.fallback = true;
 			return -EINVAL;
 		}
 	}
@@ -2287,16 +2282,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
 				bool encrypt)
 {
 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
-	struct device *dev = ctx->dev;
 	struct aead_request *subreq;
 	int ret;
 
-	/* Kunpeng920 aead mode not support input 0 size */
-	if (!a_ctx->fallback_aead_tfm) {
-		dev_err(dev, "aead fallback tfm is NULL!\n");
-		return -EINVAL;
-	}
-
 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
 	if (!subreq)
 		return -ENOMEM;
@@ -2323,6 +2311,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	struct sec_req *req = aead_request_ctx(a_req);
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
 	int ret;
 
+	req->aead_req.fallback = false;
 	req->flag = a_req->base.flags;
 	req->aead_req.aead_req = a_req;
@@ -2332,7 +2321,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 
 	ret = sec_aead_param_check(ctx, req);
 	if (unlikely(ret)) {
-		if (ctx->a_ctx.fallback)
+		if (req->aead_req.fallback)
 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
 		return -EINVAL;
 	}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad9131c423407614de92f69a69bec6ff0..04725b514382f8e2371fba57aa99122400b8d4f1 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
 	SEC_A_HMAC_SHA512 = 0x15,
 };
 
-enum sec_mac_len {
-	SEC_HMAC_CCM_MAC = 16,
-	SEC_HMAC_GCM_MAC = 16,
-	SEC_SM3_MAC = 32,
-	SEC_HMAC_SM3_MAC = 32,
-	SEC_HMAC_MD5_MAC = 16,
-	SEC_HMAC_SHA1_MAC = 20,
-	SEC_HMAC_SHA256_MAC = 32,
-	SEC_HMAC_SHA512_MAC = 64,
-};
-
 enum sec_cmode {
 	SEC_CMODE_ECB = 0x0,
 	SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index a1eaac5f3311c736e6e9fb0c28cae29a83574f33..8f1743a053dfcca90815df0b3d5c39d909e20def 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -41,6 +41,7 @@
 #define SEED_SHIFT_24		24
 #define SEED_SHIFT_16		16
 #define SEED_SHIFT_8		8
+#define WAIT_PERIOD		20
 
 struct hisi_trng_list {
 	struct mutex lock;
@@ -53,6 +54,7 @@ struct hisi_trng {
 	struct hisi_trng_list *trng_list;
 	struct list_head list;
 	struct hwrng rng;
+	u32 ctx_num;
 	u32 ver;
 	bool is_used;
 	struct mutex mutex;
@@ -60,6 +62,7 @@ struct hisi_trng {
 
 struct hisi_trng_ctx {
 	struct hisi_trng *trng;
+	struct crypto_rng *drbg;
 };
 
 static atomic_t trng_active_devs;
@@ -85,6 +88,7 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
 			  unsigned int slen)
 {
 	struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct crypto_rng *drbg = ctx->drbg;
 	struct hisi_trng *trng = ctx->trng;
 	u32 val = 0;
 	int ret = 0;
@@ -95,6 +99,12 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
 		return -EINVAL;
 	}
 
+	/* when the device is busy, use soft tfm instead */
+	if (drbg) {
+		crypto_rng_set_entropy(drbg, seed, slen);
+		return crypto_rng_reset(drbg, NULL, 0);
+	}
+
 	writel(0x0, trng->base + SW_DRBG_BLOCKS);
 	hisi_trng_set_seed(trng, seed);
@@ -114,6 +124,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
 			      unsigned int slen, u8 *dstn, unsigned int dlen)
 {
 	struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct crypto_rng *drbg = ctx->drbg;
 	struct hisi_trng *trng = ctx->trng;
 	u32 data[SW_DRBG_DATA_NUM];
 	u32 currsize = 0;
@@ -127,6 +138,9 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
 		return -EINVAL;
 	}
 
+	if (drbg)
+		return crypto_rng_generate(drbg, src, slen, dstn, dlen);
+
 	do {
 		ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
 			val, val & BIT(1), SLEEP_US, TIMEOUT_US);
@@ -156,19 +170,41 @@
 static int hisi_trng_init(struct crypto_tfm *tfm)
 {
 	struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct hisi_trng *trng;
-	int ret = -EBUSY;
+	int ret = 0;
 
 	mutex_lock(&trng_devices.lock);
 	list_for_each_entry(trng, &trng_devices.list, list) {
 		if (!trng->is_used) {
 			trng->is_used = true;
+			trng->ctx_num++;
 			ctx->trng = trng;
-			ret = 0;
 			break;
 		}
 	}
 	mutex_unlock(&trng_devices.lock);
 
+	if (!ctx->trng) {
+		ctx->drbg = crypto_alloc_rng("drbg_nopr_ctr_aes256", 0, 0);
+		if (IS_ERR(ctx->drbg)) {
+			pr_err("Can not alloc rng!\n");
+			ret = PTR_ERR(ctx->drbg);
+			return ret;
+		}
+
+		mutex_lock(&trng_devices.lock);
+		if (list_empty(&trng_devices.list)) {
+			mutex_unlock(&trng_devices.lock);
+			crypto_free_rng(ctx->drbg);
+			return -ENODEV;
+		}
+
+		trng = list_first_entry(&trng_devices.list,
+					struct hisi_trng, list);
+		trng->ctx_num++;
+		ctx->trng = trng;
+		mutex_unlock(&trng_devices.lock);
+	}
+
 	return ret;
 }
@@ -177,7 +213,12 @@ static void hisi_trng_exit(struct crypto_tfm *tfm)
 	struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	mutex_lock(&trng_devices.lock);
-	ctx->trng->is_used = false;
+	if (!ctx->drbg)
+		ctx->trng->is_used = false;
+	else
+		crypto_free_rng(ctx->drbg);
+
+	ctx->trng->ctx_num--;
 	mutex_unlock(&trng_devices.lock);
 }
@@ -239,7 +280,7 @@ static int hisi_trng_del_from_list(struct hisi_trng *trng)
 	int ret = -EBUSY;
 
 	mutex_lock(&trng_devices.lock);
-	if (!trng->is_used) {
+	if (!trng->ctx_num) {
 		list_del(&trng->list);
 		ret = 0;
 	}
@@ -264,6 +305,7 @@ static int hisi_trng_probe(struct platform_device *pdev)
 		return PTR_ERR(trng->base);
 
 	trng->is_used = false;
+	trng->ctx_num = 0;
 	trng->ver = readl(trng->base + HISI_TRNG_VERSION);
 	if (!trng_devices.is_init) {
 		INIT_LIST_HEAD(&trng_devices.list);
@@ -310,7 +352,7 @@ static void hisi_trng_remove(struct platform_device *pdev)
 
 	/* Wait until the task is finished */
 	while (hisi_trng_del_from_list(trng))
-		;
+		msleep(WAIT_PERIOD);
 
 	if (trng->ver != HISI_TRNG_VER_V1 &&
 	    atomic_dec_return(&trng_active_devs) == 0)
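
Note (editorial, not part of the patch): the sec2 hunks above drop the tfm-wide `mac_len`/`fallback` state in favour of per-request `crypto_aead_authsize()` and a per-request `fallback` bit, and allocate the software fallback tfm unconditionally at context init. Below is a minimal sketch of that generic AEAD software-fallback pattern, mirroring sec_aead_ctx_init() and sec_aead_soft_crypto(); the names my_ctx, my_init and my_soft_encrypt are hypothetical and error handling is abbreviated.

/* Sketch of the AEAD software-fallback pattern used by the patch. */
#include <linux/err.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>

struct my_ctx {
	struct crypto_aead *fallback;	/* allocated once at tfm init */
};

static int my_init(struct crypto_aead *tfm)
{
	struct my_ctx *ctx = crypto_aead_ctx(tfm);
	const char *name = crypto_aead_alg(tfm)->base.cra_name;

	/* Ask for a software implementation of the same algorithm. */
	ctx->fallback = crypto_alloc_aead(name, 0,
					  CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int my_soft_encrypt(struct my_ctx *ctx, struct aead_request *req)
{
	struct aead_request *subreq;
	int ret;

	subreq = aead_request_alloc(ctx->fallback, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	/* Forward the original request's parameters to the fallback tfm. */
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	ret = crypto_aead_encrypt(subreq);
	aead_request_free(subreq);
	return ret;
}

With the fallback tfm guaranteed to exist, request-path checks (for example an authsize the hardware cannot produce) only need to set the per-request flag and let the caller divert to the soft path, which is exactly what sec_aead_spec_check() and sec_aead_crypto() do above.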