From 8580cd82ebd2f8569bce04560f75107fbcf7d09c Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Wed, 29 Oct 2025 17:24:30 +0800
Subject: [PATCH 1/2] anolis: crypto: sm3 - Refactor code and harden security

ANBZ: #26817

This patch refactors the SM3 driver to improve code clarity, performance,
and compatibility. Key changes include simplifying CPU feature verification
with x86_match_cpu() and boot_cpu_has(), reworking the inline assembly in
sm3_generic_block_fn() so its operand constraints are correct for GCC, and
raising cra_priority so the GMI driver takes precedence over sm3-avx on the
affected kernel branches. Security is hardened by replacing manual zeroing
with memzero_explicit() for state cleanup. Additionally, support for legacy
ZX-C processors is removed, since feature detection via FMS
(family/model/stepping) is unreliable there.

Signed-off-by: leoliu-oc
---
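Note (this text sits below the "---" fold, so git-am ignores it): the
constraint rewrite in sm3_generic_block_fn() is the substance of the "GCC
compliance" fix. Per the driver's use, the REP-prefixed GMI SM3 primitive
(0xf3 0x0f 0xa6 0xe8) advances %rsi and %rdi and counts %rcx down as it
executes, so those registers must be declared as read-write outputs; the
old asm listed them as plain inputs (and bound "in" to %rsi twice), which
let the compiler assume they were unchanged afterwards. A minimal
stand-alone sketch of the corrected pattern, with a hypothetical wrapper
name (not the driver function itself):

static void gmi_sm3_update_blocks(const void *in, void *state,
				  unsigned int nblocks)
{
	unsigned int ctrl = 0x20;	/* control word, as in the patch */
	long sel = -1;			/* %rax selector, as in the patch */

	/*
	 * "+S"/"+D"/"+c" mark the registers the instruction mutates as
	 * read-write; the "memory" clobber covers the buffer reads and
	 * state writes that have no named asm operand.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0xa6, 0xe8\n"
		     : "+S" (in), "+D" (state), "+c" (nblocks)
		     : "b" (ctrl), "a" (sel)
		     : "memory");
}

The memzero_explicit() change is the usual dead-store hardening: a plain
"*sctx = (struct sm3_state){};" at the end of the final step is a dead
store the optimizer may legally drop, while memzero_explicit() is
guaranteed to survive optimization.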
 arch/x86/crypto/sm3-zhaoxin-gmi.c | 69 +++++++++++--------------------
 1 file changed, 25 insertions(+), 44 deletions(-)

diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c
index c39b7de97ce1..504639e125c9 100644
--- a/arch/x86/crypto/sm3-zhaoxin-gmi.c
+++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c
@@ -21,53 +21,26 @@
 #include
 #include
 #include
+#include <asm/cpu_device_id.h>

-/*
- * Load supported features of the CPU to see if the SM3/SM4 is available.
- */
-static int gmi_available(void)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-	u32 eax, edx;
-
-	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
-	    ((c->x86 == 6) && (c->x86_model == 0x09)) ||
-	    (c->x86 > 6)) {
-		if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) {
-			eax = 0xC0000001;
-			__asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : );
-
-			if ((edx & 0x0030) != 0x0030)
-				return -ENODEV;
-
-			pr_notice("GMI SM3 detected by CPUID\n");
-			return 0;
-		}
-		pr_notice("GMI SM3 is available\n");
-		return 0;
-	}
-	return -ENODEV;
-}
+#define GMI_SM3_CRA_PRIORITY 400

 static void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
 {
-	unsigned long in, out, cnt, blksz, ctrl;
+	unsigned int cnt, ctrl = 0x20;
+	long padding = -1;
+	unsigned char *out = (unsigned char *)sst->state;

-	if (!blockcnt)
+	if (blockcnt < 0)
 		return;

-	in = (unsigned long)inp;
-	out = (unsigned long)(sst->state);
-	cnt = (unsigned long)blockcnt;
-	blksz = 0x20;
-	ctrl = -1;
+	cnt = (unsigned int)blockcnt;

 	__asm__ __volatile__(
-		".byte 0xf3,0x0f,0xa6,0xe8\n"
-		: "+S"(in)
-		: "S"(in), "D"(out), "c"(cnt), "b"(blksz), "a"(ctrl)
-		: "memory"
-	);
+		".byte 0xf3, 0x0f, 0xa6, 0xe8\n"
+		: "+S"(inp), "+D"(out), "+c"(cnt)
+		: "b"(ctrl), "a"(padding)
+		: "memory");
 }

 static inline int zx_sm3_init(struct shash_desc *desc)
@@ -100,7 +73,8 @@ static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out)

 	memcpy(digest, sctx->state, SM3_DIGEST_SIZE);

-	*sctx = (struct sm3_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
+
 	return 0;
 }

@@ -133,19 +107,25 @@ static struct shash_alg zx_sm3_alg = {
 	.base = {
 		.cra_name = "sm3",
 		.cra_driver_name = "sm3-zhaoxin-gmi",
-		.cra_priority = 300,
+		.cra_priority = GMI_SM3_CRA_PRIORITY,
 		.cra_blocksize = SM3_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 };

+static const struct x86_cpu_id zhaoxin_ccs_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_CCS, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_CCS, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_ccs_cpu_ids);
+
 static int __init zx_sm3_generic_mod_init(void)
 {
-	if (gmi_available() == 0)
-		return crypto_register_shash(&zx_sm3_alg);
+	if (!x86_match_cpu(zhaoxin_ccs_cpu_ids) ||
+	    !boot_cpu_has(X86_FEATURE_CCS_EN))
+		return -ENODEV;

-	pr_warn("GMI is unavailable on this platform.");
-	return -ENODEV;
+	return crypto_register_shash(&zx_sm3_alg);
 }

 static void __exit zx_sm3_generic_mod_fini(void)
@@ -161,3 +141,4 @@
 MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
 MODULE_ALIAS_CRYPTO("sm3-zhaoxin");
 MODULE_ALIAS_CRYPTO("sm3-zhaoxin-gmi");
+MODULE_VERSION("2.0.1");
-- 
Gitee

From 3489a902366860b8d74e8c9bad55f937314f7d92 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Wed, 29 Oct 2025 17:25:02 +0800
Subject: [PATCH 2/2] anolis: crypto: sm4 - Refactor mode handling and optimize performance

ANBZ: #26817

This patch addresses critical issues in the SM4 driver's mode
implementations and optimizes performance. Fixes include resolving
last-block processing in CTR, OFB, and CFB modes by updating IVs or falling
back to single-block ECB, correcting the block size definitions for OFB/CFB
(now 1 byte, as befits stream modes), and removing the redundant IV size
declaration in ECB mode. The inline assembly in rep_xcrypt() is reworked so
its operand constraints are correct for GCC, and cra_priority is raised so
the GMI driver takes precedence over sm4-aesni-avx2 across the affected
kernel branches. Legacy ZX-C processor support is dropped due to unreliable
GMI feature detection.

Signed-off-by: leoliu-oc
---
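Note (below the fold, ignored by git-am): the functional CTR fix is the
crypto_inc() added after the tail-block keystream generation. Without it,
the IV handed back after a partial final block still holds the counter
value that was just consumed, so a chained request would reuse keystream.
The corrected tail of sm4_cipher_ctr(), shown as a stand-alone excerpt
using the same identifiers and kernel helpers as the hunk below:

	if (walk.nbytes == walk.total && nbytes > 0) {
		/*
		 * ECB-encrypt the counter once to get one keystream block,
		 * advance the counter so the IV stays valid for chaining,
		 * then XOR only the bytes that remain.
		 */
		rep_xcrypt_ecb_one(walk.iv, keystream, ctx->rkey_enc, walk.iv);
		crypto_inc(walk.iv, SM4_BLOCK_SIZE);
		crypto_xor_cpy(dst, keystream, src, nbytes);
		dst += nbytes;
		src += nbytes;
		nbytes = 0;
	}

The cra_blocksize changes to 1 for OFB/CFB follow the same reasoning: as
stream modes they must accept arbitrary tail lengths, and the crypto core
only permits that for algorithms advertising a block size of 1.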
 arch/x86/crypto/sm4-zhaoxin-gmi.c | 356 ++++++++----------------------
 1 file changed, 88 insertions(+), 268 deletions(-)

diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c
index 288bae293607..bf9b682d6285 100644
--- a/arch/x86/crypto/sm4-zhaoxin-gmi.c
+++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <asm/cpu_device_id.h>

 #define SM4_ECB (1<<6)
 #define SM4_CBC (1<<7)
@@ -33,6 +34,8 @@

 #define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1])

+#define GMI_SM4_CRA_PRIORITY 600
+
 /* Control word. */
 struct sm4_cipher_data {
 	u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */
@@ -49,26 +52,27 @@ struct sm4_cipher_data {
 };

 static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
-		      struct sm4_cipher_data *sm4_data, u64 count)
+		      struct sm4_cipher_data *sm4_data, size_t count)
 {
-	unsigned long rax = sm4_data->cword.pad;
+	size_t pad = sm4_data->cword.pad;

-	/* Set the flag for encryption or decryption */
+	/* Set the flag for encryption or decryption */
 	if (sm4_data->cword.b.encdec == 1)
-		rax &= ~0x01;
+		pad &= ~0x01;
 	else
-		rax |= 0x01;
+		pad |= 0x01;

 	__asm__ __volatile__(
 		".byte 0xf3, 0x0f, 0xa7, 0xf0\n"
-		:
-		: "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv));
+		: "+S"(input), "+D"(output), "+c"(count)
+		: "a"(pad), "b"(key), "d"(iv)
+		: "memory");

 	return iv;
 }

 static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
-			  struct sm4_cipher_data *sm4_data, u64 count)
+			  struct sm4_cipher_data *sm4_data, size_t count)
 {
 	u8 oiv[SM4_BLOCK_SIZE] = {0};
 	u16 cnt_tmp;
@@ -85,9 +89,7 @@ static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
 	/* Get the available counter space before overflow. */
 	cnt_tmp = 0x10000 - cnt_tmp;

-	/*
-	 * Check there is enough counter space for the required blocks.
-	 */
+	/* Check there is enough counter space for the required blocks. */
 	if (cnt_tmp < count) {
 		/* Process the first part of data blocks. */
 		rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp);
@@ -116,8 +118,7 @@ static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
 	return iv;
 }

-static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, u8 *iv,
-			      struct sm4_cipher_data *sm4_data, u64 count)
+static u8 *rep_xcrypt_ecb_one(const u8 *input, u8 *output, void *key, u8 *iv)
 {
 	struct sm4_cipher_data cw;

@@ -128,7 +129,7 @@ static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, u8 *iv,
 	return rep_xcrypt(input, output, key, iv, &cw, 1);
 }

-/**
+/*
  * gmi_sm4_set_key - Set the sm4 key.
  * @tfm: The %crypto_skcipher that is used in the context.
  * @in_key: The input key.
@@ -257,7 +258,8 @@ static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *
 	}

 	if (walk.nbytes == walk.total && nbytes > 0) {
-		rep_xcrypt_ecb_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1);
+		rep_xcrypt_ecb_one(walk.iv, keystream, ctx->rkey_enc, walk.iv);
+		crypto_inc(walk.iv, SM4_BLOCK_SIZE);
 		crypto_xor_cpy(dst, keystream, src, nbytes);
 		dst += nbytes;
 		src += nbytes;
@@ -271,7 +273,7 @@ static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *
 }

 /*
- * ctr_encrypt is used for ZX-E and newer
+ * ctr_encrypt is used for ZX-E and newer
  */
 static int ctr_encrypt(struct skcipher_request *req)
 {
@@ -288,7 +290,7 @@ static int ctr_encrypt(struct skcipher_request *req)
 }

 /*
- * ctr_decrypt is used for ZX-E and newer
+ * ctr_decrypt is used for ZX-E and newer
  */
 static int ctr_decrypt(struct skcipher_request *req)
 {
@@ -304,17 +306,16 @@ static int ctr_decrypt(struct skcipher_request *req)
 }

 /*
- * sm4_ctr_zxc is used for ZXC+
+ * sm4_cipher_ofb is used for ZX-E and newer
  */
-static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
+static int sm4_cipher_ofb(struct skcipher_request *req, struct sm4_cipher_data *cw)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct skcipher_walk walk;
-	unsigned int nbytes;
+	unsigned int blocks, nbytes;
 	int err;
 	u8 *dst, *src;
-	u8 en_iv[SM4_BLOCK_SIZE] = {0};

 	err = skcipher_walk_virt(&walk, req, true);

@@ -323,21 +324,18 @@ static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
 	dst = walk.dst.virt.addr;

 	while (nbytes >= SM4_BLOCK_SIZE) {
-		rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1);
-		crypto_inc(walk.iv, SM4_BLOCK_SIZE);
-
-		crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE);
+		blocks = nbytes / SM4_BLOCK_SIZE;
+		rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, walk.iv,
+			   cw, blocks);

-		dst += SM4_BLOCK_SIZE;
-		src += SM4_BLOCK_SIZE;
-		nbytes -= SM4_BLOCK_SIZE;
+		dst += blocks * SM4_BLOCK_SIZE;
+		src += blocks * SM4_BLOCK_SIZE;
+		nbytes -= blocks * SM4_BLOCK_SIZE;
 	}

-	// tail
 	if (walk.nbytes == walk.total && nbytes > 0) {
-		rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1);
-		crypto_xor_cpy(dst, en_iv, src, nbytes);
-
+		rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+		crypto_xor_cpy(dst, src, walk.iv, nbytes);
 		dst += nbytes;
 		src += nbytes;
 		nbytes = 0;
@@ -349,40 +347,6 @@ static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
 	return err;
 }

-/*
- * ctr_encrypt_zxc is used for ZX-C+
- */
-static int ctr_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20 | SM4_CTR;
-
-	err = sm4_ctr_zxc(req, &cw);
-
-	return err;
-}
-
-/*
- * ctr_decrypt_zxc is used for ZX-C+
- */
-static int ctr_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20 | SM4_CTR;
-
-	err = sm4_ctr_zxc(req, &cw);
-
-	return err;
-}
-
 /*
  * ofb_encrypt is used for ZX-E and newer
  */
@@ -395,7 +359,7 @@ static int ofb_encrypt(struct skcipher_request *req)
 	cw.cword.b.encdec = 1;
 	cw.cword.pad |= 0x20 | SM4_OFB;

-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_ofb(req, &cw);

 	return err;
 }
@@ -411,83 +375,66 @@ static int ofb_decrypt(struct skcipher_request *req)
 	cw.cword.pad = 0;
 	cw.cword.pad |= 0x20 | SM4_OFB;

-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_ofb(req, &cw);

 	return err;
 }

 /*
- * sm4_ofb_zxc is used for ZX-C+
+ * sm4_cipher_cfb is used for ZX-E and newer
  */
-static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
+static int sm4_cipher_cfb(struct skcipher_request *req, struct sm4_cipher_data *cw)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct skcipher_walk walk;
-	unsigned int blocks;
+	unsigned int blocks, nbytes;
 	int err;
-
-	u32 n;
+	u8 *dst, *src;

 	err = skcipher_walk_virt(&walk, req, true);

-	while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
-		while (blocks--) {
-			rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1);
-
-			for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t))
-				*(size_t *)(walk.dst.virt.addr + n) =
-					*(size_t *)(walk.iv + n) ^
-					*(size_t *)(walk.src.virt.addr + n);
+	while ((nbytes = walk.nbytes) > 0) {
+		src = walk.src.virt.addr;
+		dst = walk.dst.virt.addr;

-			walk.src.virt.addr += SM4_BLOCK_SIZE;
-			walk.dst.virt.addr += SM4_BLOCK_SIZE;
+		while (nbytes >= SM4_BLOCK_SIZE) {
+			blocks = nbytes / SM4_BLOCK_SIZE;
+			rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, walk.iv,
+				   cw, blocks);
+			dst += blocks * SM4_BLOCK_SIZE;
+			src += blocks * SM4_BLOCK_SIZE;
+			nbytes -= blocks * SM4_BLOCK_SIZE;
 		}

-		err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
-	}
-
-	return err;
-}
-
-/*
- * ofb_encrypt_zxc is used for ZX-C+
- */
-static int ofb_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20 | SM4_OFB;
-
-	err = sm4_ofb_zxc(req, &cw);
-
-	return err;
-}
-
-/*
- * ofb_decrypt_zxc is used for ZX-C+
- */
-static int ofb_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
+		if (walk.nbytes == walk.total && nbytes > 0) {
+			u8 keystream[SM4_BLOCK_SIZE];
+
+			if (cw->cword.b.encdec) {
+				rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+				crypto_xor_cpy(keystream, walk.iv, src, nbytes);
+				memcpy(dst, keystream, nbytes);
+			} else {
+				rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+				crypto_xor_cpy(dst, src, walk.iv, nbytes);
+				memcpy(walk.iv, src, nbytes);
+			}

-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20 | SM4_OFB;
+			dst += nbytes;
+			src += nbytes;
+			nbytes = 0;
+		}

-	err = sm4_ofb_zxc(req, &cw);
+		err = skcipher_walk_done(&walk, nbytes);
+	}

 	return err;
 }

- /*
- * cfb_encrypt is used for ZX-E and newer.
- */
+/*
+* cfb_encrypt is used for ZX-E and newer. 
+*/
 static int cfb_encrypt(struct skcipher_request *req)
 {
 	int err;
@@ -497,14 +444,14 @@ static int cfb_encrypt(struct skcipher_request *req)
 	cw.cword.b.encdec = 1;
 	cw.cword.pad |= 0x20 | SM4_CFB;

-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_cfb(req, &cw);

 	return err;
 }

 /*
- * cfb_decrypt is used for ZX-E and newer.
- */
+* cfb_decrypt is used for ZX-E and newer.
+*/
 static int cfb_decrypt(struct skcipher_request *req)
 {
@@ -514,83 +461,7 @@ static int cfb_decrypt(struct skcipher_request *req)
 	cw.cword.pad = 0;
 	cw.cword.pad |= 0x20 | SM4_CFB;

-	err = sm4_cipher_common(req, &cw);
-
-	return err;
-}
-
-/*
- * sm4_cfb_zxc is used for ZX-C+
- */
-static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	unsigned int blocks;
-	int err;
-	u32 n;
-	size_t t;
-
-	err = skcipher_walk_virt(&walk, req, true);
-
-	while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
-		while (blocks--) {
-			rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1);
-			if (cw->cword.b.encdec)
-				for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t))
-					*(size_t *)(walk.dst.virt.addr + n) =
-						*(size_t *)(walk.iv + n) ^=
-						*(size_t *)(walk.src.virt.addr + n);
-
-			else
-				for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) {
-					t = *(size_t *)(walk.src.virt.addr + n);
-					*(size_t *)(walk.dst.virt.addr + n) =
-						*(size_t *)(walk.iv + n) ^ t;
-					*(size_t *)(walk.iv + n) = t;
-				}
-
-			walk.src.virt.addr += SM4_BLOCK_SIZE;
-			walk.dst.virt.addr += SM4_BLOCK_SIZE;
-		}
-
-		err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
-	}
-
-	return err;
-}
-
-/*
- * cfb_encrypt_zxc is used for ZX-C+
- */
-static int cfb_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20 | SM4_CFB;
-
-	err = sm4_cfb_zxc(req, &cw);
-
-	return err;
-}
-
-/*
- * cfb_decrypt_zxc is used for ZX-C+
- */
-static int cfb_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20 | SM4_CFB;
-
-	err = sm4_cfb_zxc(req, &cw);
+	err = sm4_cipher_cfb(req, &cw);

 	return err;
 }
@@ -600,7 +471,7 @@ static struct skcipher_alg sm4_algs[] = {
 	.base = {
 		.cra_name = "__ecb(sm4)",
 		.cra_driver_name = "__ecb-sm4-gmi",
-		.cra_priority = 300,
+		.cra_priority = GMI_SM4_CRA_PRIORITY,
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = SM4_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sm4_ctx),
@@ -608,7 +479,6 @@ static struct skcipher_alg sm4_algs[] = {
 	},
 	.min_keysize = SM4_KEY_SIZE,
 	.max_keysize = SM4_KEY_SIZE,
-	.ivsize = SM4_BLOCK_SIZE,
 	.walksize = 8 * SM4_BLOCK_SIZE,
 	.setkey = gmi_sm4_set_key,
 	.encrypt = ecb_encrypt,
@@ -619,7 +489,7 @@ static struct skcipher_alg sm4_algs[] = {
 	.base = {
 		.cra_name = "__cbc(sm4)",
 		.cra_driver_name = "__cbc-sm4-gmi",
-		.cra_priority = 300,
+		.cra_priority = GMI_SM4_CRA_PRIORITY,
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = SM4_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sm4_ctx),
@@ -638,7 +508,7 @@ static struct skcipher_alg sm4_algs[] = {
 	.base = {
 		.cra_name = "__ctr(sm4)",
 		.cra_driver_name = "__ctr-sm4-gmi",
-		.cra_priority = 300,
+		.cra_priority = GMI_SM4_CRA_PRIORITY,
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = 1, //SM4_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sm4_ctx),
@@ -658,9 +528,9 @@ static struct skcipher_alg sm4_algs[] = {
 	.base = {
"__ofb(sm4)", .cra_driver_name = "__ofb-sm4-gmi", - .cra_priority = 300, + .cra_priority = GMI_SM4_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, }, @@ -678,9 +548,9 @@ static struct skcipher_alg sm4_algs[] = { .base = { .cra_name = "__cfb(sm4)", .cra_driver_name = "__cfb-sm4-gmi", - .cra_priority = 300, + .cra_priority = GMI_SM4_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, }, @@ -697,49 +567,6 @@ static struct skcipher_alg sm4_algs[] = { static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)]; -static int gmi_zxc_check(void) -{ - int f_zxc = 0; - - struct cpuinfo_x86 *c = &cpu_data(0); - - if ((c->x86 > 6)) - f_zxc = 0; - else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || - ((c->x86 == 6) && (c->x86_model == 0x09))) - f_zxc = 1; - - return f_zxc; -} - -/* - * Load supported features of the CPU to see if the SM4 is available. - */ -static int gmi_ccs_available(void) -{ - struct cpuinfo_x86 *c = &cpu_data(0); - u32 eax, edx; - - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || - ((c->x86 == 6) && (c->x86_model == 0x09)) || - (c->x86 > 6)) { - if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { - eax = 0xC0000001; - __asm__ __volatile__ ("cpuid" : "=d"(edx) : "a"(eax) : ); - - if ((edx & 0x0030) != 0x0030) - return -ENODEV; - - pr_debug("GMI SM4 is detected by CPUID\n"); - return 0; - } - pr_debug("GMI SM4 is available\n"); - return 0; - - } - return -ENODEV; -} - static void gmi_sm4_exit(void) { int i; @@ -749,6 +576,14 @@ static void gmi_sm4_exit(void) crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } + +static const struct x86_cpu_id zhaoxin_ccs_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_CCS, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_CCS, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_ccs_cpu_ids); + static int __init gmi_sm4_init(void) { struct simd_skcipher_alg *simd; @@ -758,24 +593,9 @@ static int __init gmi_sm4_init(void) int err; int i; - if (gmi_ccs_available() != 0) + if (!x86_match_cpu(zhaoxin_ccs_cpu_ids) || !boot_cpu_has(X86_FEATURE_CCS_EN)) return -ENODEV; - if (gmi_zxc_check()) { - for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { - if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { - sm4_algs[i].encrypt = ctr_encrypt_zxc; - sm4_algs[i].decrypt = ctr_decrypt_zxc; - } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { - sm4_algs[i].encrypt = cfb_encrypt_zxc; - sm4_algs[i].decrypt = cfb_decrypt_zxc; - } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { - sm4_algs[i].encrypt = ofb_encrypt_zxc; - sm4_algs[i].decrypt = ofb_decrypt_zxc; - } - } - } - err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); if (err) return err; @@ -784,8 +604,7 @@ static int __init gmi_sm4_init(void) algname = sm4_algs[i].base.cra_name + 2; drvname = sm4_algs[i].base.cra_driver_name + 2; basename = sm4_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(sm4_algs + i, algname, - drvname, basename); + simd = simd_skcipher_create_compat(sm4_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; @@ -806,3 +625,4 @@ module_exit(gmi_sm4_exit); MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI"); MODULE_AUTHOR("GRX"); MODULE_LICENSE("GPL"); 
+MODULE_VERSION("2.0.1");
-- 
Gitee
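Not part of either patch: a quick post-merge sanity check. On CCS-capable
hardware, /proc/crypto should list sm3-zhaoxin-gmi and the *-sm4-gmi
drivers with the new priorities (400 and 600). The sketch below is a plain
AF_ALG consumer of whichever "sm3" provider wins priority selection
(nothing in it is driver-specific); with the series applied it should
print the standard SM3("abc") test vector, which begins 66c7f0f4... and
ends ...8f4ba8e0:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sm3",	/* kernel picks highest cra_priority */
	};
	unsigned char digest[32];
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;	/* no sm3 provider registered */
	op = accept(tfm, NULL, 0);
	if (op < 0)
		return 1;

	write(op, "abc", 3);
	if (read(op, digest, sizeof(digest)) != (ssize_t)sizeof(digest))
		return 1;

	for (unsigned int i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}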