diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c
index e6f3d192dc08ef91955499de10589d18914e1745..79e3c031152f46c1926cb9fb4c84bc459ab6d280 100644
--- a/arch/x86/crypto/sm3-zhaoxin-gmi.c
+++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c
@@ -21,66 +21,26 @@
 #include
 #include
 #include
+#include <asm/cpu_device_id.h>
 
-#define DRIVER_VERSION "1.0.1"
+#define GMI_SM3_CRA_PRIORITY 400
 
-const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
-	0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
-	0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
-	0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
-	0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
-};
-EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash);
-
-/*
- * Load supported features of the CPU to see if the SM3/SM4 is available.
- */
-static int gmi_check(void)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-	u32 eax, edx;
-
-	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
-	    ((c->x86 == 6) && (c->x86_model == 0x09)) ||
-	    (c->x86 > 6)) {
-		if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) {
-			eax = 0xC0000001;
-			__asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : );
-
-			if ((edx & 0x0030) == 0x0030) {
-				pr_info("GMI SM3/4 detected by CPUID\n");
-				return 0;
-			}
-			return -ENODEV;
-		}
-		pr_info("GMI SM3/4 is available\n");
-		return 0;
-	}
-	return -ENODEV;
-}
-
-void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
+static void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
 {
-	u64 in, out, cnt;
+	unsigned int cnt, ctrl = 0x20;
+	long padding = -1;
+	unsigned char *out = (unsigned char *)sst->state;
 
-	if (!blockcnt)
+	if (blockcnt <= 0)
 		return;
 
-	in = (u64)inp;
-	out = (u64)(sst->state);
-	cnt = (u64)blockcnt;
+	cnt = (unsigned int)blockcnt;
 
 	__asm__ __volatile__(
-		"movq %0, %%rsi\n"
-		"movq %1, %%rdi\n"
-		"movq %2, %%rcx\n"
-		"movq $-1, %%rax\n"
-		"movq $0x20, %%rbx\n"
-		".byte 0xf3, 0x0f, 0xa6, 0xe8"
-		:
-		: "r"(in), "r"(out), "r"(cnt)
-		: "%rdi", "%rsi", "%rcx", "%rbx", "%rax", "memory"
-	);
+		".byte 0xf3, 0x0f, 0xa6, 0xe8\n"
+		: "+S"(inp), "+D"(out), "+c"(cnt)
+		: "b"(ctrl), "a"(padding)
+		: "memory");
 }
 
 static inline int zx_sm3_init(struct shash_desc *desc)
@@ -139,27 +99,35 @@ int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *
 EXPORT_SYMBOL(zx_sm3_finup);
 
 static struct shash_alg zx_sm3_alg = {
-	.digestsize = SM3_DIGEST_SIZE,
-	.init = zx_sm3_init,
-	.update = zx_sm3_update,
-	.final = zx_sm3_final,
-	.finup = zx_sm3_finup,
-	.descsize = sizeof(struct sm3_state),
+	.digestsize	= SM3_DIGEST_SIZE,
+	.init		= zx_sm3_init,
+	.update		= zx_sm3_update,
+	.final		= zx_sm3_final,
+	.finup		= zx_sm3_finup,
+	.descsize	= sizeof(struct sm3_state),
 	.base = {
-		.cra_name = "sm3",
-		.cra_driver_name = "sm3-zhaoxin-gmi",
-		.cra_priority = 300,
-		.cra_blocksize = SM3_BLOCK_SIZE,
-		.cra_module = THIS_MODULE,
+		.cra_name		= "sm3",
+		.cra_driver_name	= "sm3-zhaoxin-gmi",
+		.cra_priority		= GMI_SM3_CRA_PRIORITY,
+		.cra_blocksize		= SM3_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
 	}
 };
 
+static const struct x86_cpu_id zhaoxin_ccs_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_CCS, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_CCS, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_ccs_cpu_ids);
+
 static int __init zx_sm3_generic_mod_init(void)
 {
-	if (gmi_check()) {
-		pr_warn("GMI is unavailable on this platform.");
+	if (!x86_match_cpu(zhaoxin_ccs_cpu_ids) || !boot_cpu_has(X86_FEATURE_CCS_EN))
 		return -ENODEV;
-	}
+
+	return crypto_register_shash(&zx_sm3_alg);
 }
@@ -172,8 +140,7 @@ module_init(zx_sm3_generic_mod_init);
 module_exit(zx_sm3_generic_mod_fini);
 
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRIVER_VERSION);
 MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
-
 MODULE_ALIAS_CRYPTO("zx-sm3");
 MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm3");
+MODULE_VERSION("2.0.1");
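For quick verification from user space (not part of the patch; the message "abc" is an arbitrary test input): with the module loaded, an AF_ALG request for "sm3" should now resolve to "sm3-zhaoxin-gmi", since priority 400 outranks the generic C implementation, and /proc/crypto shows which driver won. A minimal sketch:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sm3",
		};
		unsigned char digest[32];
		int tfmfd, opfd, i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		write(opfd, "abc", 3);              /* message to hash */
		read(opfd, digest, sizeof(digest)); /* 32-byte SM3 digest */

		for (i = 0; i < 32; i++)
			printf("%02x", digest[i]);
		printf("\n");
		return 0;
	}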
diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c
index fa241b29978889cdc7b85755f772d2eb8d215dbe..0e0dd1025c642ae79058f8b9c2200c616d3366fb 100644
--- a/arch/x86/crypto/sm4-zhaoxin-gmi.c
+++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c
@@ -22,8 +22,9 @@
 #include
 #include
 #include
+#include <asm/cpu_device_id.h>
 
-#define DRIVER_VERSION "1.0.1"
+#define GMI_SM4_CRA_PRIORITY 600
 
 #define SM4_ECB (1<<6)
 #define SM4_CBC (1<<7)
@@ -50,38 +51,28 @@ struct sm4_cipher_data {
 	struct sm4_ctx keys;	/* Encryption key */
 };
 
-static inline u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
-			     struct sm4_cipher_data *sm4_data, u64 count)
+static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
+		      struct sm4_cipher_data *sm4_data, size_t count)
 {
-	u64 rax = sm4_data->cword.pad;
+	size_t pad = sm4_data->cword.pad;
 
 	/* Set the flag for encryption or decryption */
 	if (sm4_data->cword.b.encdec == 1)
-		rax &= ~0x01;
+		pad &= ~0x01;
 	else
-		rax |= 0x01;
+		pad |= 0x01;
 
 	__asm__ __volatile__(
-		"push %%rbx\n"
-		"movq %0, %%rsi\n"
-		"movq %1, %%rdi\n"
-		"movq %2, %%rax\n"
-		"movq %3, %%rbx\n"
-		"movq %4, %%rcx\n"
-		"movq %5, %%rdx\n"
-		".byte 0xf3,0x0f,0xa7,0xf0\n"
-		"pop %%rbx"
-		:
-		: "r"((u64)input), "r"((u64)output), "r"(rax), "r"((u64)key), "r"((u64)count),
-		  "r"((u64)iv)
-		: "%rsi", "%rdi", "%rax", "%rbx", "%rcx", "%rdx", "memory"
-	);
+		".byte 0xf3, 0x0f, 0xa7, 0xf0\n"
+		: "+S"(input), "+D"(output), "+c"(count)
+		: "a"(pad), "b"(key), "d"(iv)
+		: "memory");
 
 	return iv;
 }
 
 static inline u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
-				 struct sm4_cipher_data *sm4_data, u64 count)
+				 struct sm4_cipher_data *sm4_data, size_t count)
 {
 	u8 oiv[SM4_BLOCK_SIZE] = {0};
 	u32 cnt_tmp;
@@ -130,8 +121,7 @@ static inline u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
 	return iv;
 }
 
-static inline u8 *rep_xcrypt_ebc_ONE(const u8 *input, u8 *output, void *key, u8 *iv,
-				     struct sm4_cipher_data *sm4_data, u64 count)
+static u8 *rep_xcrypt_ecb_one(const u8 *input, u8 *output, void *key, u8 *iv)
 {
 	struct sm4_cipher_data cw;
 
@@ -185,7 +175,7 @@ static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_dat
 	return err;
 }
 
-static int ebc_encrypt(struct skcipher_request *req)
+static int ecb_encrypt(struct skcipher_request *req)
 {
 	int err;
 	struct sm4_cipher_data cw;
@@ -199,7 +189,7 @@ static int ebc_encrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int ebc_decrypt(struct skcipher_request *req)
+static int ecb_decrypt(struct skcipher_request *req)
 {
 	int err;
 	struct sm4_cipher_data cw;
@@ -273,7 +263,8 @@ static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *
 	}
 
 	if (walk.nbytes == walk.total && nbytes > 0) {
-		rep_xcrypt_ebc_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1);
+		rep_xcrypt_ecb_one(walk.iv, keystream, ctx->rkey_enc, walk.iv);
+		crypto_inc(walk.iv, SM4_BLOCK_SIZE);
 
 		crypto_xor_cpy(dst, keystream, src, nbytes);
 		dst += nbytes;
@@ -321,17 +312,16 @@ static int ctr_decrypt(struct skcipher_request *req)
 }
 
 /*
- * sm4_ctr_zxc is used for ZXC+
+ * sm4_cipher_ofb is used for ZX-E and newer
  */
-static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
+static int sm4_cipher_ofb(struct skcipher_request *req, struct sm4_cipher_data *cw)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct skcipher_walk walk;
-	unsigned int nbytes;
+	unsigned int blocks, nbytes;
 	int err;
-	u8 *iv = NULL, *dst, *src;
-	u8 en_iv[SM4_BLOCK_SIZE] = {0};
+	u8 *dst, *src;
 
 	err = skcipher_walk_virt(&walk, req, true);
 
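A behavioural fix rides along in the CTR hunk above: after generating the keystream block for a partial final segment, the counter is now bumped with crypto_inc(), so the IV left behind is the one the next request expects. crypto_inc() treats the IV as a big-endian counter; its effect is equivalent to this reference sketch (illustrative only, not kernel code):

	/* Big-endian increment, equivalent to crypto_inc(ctr, size). */
	static void ctr_inc_be(unsigned char *ctr, unsigned int size)
	{
		/* Add 1 at the least-significant (last) byte, ripple the carry. */
		while (size--)
			if (++ctr[size] != 0)
				break;
	}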
@@ -340,19 +330,18 @@ static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
 	dst = walk.dst.virt.addr;
 
 	while (nbytes >= SM4_BLOCK_SIZE) {
-		iv = rep_xcrypt_ebc_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1);
-		crypto_inc(walk.iv, SM4_BLOCK_SIZE);
-		crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE);
+		blocks = nbytes / SM4_BLOCK_SIZE;
+		rep_xcrypt(src, dst, ctx->rkey_enc, walk.iv, cw, blocks);
 
-		dst += SM4_BLOCK_SIZE;
-		src += SM4_BLOCK_SIZE;
-		nbytes -= SM4_BLOCK_SIZE;
+		dst += blocks * SM4_BLOCK_SIZE;
+		src += blocks * SM4_BLOCK_SIZE;
+		nbytes -= blocks * SM4_BLOCK_SIZE;
 	}
 
 	if (walk.nbytes == walk.total && nbytes > 0) {
-		rep_xcrypt_ebc_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1);
-		crypto_xor_cpy(dst, en_iv, src, nbytes);
-
+		rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+		crypto_xor_cpy(dst, src, walk.iv, nbytes);
 		dst += nbytes;
 		src += nbytes;
 		nbytes = 0;
@@ -364,40 +353,6 @@
 	return err;
 }
 
-/*
- * ctr_encrypt_zxc is used for ZX-C+
- */
-static int ctr_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20|SM4_CTR;
-
-	err = sm4_ctr_zxc(req, &cw);
-
-	return err;
-}
-
-/*
- * ctr_decrypt_zxc is used for ZX-C+
- */
-static int ctr_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20 | SM4_CTR;
-
-	err = sm4_ctr_zxc(req, &cw);
-
-	return err;
-}
-
 /*
  * ofb_encrypt is used for newer than ZX-C+
  */
@@ -410,7 +365,7 @@ static int ofb_encrypt(struct skcipher_request *req)
 	cw.cword.b.encdec = 1;
 	cw.cword.pad |= 0x20|SM4_OFB;
 
-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_ofb(req, &cw);
 
 	return err;
 }
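ofb_encrypt() above and ofb_decrypt() just below both funnel into sm4_cipher_ofb() because OFB is a pure keystream mode: the cipher only ever encrypts the chained IV, and the data is XORed in afterwards, so the two directions are the same operation. A reference model of what the hardware loop plus the partial-block tail compute (encrypt_block is a stand-in for one SM4 block encryption, not a real kernel API):

	/* OFB reference model, including the partial final segment. */
	static void ofb_crypt(const unsigned char *in, unsigned char *out,
			      size_t len, unsigned char iv[16],
			      void (*encrypt_block)(unsigned char block[16]))
	{
		while (len) {
			size_t i, n = len < 16 ? len : 16;

			encrypt_block(iv);	/* iv becomes the next keystream block */
			for (i = 0; i < n; i++)
				out[i] = in[i] ^ iv[i];
			in += n;
			out += n;
			len -= n;
		}
	}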
@@ -426,72 +381,59 @@ static int ofb_decrypt(struct skcipher_request *req)
 	cw.cword.pad = 0;
 	cw.cword.pad |= 0x20|SM4_OFB;
 
-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_ofb(req, &cw);
 
 	return err;
 }
 
 /*
- * sm4_ofb_zxc is used for ZX-C+
+ * sm4_cipher_cfb is used for ZX-E and newer
  */
-static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
+static int sm4_cipher_cfb(struct skcipher_request *req, struct sm4_cipher_data *cw)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct skcipher_walk walk;
-	unsigned int blocks;
+	unsigned int blocks, nbytes;
 	int err;
-	u32 n;
+	u8 *dst, *src;
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
-		while (blocks--) {
-			rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1);
-			for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t))
-				*(size_t *)(walk.dst.virt.addr + n) =
-					*(size_t *)(walk.iv + n) ^
-					*(size_t *)(walk.src.virt.addr + n);
-			walk.src.virt.addr += SM4_BLOCK_SIZE;
-			walk.dst.virt.addr += SM4_BLOCK_SIZE;
-		}
-
-		err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
-	}
-
-	return err;
-}
-
-/*
- * ofb_encrypt_zxc is used for ZX-C+
- */
-static int ofb_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20 | SM4_OFB;
+	while ((nbytes = walk.nbytes) > 0) {
+		src = walk.src.virt.addr;
+		dst = walk.dst.virt.addr;
 
-	err = sm4_ofb_zxc(req, &cw);
+		while (nbytes >= SM4_BLOCK_SIZE) {
+			blocks = nbytes / SM4_BLOCK_SIZE;
+			rep_xcrypt(src, dst, ctx->rkey_enc, walk.iv, cw, blocks);
 
-	return err;
-}
+			dst += blocks * SM4_BLOCK_SIZE;
+			src += blocks * SM4_BLOCK_SIZE;
+			nbytes -= blocks * SM4_BLOCK_SIZE;
+		}
 
-/*
- * ofb_decrypt_zxc is used for ZX-C+
- */
-static int ofb_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
+		if (walk.nbytes == walk.total && nbytes > 0) {
+			u8 keystream[SM4_BLOCK_SIZE];
+
+			if (cw->cword.b.encdec) {
+				rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+				crypto_xor_cpy(keystream, walk.iv, src, nbytes);
+				memcpy(dst, keystream, nbytes);
+			} else {
+				rep_xcrypt_ecb_one(walk.iv, walk.iv, ctx->rkey_enc, walk.iv);
+				crypto_xor_cpy(dst, src, walk.iv, nbytes);
+				memcpy(walk.iv, src, nbytes);
+			}
 
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20 | SM4_OFB;
+			dst += nbytes;
+			src += nbytes;
+			nbytes = 0;
+		}
 
-	err = sm4_ofb_zxc(req, &cw);
+		err = skcipher_walk_done(&walk, nbytes);
+	}
 
 	return err;
 }
@@ -508,7 +450,7 @@ static int cfb_encrypt(struct skcipher_request *req)
 	cw.cword.b.encdec = 1;
 	cw.cword.pad |= 0x20 | SM4_CFB;
 
-	err = sm4_cipher_common(req, &cw);
+	err = sm4_cipher_cfb(req, &cw);
 
 	return err;
 }
@@ -525,82 +467,7 @@ static int cfb_decrypt(struct skcipher_request *req)
 	cw.cword.pad = 0;
 	cw.cword.pad |= 0x20|SM4_CFB;
 
-	err = sm4_cipher_common(req, &cw);
-
-	return err;
-}
-
-/*
- * sm4_cfb_zxc is used for ZX-C+
- */
-static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	unsigned int blocks;
-	int err;
-	u32 n;
-	size_t t;
-
-	err = skcipher_walk_virt(&walk, req, true);
-
-	while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
-		while (blocks--) {
-			rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1);
-			if (cw->cword.b.encdec)
-				for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t))
-					*(size_t *)(walk.dst.virt.addr + n) =
-						*(size_t *)(walk.iv + n) ^=
-						*(size_t *)(walk.src.virt.addr + n);
-			else
-				for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) {
-					t = *(size_t *)(walk.src.virt.addr + n);
-					*(size_t *)(walk.dst.virt.addr + n) =
-						*(size_t *)(walk.iv + n) ^ t;
-					*(size_t *)(walk.iv + n) = t;
-				}
-
-			walk.src.virt.addr += SM4_BLOCK_SIZE;
-			walk.dst.virt.addr += SM4_BLOCK_SIZE;
-		}
-
-		err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
-	}
-
-	return err;
-}
-
-/*
- * cfb_encrypt_zxc is used for ZX-C+
- */
-static int cfb_encrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 1;
-	cw.cword.pad |= 0x20|SM4_CFB;
-
-	err = sm4_cfb_zxc(req, &cw);
-
-	return err;
-}
-
-/*
- * cfb_decrypt_zxc is used for ZX-C+
- */
-static int cfb_decrypt_zxc(struct skcipher_request *req)
-{
-	int err;
-	struct sm4_cipher_data cw;
-
-	cw.cword.pad = 0;
-	cw.cword.b.encdec = 0;
-	cw.cword.pad |= 0x20|SM4_CFB;
-
-	err = sm4_cfb_zxc(req, &cw);
+	err = sm4_cipher_cfb(req, &cw);
 
 	return err;
 }
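Unlike OFB, CFB feeds the ciphertext, not the keystream, back into the IV chain, which is why the decrypt tail above saves the source bytes with memcpy(walk.iv, src, nbytes) while the encrypt tail stores its output. A reference model for one final, possibly partial, segment (encrypt_block again stands in for a single SM4 block encryption; illustrative only):

	#include <string.h>

	/* CFB reference model for the final (possibly partial) segment. */
	static void cfb_segment(const unsigned char *in, unsigned char *out,
				size_t n, unsigned char iv[16], int enc,
				void (*encrypt_block)(unsigned char block[16]))
	{
		size_t i;

		encrypt_block(iv);	/* keystream = E_k(previous feedback) */
		for (i = 0; i < n; i++)
			out[i] = in[i] ^ iv[i];
		/* The next feedback is always the ciphertext. */
		memcpy(iv, enc ? out : in, n);
	}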
@@ -608,148 +475,104 @@
 static struct skcipher_alg sm4_algs[] = {
 	{
 		.base = {
-			.cra_name = "__ecb(sm4)",
-			.cra_driver_name = "__ecb-sm4-gmi",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = SM4_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct sm4_ctx),
-			.cra_module = THIS_MODULE,
+			.cra_name		= "__ecb(sm4)",
+			.cra_driver_name	= "__ecb-sm4-gmi",
+			.cra_priority		= GMI_SM4_CRA_PRIORITY,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= SM4_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct sm4_ctx),
+			.cra_module		= THIS_MODULE,
 		},
-		.min_keysize = SM4_KEY_SIZE,
-		.max_keysize = SM4_KEY_SIZE,
-		.ivsize = SM4_BLOCK_SIZE,
-		.walksize = 8 * SM4_BLOCK_SIZE,
-		.setkey = gmi_sm4_set_key,
-		.encrypt = ebc_encrypt,
-		.decrypt = ebc_decrypt,
+		.min_keysize	= SM4_KEY_SIZE,
+		.max_keysize	= SM4_KEY_SIZE,
+		.walksize	= 8 * SM4_BLOCK_SIZE,
+		.setkey		= gmi_sm4_set_key,
+		.encrypt	= ecb_encrypt,
+		.decrypt	= ecb_decrypt,
 	}, {
 		.base = {
-			.cra_name = "__cbc(sm4)",
-			.cra_driver_name = "__cbc-sm4-gmi",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = SM4_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct sm4_ctx),
-			.cra_module = THIS_MODULE,
+			.cra_name		= "__cbc(sm4)",
+			.cra_driver_name	= "__cbc-sm4-gmi",
+			.cra_priority		= GMI_SM4_CRA_PRIORITY,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= SM4_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct sm4_ctx),
+			.cra_module		= THIS_MODULE,
 		},
-		.min_keysize = SM4_KEY_SIZE,
-		.max_keysize = SM4_KEY_SIZE,
-		.ivsize = SM4_BLOCK_SIZE,
-		.walksize = 8 * SM4_BLOCK_SIZE,
-		.setkey = gmi_sm4_set_key,
-		.encrypt = cbc_encrypt,
-		.decrypt = cbc_decrypt,
+		.min_keysize	= SM4_KEY_SIZE,
+		.max_keysize	= SM4_KEY_SIZE,
+		.ivsize		= SM4_BLOCK_SIZE,
+		.walksize	= 8 * SM4_BLOCK_SIZE,
+		.setkey		= gmi_sm4_set_key,
+		.encrypt	= cbc_encrypt,
+		.decrypt	= cbc_decrypt,
 	}, {
 		.base = {
-			.cra_name = "__ctr(sm4)",
-			.cra_driver_name = "__ctr-sm4-gmi",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = 1, //SM4_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct sm4_ctx),
-			.cra_module = THIS_MODULE,
+			.cra_name		= "__ctr(sm4)",
+			.cra_driver_name	= "__ctr-sm4-gmi",
+			.cra_priority		= GMI_SM4_CRA_PRIORITY,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= 1,
+			.cra_ctxsize		= sizeof(struct sm4_ctx),
+			.cra_module		= THIS_MODULE,
 		},
-		.min_keysize = SM4_KEY_SIZE,
-		.max_keysize = SM4_KEY_SIZE,
-		.ivsize = SM4_BLOCK_SIZE,
-		.chunksize = SM4_BLOCK_SIZE,
-		.walksize = 8 * SM4_BLOCK_SIZE,
-		.setkey = gmi_sm4_set_key,
-		.encrypt = ctr_encrypt,
-		.decrypt = ctr_decrypt,
+		.min_keysize	= SM4_KEY_SIZE,
+		.max_keysize	= SM4_KEY_SIZE,
+		.ivsize		= SM4_BLOCK_SIZE,
+		.chunksize	= SM4_BLOCK_SIZE,
+		.walksize	= 8 * SM4_BLOCK_SIZE,
+		.setkey		= gmi_sm4_set_key,
+		.encrypt	= ctr_encrypt,
+		.decrypt	= ctr_decrypt,
 	}, {
 		.base = {
-			.cra_name = "__ofb(sm4)",
-			.cra_driver_name = "__ofb-sm4-gmi",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = SM4_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct sm4_ctx),
-			.cra_module = THIS_MODULE,
+			.cra_name		= "__ofb(sm4)",
+			.cra_driver_name	= "__ofb-sm4-gmi",
+			.cra_priority		= GMI_SM4_CRA_PRIORITY,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= 1,
+			.cra_ctxsize		= sizeof(struct sm4_ctx),
+			.cra_module		= THIS_MODULE,
 		},
-		.min_keysize = SM4_KEY_SIZE,
-		.max_keysize = SM4_KEY_SIZE,
-		.ivsize = SM4_BLOCK_SIZE,
-		.chunksize = SM4_BLOCK_SIZE,
-		.walksize = 8 * SM4_BLOCK_SIZE,
-		.setkey = gmi_sm4_set_key,
-		.encrypt = ofb_encrypt,
-		.decrypt = ofb_decrypt,
+		.min_keysize	= SM4_KEY_SIZE,
+		.max_keysize	= SM4_KEY_SIZE,
+		.ivsize		= SM4_BLOCK_SIZE,
+		.chunksize	= SM4_BLOCK_SIZE,
+		.walksize	= 8 * SM4_BLOCK_SIZE,
+		.setkey		= gmi_sm4_set_key,
+		.encrypt	= ofb_encrypt,
+		.decrypt	= ofb_decrypt,
 	}, {
 		.base = {
-			.cra_name = "__cfb(sm4)",
-			.cra_driver_name = "__cfb-sm4-gmi",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = SM4_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct sm4_ctx),
-			.cra_module = THIS_MODULE,
+			.cra_name		= "__cfb(sm4)",
+			.cra_driver_name	= "__cfb-sm4-gmi",
+			.cra_priority		= GMI_SM4_CRA_PRIORITY,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= 1,
+			.cra_ctxsize		= sizeof(struct sm4_ctx),
+			.cra_module		= THIS_MODULE,
 		},
-		.min_keysize = SM4_KEY_SIZE,
-		.max_keysize = SM4_KEY_SIZE,
-		.ivsize = SM4_BLOCK_SIZE,
-		.chunksize = SM4_BLOCK_SIZE,
-		.walksize = 8 * SM4_BLOCK_SIZE,
-		.setkey = gmi_sm4_set_key,
-		.encrypt = cfb_encrypt,
-		.decrypt = cfb_decrypt,
+		.min_keysize	= SM4_KEY_SIZE,
+		.max_keysize	= SM4_KEY_SIZE,
+		.ivsize		= SM4_BLOCK_SIZE,
+		.chunksize	= SM4_BLOCK_SIZE,
+		.walksize	= 8 * SM4_BLOCK_SIZE,
+		.setkey		= gmi_sm4_set_key,
+		.encrypt	= cfb_encrypt,
+		.decrypt	= cfb_decrypt,
 	}
 };
 
 static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)];
 
-static int gmi_zxc_check(void)
-{
-	int f_zxc = 0;
-	struct cpuinfo_x86 *c = &cpu_data(0);
-
-	if ((c->x86 > 6)) {
-		f_zxc = 0;
-	} else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
-		   ((c->x86 == 6) && (c->x86_model == 0x09))) {
-		f_zxc = 1;
-	}
-
-	return f_zxc;
-}
-
-/*
- * Load supported features of the CPU to see if the SM3/SM4 is available.
- */
-static int gmi_check(void)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-	u32 eax, edx;
-
-	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
-	    ((c->x86 == 6) && (c->x86_model == 0x09)) ||
-	    (c->x86 > 6)) {
-		if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) {
-			eax = 0xC0000001;
-			__asm__ __volatile__ ("cpuid" : "=d"(edx) : "a"(eax) : );
-
-			if ((edx & 0x0030) == 0x0030) {
-				pr_debug("GMI SM3/SM4 is detected by CPUID\n");
-				return 0;
-			}
-			return -ENODEV;
-		}
-		pr_debug("GMI SM3/4 is available\n");
-		return 0;
-
-	}
-	return -ENODEV;
-}
-
 static void gmi_sm4_exit(void)
 {
 	int i;
@@ -759,6 +582,14 @@ static void gmi_sm4_exit(void)
 	crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
 }
 
+
+static const struct x86_cpu_id zhaoxin_ccs_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_CCS, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_CCS, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_ccs_cpu_ids);
+
 static int __init gmi_sm4_init(void)
 {
 	struct simd_skcipher_alg *simd;
@@ -768,24 +599,9 @@ static int __init gmi_sm4_init(void)
 	int err;
 	int i;
 
-	if (gmi_check())
+	if (!x86_match_cpu(zhaoxin_ccs_cpu_ids) || !boot_cpu_has(X86_FEATURE_CCS_EN))
 		return -ENODEV;
 
-	if (gmi_zxc_check()) {
-		for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) {
-			if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) {
-				sm4_algs[i].encrypt = ctr_encrypt_zxc;
-				sm4_algs[i].decrypt = ctr_decrypt_zxc;
-			} else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) {
-				sm4_algs[i].encrypt = cfb_encrypt_zxc;
-				sm4_algs[i].decrypt = cfb_decrypt_zxc;
-			} else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) {
-				sm4_algs[i].encrypt = ofb_encrypt_zxc;
-				sm4_algs[i].decrypt = ofb_decrypt_zxc;
-			}
-		}
-	}
-
 	err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
 	if (err)
 		return err;
@@ -814,5 +630,5 @@ module_exit(gmi_sm4_exit);
 
 MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI");
 MODULE_AUTHOR("GRX");
-MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0.1");
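The deleted gmi_check()/gmi_zxc_check() probing is now expressed declaratively: the x86_cpu_id tables match family-7 Zhaoxin/Centaur parts with the CCS feature, MODULE_DEVICE_TABLE lets udev autoload the modules, and only CCS_EN still needs an explicit runtime check. The raw probe the old code performed can still be useful when debugging from user space; a sketch of the same leaf-0xC0000001 check (GCC's <cpuid.h> assumed, illustrative only):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* 0xC0000000 reports the highest Centaur/Zhaoxin extended leaf. */
		__cpuid(0xC0000000, eax, ebx, ecx, edx);
		if (eax < 0xC0000001)
			return 1;

		/* Leaf 0xC0000001 EDX bits 4-5 (mask 0x30): CCS present and enabled. */
		__cpuid(0xC0000001, eax, ebx, ecx, edx);
		printf("GMI SM3/SM4 %savailable\n", (edx & 0x30) == 0x30 ? "" : "un");
		return 0;
	}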