diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index a84996377bd83d63f9de619f96c8bc29d2ced914..450c08d13c7669435282e27b4c7bd1f33eccb341 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -39,7 +39,6 @@ struct sec_req {
 	struct sec_qp_ctx *qp_ctx;
 
 	struct sec_cipher_req c_req;
-	struct list_head backlog_head;
 
 	int err_type;
 	int req_id;
@@ -89,7 +88,6 @@ struct sec_qp_ctx {
 	struct sec_alg_res res[QM_Q_DEPTH];
 	struct sec_ctx *ctx;
 	spinlock_t req_lock;
-	struct list_head backlog;
 	struct hisi_acc_sgl_pool *c_in_pool;
 	struct hisi_acc_sgl_pool *c_out_pool;
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index fb59c6f043a16de7b901a432ba28e9623ccb8370..c08812be6540b9674d08f3f6ab0e2cd4f845c79e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -180,10 +180,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
 	if (ctx->fake_req_limit <=
 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
-		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+		req->fake_busy = true;
+		spin_unlock_bh(&qp_ctx->req_lock);
 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
-		spin_unlock_bh(&qp_ctx->req_lock);
 		return -EBUSY;
 	}
 	spin_unlock_bh(&qp_ctx->req_lock);
@@ -322,7 +322,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
 
 	spin_lock_init(&qp_ctx->req_lock);
 	idr_init(&qp_ctx->req_idr);
-	INIT_LIST_HEAD(&qp_ctx->backlog);
 
 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
@@ -832,31 +831,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
 		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
 }
 
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
-					  struct sec_qp_ctx *qp_ctx)
-{
-	struct sec_req *backlog_req = NULL;
-
-	spin_lock_bh(&qp_ctx->req_lock);
-	if (ctx->fake_req_limit >=
-	    atomic_read(&qp_ctx->qp->qp_status.used) &&
-	    !list_empty(&qp_ctx->backlog)) {
-		backlog_req = list_first_entry(&qp_ctx->backlog,
-				typeof(*backlog_req), backlog_head);
-		list_del(&backlog_req->backlog_head);
-	}
-	spin_unlock_bh(&qp_ctx->req_lock);
-
-	return backlog_req;
-}
-
 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 				  int err)
 {
 	struct skcipher_request *sk_req = req->c_req.sk_req;
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct skcipher_request *backlog_sk_req;
-	struct sec_req *backlog_req;
 
 	sec_free_req_id(req);
 
@@ -864,14 +842,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
 		sec_update_iv(req, SEC_SKCIPHER);
 
-	while (1) {
-		backlog_req = sec_back_req_clear(ctx, qp_ctx);
-		if (!backlog_req)
-			break;
-
-		backlog_sk_req = backlog_req->c_req.sk_req;
-		backlog_sk_req->base.complete(&backlog_sk_req->base,
-						-EINPROGRESS);
+	if (req->fake_busy) {
+		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
 	}
 
@@ -1017,6 +989,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	req->c_req.sk_req = sk_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
+	req->fake_busy = false;
 
 	ret = sec_skcipher_param_check(ctx, req);
 	if (unlikely(ret))