crypto: hisilicon/sec2 - update busy processing logic

Previously, when a SEC queue reached the 'fake busy' threshold,
a request flagged as 'fake busy' was still sent to the hardware
and the send function returned busy. Once such a request finished,
the SEC driver's callback recognized the 'fake busy' flag and
notified the user that the hardware was no longer busy by calling
the user's callback function.

Now, a request sent to busy hardware is cached in the SEC queue's
backlog and '-EBUSY' is returned to the user. After a request
finishes, the cached requests are handled in the callback function,
which notifies the corresponding users that the SEC queue can
process more requests.
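
For reference, the user-visible contract matches the generic crypto
API backlog semantics. The sketch below shows a caller using the
standard crypto_wait_req() helpers (illustration only, not part of
this patch; send_one_request() is a hypothetical name). With
CRYPTO_TFM_REQ_MAY_BACKLOG set, '-EBUSY' now means the request was
queued in the driver's backlog; the completion handler is later
invoked with '-EINPROGRESS' once the queue drains, and again with
the final result.

#include <crypto/skcipher.h>
#include <linux/crypto.h>

int send_one_request(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/*
	 * crypto_wait_req() treats both -EBUSY (backlogged) and
	 * -EINPROGRESS as "wait for the completion callback".
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}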

Signed-off-by: Kai Ye <yekai13@huawei.com>
Reviewed-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

@@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
 	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
 	if (unlikely(!req)) {
 		atomic64_inc(&dfx->invalid_req_cnt);
+		atomic_inc(&qp->qp_status.used);
 		return;
 	}
 	req->err_type = bd->type2.error_type;
@@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
 	int ret;
 
+	if (ctx->fake_req_limit <=
+	    atomic_read(&qp_ctx->qp->qp_status.used) &&
+	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		return -EBUSY;
+
 	mutex_lock(&qp_ctx->req_lock);
 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+
+	if (ctx->fake_req_limit <=
+	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
+		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
+		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
+		mutex_unlock(&qp_ctx->req_lock);
+		return -EBUSY;
+	}
 	mutex_unlock(&qp_ctx->req_lock);
-	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 
 	if (unlikely(ret == -EBUSY))
 		return -ENOBUFS;
 
-	if (!ret) {
-		if (req->fake_busy) {
-			atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
-			ret = -EBUSY;
-		} else {
-			ret = -EINPROGRESS;
-		}
+	if (likely(!ret)) {
+		ret = -EINPROGRESS;
+		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 	}
 
 	return ret;
@@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
 	qp_ctx->ctx = ctx;
 
 	mutex_init(&qp_ctx->req_lock);
-	atomic_set(&qp_ctx->pending_reqs, 0);
 	idr_init(&qp_ctx->req_idr);
+	INIT_LIST_HEAD(&qp_ctx->backlog);
 
 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
 						     SEC_SGL_SGE_NR);
@@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
 		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
 }
 
+static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
+				struct sec_qp_ctx *qp_ctx)
+{
+	struct sec_req *backlog_req = NULL;
+
+	mutex_lock(&qp_ctx->req_lock);
+	if (ctx->fake_req_limit >=
+	    atomic_read(&qp_ctx->qp->qp_status.used) &&
+	    !list_empty(&qp_ctx->backlog)) {
+		backlog_req = list_first_entry(&qp_ctx->backlog,
+				typeof(*backlog_req), backlog_head);
+		list_del(&backlog_req->backlog_head);
+	}
+	mutex_unlock(&qp_ctx->req_lock);
+
+	return backlog_req;
+}
+
 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 				  int err)
 {
 	struct skcipher_request *sk_req = req->c_req.sk_req;
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+	struct skcipher_request *backlog_sk_req;
+	struct sec_req *backlog_req;
 
-	atomic_dec(&qp_ctx->pending_reqs);
 	sec_free_req_id(req);
 
 	/* IV output at encrypto of CBC mode */
 	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
 		sec_update_iv(req, SEC_SKCIPHER);
 
-	if (req->fake_busy)
-		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+	while (1) {
+		backlog_req = sec_back_req_clear(ctx, qp_ctx);
+		if (!backlog_req)
+			break;
+
+		backlog_sk_req = backlog_req->c_req.sk_req;
+		backlog_sk_req->base.complete(&backlog_sk_req->base,
+						-EINPROGRESS);
+		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
+	}
 
 	sk_req->base.complete(&sk_req->base, err);
 }
@@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 	struct sec_cipher_req *c_req = &req->c_req;
 	size_t authsize = crypto_aead_authsize(tfm);
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+	struct aead_request *backlog_aead_req;
+	struct sec_req *backlog_req;
 	size_t sz;
 
-	atomic_dec(&qp_ctx->pending_reqs);
-
 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
 		sec_update_iv(req, SEC_AEAD);
@@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 	sec_free_req_id(req);
 
-	if (req->fake_busy)
-		a_req->base.complete(&a_req->base, -EINPROGRESS);
+	while (1) {
+		backlog_req = sec_back_req_clear(c, qp_ctx);
+		if (!backlog_req)
+			break;
+
+		backlog_aead_req = backlog_req->aead_req.aead_req;
+		backlog_aead_req->base.complete(&backlog_aead_req->base,
+						-EINPROGRESS);
+		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
+	}
 
 	a_req->base.complete(&a_req->base, err);
 }
 
 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
 {
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-
-	atomic_dec(&qp_ctx->pending_reqs);
 	sec_free_req_id(req);
 	sec_free_queue_id(ctx, req);
 }
@@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
 		return req->req_id;
 	}
 
-	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
-		req->fake_busy = true;
-	else
-		req->fake_busy = false;
-
 	return 0;
 }
@@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
 		sec_update_iv(req, ctx->alg_type);
 
 	ret = ctx->req_op->bd_send(ctx, req);
-	if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
 		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
 		goto err_send_req;
 	}
@@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	if (!sk_req->cryptlen)
 		return 0;
 
+	req->flag = sk_req->base.flags;
 	req->c_req.sk_req = sk_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
@@ -1530,6 +1570,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
 	int ret;
 
+	req->flag = a_req->base.flags;
 	req->aead_req.aead_req = a_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
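
The pattern the patch adds can be distilled as: a mutex-protected
backlog list, a soft 'fake busy' limit, a send path that still issues
the request but parks it and returns '-EBUSY' once the limit is
crossed, and a completion path that drains the list and signals
'-EINPROGRESS'. The sketch below is a self-contained userspace
analogue of that pattern (illustration only; names such as
queue_send() are hypothetical, not driver API):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct request {
	struct request *next;
	void (*complete)(struct request *req, int err);
};

struct queue {
	pthread_mutex_t lock;
	int used, limit;	/* soft 'fake busy' limit */
	struct request *backlog_head, *backlog_tail;
};

/* Like sec_bd_send(): the request still goes out, but once the soft
 * limit is crossed it is also parked on the backlog and the caller
 * is throttled with -EBUSY. */
int queue_send(struct queue *q, struct request *req)
{
	int ret = -EINPROGRESS;

	pthread_mutex_lock(&q->lock);
	q->used++;		/* request enters the "hardware" */
	if (q->used >= q->limit) {
		req->next = NULL;
		if (q->backlog_tail)
			q->backlog_tail->next = req;
		else
			q->backlog_head = req;
		q->backlog_tail = req;
		ret = -EBUSY;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* Like sec_back_req_clear(): pop one parked request once there is room. */
struct request *queue_backlog_clear(struct queue *q)
{
	struct request *req = NULL;

	pthread_mutex_lock(&q->lock);
	if (q->used < q->limit && q->backlog_head) {
		req = q->backlog_head;
		q->backlog_head = req->next;
		if (!q->backlog_head)
			q->backlog_tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return req;
}

/* Like the SEC callbacks: retire the finished request, then tell the
 * owners of parked requests (-EINPROGRESS) that the queue has room. */
void queue_complete(struct queue *q, struct request *req, int err)
{
	struct request *backlog;

	pthread_mutex_lock(&q->lock);
	q->used--;
	pthread_mutex_unlock(&q->lock);

	while ((backlog = queue_backlog_clear(q)) != NULL)
		backlog->complete(backlog, -EINPROGRESS);

	req->complete(req, err);
}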