crypto: cavium/nitrox - NITROX command queue changes.

Use node-based allocations for the command queues. Take the DMA address
alignment into account when calculating the queue base address. Add
checks in the cleanup functions, and make minor changes to the queue
variable names.

Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Reviewed-by: Gadam Sreerama <sgadam@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author: Srikanth Jampala
Date: 2018-09-29 13:49:09 +05:30
Committed by: Herbert Xu
Parent: 0b501e7a4f
Commit: e7892dd6d8
5 changed files with 96 additions and 81 deletions
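For context, the allocation scheme the commit message describes can be sketched as below: allocate the per-device array of command queues on the device's NUMA node, over-allocate the DMA region by the alignment, and derive the aligned queue base from the returned DMA handle. This is an illustration only, under assumed names; the structure example_cmdq, the fields unalign_base/unalign_dma/base/dma, CMDQ_ALIGN_BYTES, and the helper functions are placeholders for the sketch, not the code added by this commit.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#define CMDQ_ALIGN_BYTES 64             /* assumed queue base alignment */

struct example_cmdq {
        struct device *dev;             /* DMA device */
        void *unalign_base;             /* CPU address as returned by the allocator */
        dma_addr_t unalign_dma;         /* DMA handle as returned by the allocator */
        u8 *base;                       /* aligned CPU base used for instruction slots */
        dma_addr_t dma;                 /* aligned DMA base programmed into hardware */
        u32 qsize;
};

/* allocate the per-device queue array on the device's NUMA node */
static struct example_cmdq *example_alloc_queues(int nr_queues, int node)
{
        return kcalloc_node(nr_queues, sizeof(struct example_cmdq),
                            GFP_KERNEL, node);
}

static int example_cmdq_init(struct example_cmdq *cmdq, int qlen,
                             int instr_size)
{
        /* over-allocate so the base can be moved up to the next boundary */
        cmdq->qsize = (qlen * instr_size) + CMDQ_ALIGN_BYTES;
        cmdq->unalign_base = dma_alloc_coherent(cmdq->dev, cmdq->qsize,
                                                &cmdq->unalign_dma, GFP_KERNEL);
        if (!cmdq->unalign_base)
                return -ENOMEM;

        /* align the DMA address, then shift the CPU pointer by the same offset */
        cmdq->dma = ALIGN(cmdq->unalign_dma, CMDQ_ALIGN_BYTES);
        cmdq->base = (u8 *)cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
        return 0;
}

static void example_cmdq_cleanup(struct example_cmdq *cmdq)
{
        /* the kind of check the message mentions: tolerate queues never set up */
        if (!cmdq || !cmdq->unalign_base)
                return;

        dma_free_coherent(cmdq->dev, cmdq->qsize,
                          cmdq->unalign_base, cmdq->unalign_dma);
        cmdq->unalign_base = NULL;
        cmdq->base = NULL;
}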

@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
 {
         INIT_LIST_HEAD(&sr->backlog);
 
-        spin_lock_bh(&cmdq->backlog_lock);
+        spin_lock_bh(&cmdq->backlog_qlock);
         list_add_tail(&sr->backlog, &cmdq->backlog_head);
         atomic_inc(&cmdq->backlog_count);
         atomic_set(&sr->status, REQ_BACKLOG);
-        spin_unlock_bh(&cmdq->backlog_lock);
+        spin_unlock_bh(&cmdq->backlog_qlock);
 }
 
 static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
 {
         INIT_LIST_HEAD(&sr->response);
 
-        spin_lock_bh(&cmdq->response_lock);
+        spin_lock_bh(&cmdq->resp_qlock);
         list_add_tail(&sr->response, &cmdq->response_head);
-        spin_unlock_bh(&cmdq->response_lock);
+        spin_unlock_bh(&cmdq->resp_qlock);
 }
 
 static inline void response_list_del(struct nitrox_softreq *sr,
                                      struct nitrox_cmdq *cmdq)
 {
-        spin_lock_bh(&cmdq->response_lock);
+        spin_lock_bh(&cmdq->resp_qlock);
         list_del(&sr->response);
-        spin_unlock_bh(&cmdq->response_lock);
+        spin_unlock_bh(&cmdq->resp_qlock);
 }
 
 static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
         int idx;
         u8 *ent;
 
-        spin_lock_bh(&cmdq->cmdq_lock);
+        spin_lock_bh(&cmdq->cmd_qlock);
 
         idx = cmdq->write_idx;
         /* copy the instruction */
-        ent = cmdq->head + (idx * cmdq->instr_size);
+        ent = cmdq->base + (idx * cmdq->instr_size);
         memcpy(ent, &sr->instr, cmdq->instr_size);
 
         atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,7 @@ static void post_se_instr(struct nitrox_softreq *sr,
         cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
 
-        spin_unlock_bh(&cmdq->cmdq_lock);
+        spin_unlock_bh(&cmdq->cmd_qlock);
 
         /* increment the posted command count */
         atomic64_inc(&ndev->stats.posted);
@@ -474,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
         if (!atomic_read(&cmdq->backlog_count))
                 return 0;
 
-        spin_lock_bh(&cmdq->backlog_lock);
+        spin_lock_bh(&cmdq->backlog_qlock);
 
         list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                 struct skcipher_request *skreq;
@@ -497,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
                 /* backlog requests are posted, wakeup with -EINPROGRESS */
                 skcipher_request_complete(skreq, -EINPROGRESS);
         }
-        spin_unlock_bh(&cmdq->backlog_lock);
+        spin_unlock_bh(&cmdq->backlog_qlock);
 
         return ret;
 }
@@ -578,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
         /* select the queue */
         qno = smp_processor_id() % ndev->nr_queues;
 
-        sr->cmdq = &ndev->pkt_cmdqs[qno];
+        sr->cmdq = &ndev->pkt_inq[qno];
 
         /*
          * 64-Byte Instruction Format