Commit b31c17c8 authored by Phani Kiran Hemadri's avatar Phani Kiran Hemadri Committed by Herbert Xu

crypto: cavium/nitrox - Allocate asymmetric crypto command queues

This patch adds support to allocate CNN55XX device AQMQ command queues
required for submitting asymmetric crypto requests.
Signed-off-by: default avatarPhani Kiran Hemadri <phemadri@marvell.com>
Reviewed-by: default avatarSrikanth Jampala <jsrikanth@marvell.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent 43b970fa
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#define VERSION_LEN 32 #define VERSION_LEN 32
/* Maximum queues in PF mode */ /* Maximum queues in PF mode */
#define MAX_PF_QUEUES 64 #define MAX_PF_QUEUES 64
/* Maximum device queues */
#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
/* Maximum UCD Blocks */ /* Maximum UCD Blocks */
#define CNN55XX_MAX_UCD_BLOCKS 8 #define CNN55XX_MAX_UCD_BLOCKS 8
...@@ -208,6 +210,7 @@ enum vf_mode { ...@@ -208,6 +210,7 @@ enum vf_mode {
* @mode: Device mode PF/VF * @mode: Device mode PF/VF
* @ctx_pool: DMA pool for crypto context * @ctx_pool: DMA pool for crypto context
* @pkt_inq: Packet input rings * @pkt_inq: Packet input rings
* @aqmq: AQM command queues
* @qvec: MSI-X queue vectors information * @qvec: MSI-X queue vectors information
 * @iov: SR-IOV information	 * @iov: SR-IOV information
* @num_vecs: number of MSI-X vectors * @num_vecs: number of MSI-X vectors
...@@ -234,6 +237,7 @@ struct nitrox_device { ...@@ -234,6 +237,7 @@ struct nitrox_device {
struct dma_pool *ctx_pool; struct dma_pool *ctx_pool;
struct nitrox_cmdq *pkt_inq; struct nitrox_cmdq *pkt_inq;
struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;
struct nitrox_q_vector *qvec; struct nitrox_q_vector *qvec;
struct nitrox_iov iov; struct nitrox_iov iov;
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
/* packet input ring alignments */	/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16 #define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{ {
...@@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq) ...@@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{ {
struct nitrox_device *ndev = cmdq->ndev; struct nitrox_device *ndev;
if (!cmdq)
return;
if (!cmdq->unalign_base) if (!cmdq->unalign_base)
return; return;
ndev = cmdq->ndev;
cancel_work_sync(&cmdq->backlog_qflush); cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize, dma_free_coherent(DEV(ndev), cmdq->qsize,
...@@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) ...@@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
cmdq->instr_size = 0; cmdq->instr_size = 0;
} }
/**
 * nitrox_free_aqm_queues - release all AQM command queues of a device
 * @ndev: NITROX device
 *
 * Tears down and frees every per-queue AQM command queue entry,
 * leaving the aqmq[] slots NULL.
 */
static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
{
	int qno;

	for (qno = 0; qno < ndev->nr_queues; qno++) {
		struct nitrox_cmdq *cmdq = ndev->aqmq[qno];

		/* Clear the slot first; cleanup/free tolerate NULL. */
		ndev->aqmq[qno] = NULL;
		nitrox_cmdq_cleanup(cmdq);
		kzfree(cmdq);
	}
}
/**
 * nitrox_alloc_aqm_queues - allocate AQM command queues for asymmetric crypto
 * @ndev: NITROX device
 *
 * Allocates and initializes one AQM command queue per configured device
 * queue, recording the per-queue doorbell and completion-count CSR
 * addresses. On any failure everything allocated so far is unwound.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
{
	int qno;
	int ret = 0;

	for (qno = 0; qno < ndev->nr_queues; qno++) {
		struct nitrox_cmdq *cmdq;

		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
		if (!cmdq) {
			ret = -ENOMEM;
			break;
		}

		cmdq->ndev = ndev;
		cmdq->qno = qno;
		cmdq->instr_size = sizeof(struct aqmq_command_s);

		/* AQM Queue Doorbell Counter Register Address */
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, AQMQ_DRBLX(qno));

		/* AQM Queue Commands Completed Count Register Address */
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev,
							   AQMQ_CMD_CNTX(qno));

		ret = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
		if (ret) {
			kzfree(cmdq);
			break;
		}
		ndev->aqmq[qno] = cmdq;
	}

	/* Unwind partially-built queues on any failure. */
	if (ret)
		nitrox_free_aqm_queues(ndev);

	return ret;
}
static void nitrox_free_pktin_queues(struct nitrox_device *ndev) static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{ {
int i; int i;
...@@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) ...@@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
if (err) if (err)
destroy_crypto_dma_pool(ndev); destroy_crypto_dma_pool(ndev);
err = nitrox_alloc_aqm_queues(ndev);
if (err) {
nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev);
}
return err; return err;
} }
...@@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) ...@@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
*/ */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev) void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{ {
nitrox_free_aqm_queues(ndev);
nitrox_free_pktin_queues(ndev); nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev); destroy_crypto_dma_pool(ndev);
} }
...@@ -399,6 +399,36 @@ struct nps_pkt_instr { ...@@ -399,6 +399,36 @@ struct nps_pkt_instr {
u64 fdata[2]; u64 fdata[2];
}; };
/**
 * struct aqmq_command_s - The 32 byte command for AE processing.
 * @opcode: Request opcode
 * @param1: Request control parameter 1
 * @param2: Request control parameter 2
 * @dlen: Input length
 * @dptr: Input pointer points to buffer in remote host
 * @rptr: Result pointer points to buffer in remote host
 * @grp: AQM Group (0..7)
 * @cptr: Context pointer
 */
struct aqmq_command_s {
	__be16 opcode;
	__be16 param1;
	__be16 param2;
	__be16 dlen;
	__be64 dptr;
	__be64 rptr;
	union {
		__be64 word3;
		/*
		 * The bitfields must be wrapped in an anonymous struct:
		 * as direct union members, @grp and @cptr would each
		 * start at bit offset 0 and alias one another (and
		 * @word3), so writing one field would clobber the other.
		 */
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u64 grp : 3;
			u64 cptr : 61;
#else
			u64 cptr : 61;
			u64 grp : 3;
#endif
		};
	};
};
/** /**
* struct ctx_hdr - Book keeping data about the crypto context * struct ctx_hdr - Book keeping data about the crypto context
* @pool: Pool used to allocate crypto context * @pool: Pool used to allocate crypto context
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment