Commit c9613335 authored by Nagadheeraj Rottela, committed by Herbert Xu

crypto: cavium/nitrox - Added AEAD cipher support

Added support to offload AEAD ciphers to NITROX. The only AEAD cipher
currently supported is 'gcm(aes)'.
Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 2326828e
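
The new transform is reached through the kernel's generic AEAD API rather than any NITROX-specific interface; once n5_aes_gcm is registered with PRIO (4001) above the software generic, a plain "gcm(aes)" allocation picks it up. A minimal caller sketch (illustrative only — the function name, the in-place buffer layout, and the 16-byte tag are assumptions of this sketch, not part of the patch):

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt cryptlen bytes in place through whichever gcm(aes)
 * implementation wins the priority race. 'sg' is assumed to cover
 * assoclen + cryptlen + 16 bytes (room for the tag).
 */
static int demo_gcm_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
                            struct scatterlist *sg, unsigned int assoclen,
                            unsigned int cryptlen)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;
        err = crypto_aead_setauthsize(tfm, 16);
        if (err)
                goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, sg, sg, cryptlen, iv);

        /* the driver completes asynchronously; wait for the callback */
        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}
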
@@ -7,7 +7,9 @@ n5pf-objs := nitrox_main.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o \
nitrox_mbx.o
nitrox_mbx.o \
nitrox_skcipher.o \
nitrox_aead.o
n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/crypto.h>
#include <linux/rtnetlink.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#define GCM_AES_SALT_SIZE 4
/**
* struct nitrox_crypt_params - Parameters to set up a nitrox crypto request.
* @cryptlen: Encryption/Decryption data length
* @authlen: Assoc data length + Cryptlen
* @srclen: Input buffer length
* @dstlen: Output buffer length
* @iv: IV data
* @ivsize: IV data length
* @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
*/
struct nitrox_crypt_params {
unsigned int cryptlen;
unsigned int authlen;
unsigned int srclen;
unsigned int dstlen;
u8 *iv;
int ivsize;
u8 ctrl_arg;
};
union gph_p3 {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
u16 iv_offset : 8;
u16 auth_offset : 8;
#else
u16 auth_offset : 8;
u16 iv_offset : 8;
#endif
};
u16 param;
};
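
In the GCM path below, iv_offset is always 0 and auth_offset carries the 8-byte per-request IV length, so param packs to 0x0008 on a little-endian host before nitrox_set_creq() byte-swaps it with cpu_to_be16(). A small illustration (assumes the little-endian bit-field branch; the function is hypothetical):

static u16 demo_pack_gph_p3(void)
{
        union gph_p3 p3;

        p3.iv_offset = 0;       /* IV sits at the start of the SE input */
        p3.auth_offset = 8;     /* auth region begins after the 8-byte IV */

        return p3.param;        /* 0x0008 on little-endian, pre-swap */
}
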
static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
int aes_keylen;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct flexi_crypto_context *fctx;
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
if (aes_keylen < 0) {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/* fill crypto context */
fctx = nctx->u.fctx;
flags.f = be64_to_cpu(fctx->flags.f);
flags.w0.aes_keylen = aes_keylen;
fctx->flags.f = cpu_to_be64(flags.f);
/* copy enc key to context */
memset(&fctx->crypto, 0, sizeof(fctx->crypto));
memcpy(fctx->crypto.u.key, key, keylen);
return 0;
}
static int nitrox_aead_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct flexi_crypto_context *fctx = nctx->u.fctx;
union fc_ctx_flags flags;
flags.f = be64_to_cpu(fctx->flags.f);
flags.w0.mac_len = authsize;
fctx->flags.f = cpu_to_be64(flags.f);
aead->authsize = authsize;
return 0;
}
static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
int buflen)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
int nents = sg_nents_for_len(areq->src, buflen) + 1;
int ret;
if (nents < 0)
return nents;
/* Allocate buffer to hold IV and input scatterlist array */
ret = alloc_src_req_buf(nkreq, nents, ivsize);
if (ret)
return ret;
nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
return 0;
}
static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
int nents = sg_nents_for_len(areq->dst, buflen) + 3;
int ret;
if (nents < 0)
return nents;
/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
 * array; the three extra nents cover the ORH, IV and COMPLETION
 * entries of the output layout
 */
ret = alloc_dst_req_buf(nkreq, nents);
if (ret)
return ret;
nitrox_creq_set_orh(nkreq);
nitrox_creq_set_comp(nkreq);
nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
return 0;
}
static void free_src_sglist(struct aead_request *areq)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
kfree(nkreq->src);
}
static void free_dst_sglist(struct aead_request *areq)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
kfree(nkreq->dst);
}
static int nitrox_set_creq(struct aead_request *areq,
struct nitrox_crypt_params *params)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
union gph_p3 param3;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
int ret;
creq->flags = areq->base.flags;
creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
creq->ctrl.s.arg = params->ctrl_arg;
creq->gph.param0 = cpu_to_be16(params->cryptlen);
creq->gph.param1 = cpu_to_be16(params->authlen);
creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
param3.iv_offset = 0;
param3.auth_offset = params->ivsize;
creq->gph.param3 = cpu_to_be16(param3.param);
creq->ctx_handle = nctx->u.ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
ret = alloc_src_sglist(areq, params->iv, params->ivsize,
params->srclen);
if (ret)
return ret;
ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
if (ret) {
free_src_sglist(areq);
return ret;
}
return 0;
}
static void nitrox_aead_callback(void *arg, int err)
{
struct aead_request *areq = arg;
free_src_sglist(areq);
free_dst_sglist(areq);
if (err) {
pr_err_ratelimited("request failed status 0x%x\n", err);
err = -EINVAL;
}
areq->base.complete(&areq->base, err);
}
static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
struct nitrox_crypt_params params;
int ret;
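/* The first 4 IV bytes act as the GCM salt and live in the device
 * crypto context; the remaining 8 bytes are supplied per request
 * through DPTR (IV_FROM_DPTR), see params.iv below.
 */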
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
memset(&params, 0, sizeof(params));
params.cryptlen = areq->cryptlen;
params.authlen = areq->assoclen + params.cryptlen;
params.srclen = params.authlen;
params.dstlen = params.srclen + aead->authsize;
params.iv = &areq->iv[GCM_AES_SALT_SIZE];
params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
params.ctrl_arg = ENCRYPT;
ret = nitrox_set_creq(areq, &params);
if (ret)
return ret;
/* send the crypto request */
return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
areq);
}
static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
memset(&params, 0, sizeof(params));
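/* Decrypt: the authsize-byte tag at the tail of src is verified and
 * stripped, so cryptlen excludes it and dstlen ends up srclen minus
 * the tag.
 */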
params.cryptlen = areq->cryptlen - aead->authsize;
params.authlen = areq->assoclen + params.cryptlen;
params.srclen = areq->cryptlen + areq->assoclen;
params.dstlen = params.srclen - aead->authsize;
params.iv = &areq->iv[GCM_AES_SALT_SIZE];
params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
params.ctrl_arg = DECRYPT;
ret = nitrox_set_creq(areq, &params);
if (ret)
return ret;
/* send the crypto request */
return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
areq);
}
static int nitrox_aead_init(struct crypto_aead *aead)
{
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct crypto_ctx_hdr *chdr;
/* get the first device */
nctx->ndev = nitrox_get_first_device();
if (!nctx->ndev)
return -ENODEV;
/* allocate nitrox crypto context */
chdr = crypto_alloc_context(nctx->ndev);
if (!chdr) {
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
nctx->chdr = chdr;
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
sizeof(struct ctx_hdr));
nctx->u.fctx->flags.f = 0;
return 0;
}
static int nitrox_aes_gcm_init(struct crypto_aead *aead)
{
int ret;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
union fc_ctx_flags *flags;
ret = nitrox_aead_init(aead);
if (ret)
return ret;
flags = &nctx->u.fctx->flags;
flags->w0.cipher_type = CIPHER_AES_GCM;
flags->w0.hash_type = AUTH_NULL;
flags->w0.iv_source = IV_FROM_DPTR;
/* ask microcode to calculate ipad/opad */
flags->w0.auth_input_type = 1;
flags->f = be64_to_cpu(flags->f);
crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
sizeof(struct nitrox_kcrypt_request));
return 0;
}
static void nitrox_aead_exit(struct crypto_aead *aead)
{
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
/* free the nitrox crypto context */
if (nctx->u.ctx_handle) {
struct flexi_crypto_context *fctx = nctx->u.fctx;
memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys));
memzero_explicit(&fctx->auth, sizeof(struct auth_keys));
crypto_free_context((void *)nctx->chdr);
}
nitrox_put_device(nctx->ndev);
nctx->u.ctx_handle = 0;
nctx->ndev = NULL;
}
static struct aead_alg nitrox_aeads[] = { {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.setkey = nitrox_aes_gcm_setkey,
.setauthsize = nitrox_aead_setauthsize,
.encrypt = nitrox_aes_gcm_enc,
.decrypt = nitrox_aes_gcm_dec,
.init = nitrox_aes_gcm_init,
.exit = nitrox_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
} };
int nitrox_register_aeads(void)
{
return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
}
void nitrox_unregister_aeads(void)
{
crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
}
@@ -7,6 +7,10 @@
int nitrox_crypto_register(void);
void nitrox_crypto_unregister(void);
int nitrox_register_aeads(void);
void nitrox_unregister_aeads(void);
int nitrox_register_skciphers(void);
void nitrox_unregister_skciphers(void);
void *crypto_alloc_context(struct nitrox_device *ndev);
void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
@@ -19,7 +23,7 @@ void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void *cb_arg);
void backlog_qflush_work(struct work_struct *work);
@@ -8,6 +8,7 @@
#include "nitrox_dev.h"
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define PRIO 4001
/**
* struct gphdr - General purpose Header
@@ -106,6 +107,18 @@ enum flexi_cipher {
CIPHER_INVALID
};
enum flexi_auth {
AUTH_NULL = 0,
AUTH_MD5,
AUTH_SHA1,
AUTH_SHA2_SHA224,
AUTH_SHA2_SHA256,
AUTH_SHA2_SHA384,
AUTH_SHA2_SHA512,
AUTH_GMAC,
AUTH_INVALID
};
/**
* struct crypto_keys - Crypto keys
* @key: Encryption key or KEY1 for AES-XTS
@@ -132,22 +145,8 @@ struct auth_keys {
u8 opad[64];
};
/**
* struct flexi_crypto_context - Crypto context
* @cipher_type: Encryption cipher type
* @aes_keylen: AES key length
* @iv_source: Encryption IV source
* @hash_type: Authentication type
* @auth_input_type: Authentication input type
* 1 - Authentication IV and KEY, microcode calculates OPAD/IPAD
* 0 - Authentication OPAD/IPAD
* @mac_len: mac length
* @crypto: Crypto keys
* @auth: Authentication keys
*/
struct flexi_crypto_context {
union {
__be64 flags;
union fc_ctx_flags {
__be64 f;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cipher_type : 4;
@@ -171,8 +170,22 @@ struct flexi_crypto_context {
u64 cipher_type : 4;
#endif
} w0;
};
};
/**
* struct flexi_crypto_context - Crypto context
* @cipher_type: Encryption cipher type
* @aes_keylen: AES key length
* @iv_source: Encryption IV source
* @hash_type: Authentication type
* @auth_input_type: Authentication input type
* 1 - Authentication IV and KEY, microcode calculates OPAD/IPAD
* 0 - Authentication OPAD/IPAD
* @mac_len: mac length
* @crypto: Crypto keys
* @auth: Authentication keys
*/
struct flexi_crypto_context {
union fc_ctx_flags flags;
struct crypto_keys crypto;
struct auth_keys auth;
};
@@ -194,8 +207,6 @@ struct nitrox_crypto_ctx {
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
struct nitrox_crypto_ctx *nctx;
struct skcipher_request *skreq;
u8 *src;
u8 *dst;
};
@@ -400,7 +411,7 @@ struct resp_hdr {
u64 *completion;
};
typedef void (*completion_t)(struct skcipher_request *skreq, int err);
typedef void (*completion_t)(void *arg, int err);
/**
* struct nitrox_softreq - Represents the NITROX Request.
@@ -435,9 +446,30 @@ struct nitrox_softreq {
unsigned long tstamp;
completion_t callback;
struct skcipher_request *skreq;
void *cb_arg;
};
static inline int flexi_aes_keylen(int keylen)
{
int aes_keylen;
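/* 1/2/3 is the context encoding for 128/192/256-bit AES keys; the
 * value is written straight into fc_ctx_flags.w0.aes_keylen by the
 * setkey handlers.
 */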
switch (keylen) {
case AES_KEYSIZE_128:
aes_keylen = 1;
break;
case AES_KEYSIZE_192:
aes_keylen = 2;
break;
case AES_KEYSIZE_256:
aes_keylen = 3;
break;
default:
aes_keylen = -EINVAL;
break;
}
return aes_keylen;
}
static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)
{
size_t size;
@@ -448,6 +480,14 @@ static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)
return kzalloc(size, gfp);
}
/**
* create_single_sg - Point SG entry to the data
* @sg: Destination SG list
* @buf: Data
* @buflen: Data length
*
* Returns next free entry in the destination SG list
**/
static inline struct scatterlist *create_single_sg(struct scatterlist *sg,
void *buf, int buflen)
{
@@ -456,18 +496,33 @@ static inline struct scatterlist *create_single_sg(
return sg;
}
/**
* create_multi_sg - Create multiple sg entries with buflen data length from
* source sglist
* @to_sg: Destination SG list
* @from_sg: Source SG list
* @buflen: Data length
*
* Returns next free entry in the destination SG list
**/
static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
struct scatterlist *from_sg)
struct scatterlist *from_sg,
int buflen)
{
struct scatterlist *sg;
int i;
struct scatterlist *sg = to_sg;
unsigned int sglen;
for_each_sg(from_sg, sg, sg_nents(from_sg), i) {
sg_set_buf(to_sg, sg_virt(sg), sg->length);
to_sg++;
for (; buflen; buflen -= sglen) {
sglen = from_sg->length;
if (sglen > buflen)
sglen = buflen;
sg_set_buf(sg, sg_virt(from_sg), sglen);
from_sg = sg_next(from_sg);
sg++;
}
return to_sg;
return sg;
}
static inline void set_orh_value(u64 *orh)
@@ -480,4 +535,112 @@ static inline void set_comp_value(u64 *comp)
WRITE_ONCE(*comp, PENDING_SIG);
}
static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq,
int nents, int ivsize)
{
struct se_crypto_request *creq = &nkreq->creq;
nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp);
if (!nkreq->src)
return -ENOMEM;
return 0;
}
static inline void nitrox_creq_copy_iv(char *dst, char *src, int size)
{
memcpy(dst, src, size);
}
static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize)
{
return (struct scatterlist *)(iv + ivsize);
}
static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq,
int nents, int ivsize,
struct scatterlist *src, int buflen)
{
char *iv = nkreq->src;
struct scatterlist *sg;
struct se_crypto_request *creq = &nkreq->creq;
creq->src = nitrox_creq_src_sg(iv, ivsize);
sg = creq->src;
sg_init_table(sg, nents);
/* Input format:
* +----+----------------+
* | IV | SRC sg entries |
* +----+----------------+
*/
/* IV */
sg = create_single_sg(sg, iv, ivsize);
/* SRC entries */
create_multi_sg(sg, src, buflen);
}
static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq,
int nents)
{
int extralen = ORH_HLEN + COMP_HLEN;
struct se_crypto_request *creq = &nkreq->creq;
nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp);
if (!nkreq->dst)
return -ENOMEM;
return 0;
}
static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq)
{
struct se_crypto_request *creq = &nkreq->creq;
creq->orh = (u64 *)(nkreq->dst);
set_orh_value(creq->orh);
}
static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq)
{
struct se_crypto_request *creq = &nkreq->creq;
creq->comp = (u64 *)(nkreq->dst + ORH_HLEN);
set_comp_value(creq->comp);
}
static inline struct scatterlist *nitrox_creq_dst_sg(char *dst)
{
return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN);
}
static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq,
int nents, int ivsize,
struct scatterlist *dst, int buflen)
{
struct se_crypto_request *creq = &nkreq->creq;
struct scatterlist *sg;
char *iv = nkreq->src;
creq->dst = nitrox_creq_dst_sg(nkreq->dst);
sg = creq->dst;
sg_init_table(sg, nents);
/* Output format:
* +-----+----+----------------+-----------------+
* | ORH | IV | DST sg entries | COMPLETION Bytes|
* +-----+----+----------------+-----------------+
*/
/* ORH */
sg = create_single_sg(sg, creq->orh, ORH_HLEN);
/* IV */
sg = create_single_sg(sg, iv, ivsize);
/* DST entries */
sg = create_multi_sg(sg, dst, buflen);
/* COMPLETION Bytes */
create_single_sg(sg, creq->comp, COMP_HLEN);
}
#endif /* __NITROX_REQ_H */
@@ -269,6 +269,8 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
smp_mb__after_atomic();
return true;
}
/* sync with other cpus */
smp_mb__after_atomic();
return false;
}
@@ -324,8 +326,6 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
struct skcipher_request *skreq;
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
ret = -ENOSPC;
@@ -337,12 +337,8 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* sync with other cpus */
smp_mb__after_atomic();
skreq = sr->skreq;
/* post the command */
post_se_instr(sr, cmdq);
/* backlog requests are posted, wakeup with -EINPROGRESS */
skcipher_request_complete(skreq, -EINPROGRESS);
}
spin_unlock_bh(&cmdq->backlog_qlock);
@@ -365,7 +361,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
}
/* add to backlog list */
backlog_list_add(sr, cmdq);
return -EBUSY;
return -EINPROGRESS;
}
post_se_instr(sr, cmdq);
@@ -382,7 +378,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t callback,
struct skcipher_request *skreq)
void *cb_arg)
{
struct nitrox_softreq *sr;
dma_addr_t ctx_handle = 0;
@@ -399,7 +395,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
sr->flags = req->flags;
sr->gfp = req->gfp;
sr->callback = callback;
sr->skreq = skreq;
sr->cb_arg = cb_arg;
atomic_set(&sr->status, REQ_NOT_POSTED);
@@ -513,7 +509,20 @@ void backlog_qflush_work(struct work_struct *work)
static bool sr_completed(struct nitrox_softreq *sr)
{
return (READ_ONCE(*sr->resp.orh) != READ_ONCE(*sr->resp.completion));
u64 orh = READ_ONCE(*sr->resp.orh);
unsigned long timeout = jiffies + msecs_to_jiffies(1);
if ((orh != PENDING_SIG) && (orh & 0xff))
return true;
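/* ORH is posted but carries no error code: give the device up to
 * ~1 ms for the completion word to flip from PENDING_SIG before
 * declaring the request still pending.
 */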
while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
if (time_after(jiffies, timeout)) {
pr_err("comp not done\n");
return false;
}
}
return true;
}
/**
@@ -527,8 +536,6 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
struct nitrox_softreq *sr;
struct skcipher_request *skreq;
completion_t callback;
int req_completed = 0, err = 0, budget;
/* check all pending requests */
@@ -558,15 +565,12 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* remove from response list */
response_list_del(sr, cmdq);
callback = sr->callback;
skreq = sr->skreq;
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
softreq_destroy(sr);
if (callback)
callback(skreq, err);
if (sr->callback)
sr->callback(sr->cb_arg, err);
req_completed++;
}