Commit d31e7123 authored by Zeng, Xin's avatar Zeng, Xin Committed by Herbert Xu

crypto: virtio - Refactor virtio_crypto driver for new virtio crypto services

In the current virtio crypto device driver, some common data structures and
implementations that should be shared by other virtio crypto algorithms
(e.g. asymmetric crypto algorithms) are entangled with implementation
details specific to the symmetric crypto algorithms.
This patch refactors these pieces of code so that they can be reused by
other virtio crypto algorithms.
Acked-by: default avatarGonglei <arei.gonglei@huawei.com>
Signed-off-by: default avatarXin Zeng <xin.zeng@intel.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent 41cdf7a4
...@@ -27,12 +27,68 @@ ...@@ -27,12 +27,68 @@
#include <uapi/linux/virtio_crypto.h> #include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h" #include "virtio_crypto_common.h"
/*
 * Per-transform (tfm) context for the virtio ablkcipher algorithms.
 * Holds the device handle and the encrypt/decrypt session state that
 * the data-path request builder reads.
 */
struct virtio_crypto_ablkcipher_ctx {
struct virtio_crypto *vcrypto; /* device this tfm is bound to */
struct crypto_tfm *tfm; /* back-pointer set at tfm init time */
struct virtio_crypto_sym_session_info enc_sess_info; /* encrypt session id */
struct virtio_crypto_sym_session_info dec_sess_info; /* decrypt session id */
};
/*
 * Symmetric-algorithm request state, layered on top of the common
 * virtio_crypto_request. The base member MUST stay first-class here:
 * the data-queue callback recovers this struct from the base pointer
 * via container_of().
 */
struct virtio_crypto_sym_request {
struct virtio_crypto_request base; /* common request; embedded for container_of() */
/* Cipher or aead */
uint32_t type;
struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; /* owning tfm context */
struct ablkcipher_request *ablkcipher_req; /* the crypto-API request being served */
uint8_t *iv; /* device-visible IV copy; kzfree'd when the request is finalized */
/* Encryption? */
bool encrypt;
};
/* /*
* The algs_lock protects the below global virtio_crypto_active_devs * The algs_lock protects the below global virtio_crypto_active_devs
* and crypto algorithms registion. * and crypto algorithms registion.
*/ */
static DEFINE_MUTEX(algs_lock); static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs; static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
int err);
/*
 * Completion callback invoked from the data virtqueue for symmetric
 * requests. Translates the device's virtio status code into a kernel
 * errno and hands the ablkcipher request back to the crypto engine.
 */
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	int rc;

	/* Only cipher operations are finalized here. */
	if (vc_sym_req->type != VIRTIO_CRYPTO_SYM_OP_CIPHER)
		return;

	/* Map the virtio-crypto status to a Linux error code. */
	if (vc_req->status == VIRTIO_CRYPTO_OK)
		rc = 0;
	else if (vc_req->status == VIRTIO_CRYPTO_INVSESS ||
		 vc_req->status == VIRTIO_CRYPTO_ERR)
		rc = -EINVAL;
	else if (vc_req->status == VIRTIO_CRYPTO_BADMSG)
		rc = -EBADMSG;
	else
		rc = -EIO;

	/* Finish the encrypt or decrypt process */
	virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
					      vc_sym_req->ablkcipher_req, rc);
}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg) static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{ {
...@@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, ...@@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
} }
static int static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req, struct ablkcipher_request *req,
struct data_queue *data_vq) struct data_queue *data_vq)
{ {
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data; struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents; int src_nents, dst_nents;
...@@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, ...@@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
} }
vc_req->req_data = req_data; vc_req->req_data = req_data;
vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */ /* Head of operation */
if (vc_req->encrypt) { if (vc_sym_req->encrypt) {
req_data->header.session_id = req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id); cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode = req_data->header.opcode =
...@@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, ...@@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
memcpy(iv, req->info, ivsize); memcpy(iv, req->info, ivsize);
sg_init_one(&iv_sg, iv, ivsize); sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg; sgs[num_out++] = &iv_sg;
vc_req->iv = iv; vc_sym_req->iv = iv;
/* Source data */ /* Source data */
for (i = 0; i < src_nents; i++) for (i = 0; i < src_nents; i++)
...@@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) ...@@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{ {
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); struct virtio_crypto_sym_request *vc_sym_req =
ablkcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */ /* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0]; struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->ablkcipher_ctx = ctx;
vc_req->ablkcipher_req = req;
vc_req->encrypt = true;
vc_req->dataq = data_vq; vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->ablkcipher_ctx = ctx;
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = true;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
} }
...@@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) ...@@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{ {
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); struct virtio_crypto_sym_request *vc_sym_req =
ablkcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */ /* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0]; struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->ablkcipher_ctx = ctx;
vc_req->ablkcipher_req = req;
vc_req->encrypt = false;
vc_req->dataq = data_vq; vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->ablkcipher_ctx = ctx;
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = false;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
} }
...@@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm) ...@@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{ {
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request); tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm; ctx->tfm = tfm;
return 0; return 0;
...@@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req( ...@@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine, struct crypto_engine *engine,
struct ablkcipher_request *req) struct ablkcipher_request *req)
{ {
struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); struct virtio_crypto_sym_request *vc_sym_req =
ablkcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq; struct data_queue *data_vq = vc_req->dataq;
int ret; int ret;
ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq); ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req( ...@@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0; return 0;
} }
void virtio_crypto_ablkcipher_finalize_req( static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_request *vc_req, struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req, struct ablkcipher_request *req,
int err) int err)
{ {
crypto_finalize_cipher_request(vc_req->dataq->engine, req, err); crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
req, err);
virtcrypto_clear_request(vc_req); kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
} }
static struct crypto_alg virtio_crypto_algs[] = { { static struct crypto_alg virtio_crypto_algs[] = { {
......
...@@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info { ...@@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info {
__u64 session_id; __u64 session_id;
}; };
struct virtio_crypto_ablkcipher_ctx { struct virtio_crypto_request;
struct virtio_crypto *vcrypto; typedef void (*virtio_crypto_data_callback)
struct crypto_tfm *tfm; (struct virtio_crypto_request *vc_req, int len);
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_request { struct virtio_crypto_request {
/* Cipher or aead */
uint32_t type;
uint8_t status; uint8_t status;
struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
struct ablkcipher_request *ablkcipher_req;
struct virtio_crypto_op_data_req *req_data; struct virtio_crypto_op_data_req *req_data;
struct scatterlist **sgs; struct scatterlist **sgs;
uint8_t *iv;
/* Encryption? */
bool encrypt;
struct data_queue *dataq; struct data_queue *dataq;
virtio_crypto_data_callback alg_cb;
}; };
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev); int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
...@@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto); ...@@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req( int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine, struct crypto_engine *engine,
struct ablkcipher_request *req); struct ablkcipher_request *req);
void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_request *vc_req,
struct ablkcipher_request *req,
int err);
void void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req); virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
......
...@@ -29,7 +29,6 @@ void ...@@ -29,7 +29,6 @@ void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req) virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{ {
if (vc_req) { if (vc_req) {
kzfree(vc_req->iv);
kzfree(vc_req->req_data); kzfree(vc_req->req_data);
kfree(vc_req->sgs); kfree(vc_req->sgs);
} }
...@@ -41,41 +40,19 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq) ...@@ -41,41 +40,19 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto_request *vc_req; struct virtio_crypto_request *vc_req;
unsigned long flags; unsigned long flags;
unsigned int len; unsigned int len;
struct ablkcipher_request *ablk_req;
int error;
unsigned int qid = vq->index; unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do { do {
virtqueue_disable_cb(vq); virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
default:
error = -EIO;
break;
}
ablk_req = vc_req->ablkcipher_req;
spin_unlock_irqrestore( spin_unlock_irqrestore(
&vcrypto->data_vq[qid].lock, flags); &vcrypto->data_vq[qid].lock, flags);
/* Finish the encrypt or decrypt process */ if (vc_req->alg_cb)
virtio_crypto_ablkcipher_finalize_req(vc_req, vc_req->alg_cb(vc_req, len);
ablk_req, error);
spin_lock_irqsave( spin_lock_irqsave(
&vcrypto->data_vq[qid].lock, flags); &vcrypto->data_vq[qid].lock, flags);
} }
}
} while (!virtqueue_enable_cb(vq)); } while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
} }
...@@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto) ...@@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
return -EPERM; return -EPERM;
} }
dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n"); dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
} else { } else {
virtcrypto_dev_stop(vcrypto); virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment