Commit 81760ea6 authored by Herbert Xu

crypto: cryptd - Add helpers to check whether a tfm is queued

This patch adds helpers to check whether a given tfm is currently
queued.  This is meant to be used by ablk_helper and similar
entities to ensure that no reordering is introduced because of
requests queued in cryptd with respect to requests being processed
in softirq context.

The per-cpu queue length limit is also increased to 1000 in line
with network limits.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 47a1f0b2
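
For context, the intended consumer of the new helpers looks roughly like the ablk_helper encrypt path sketched below. This is a hedged illustration, not part of this commit: __ablk_encrypt(), struct async_helper_ctx and its cryptd_tfm field are assumptions modeled on ablk_helper; only cryptd_ablkcipher_queued() comes from this patch. The point is that a request issued from atomic context must keep going through cryptd while earlier requests are still queued there, so that a synchronously completed request can never overtake one waiting in the workqueue:

/* Sketch of an ablk_helper-style caller (assumed names, illustration only). */
static int ablk_encrypt_sketch(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req);

	if (!irq_fpu_usable() ||
	    (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
		/* Defer to cryptd: either the FPU is unusable here, or
		 * requests are already queued in cryptd and completing
		 * this one synchronously would reorder the stream. */
		*cryptd_req = *req;
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	}

	return __ablk_encrypt(req);	/* assumed synchronous FPU path */
}

The ahash and aead variants are meant to be used the same way via cryptd_ahash_queued() and cryptd_aead_queued().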
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -22,6 +22,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
+#include <linux/atomic.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#define CRYPTD_MAX_CPU_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 1000
 
 struct cryptd_cpu_queue {
 	struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
 };
 
 struct cryptd_blkcipher_ctx {
+	atomic_t refcnt;
 	struct crypto_blkcipher *child;
 };
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
 };
 
 struct cryptd_hash_ctx {
+	atomic_t refcnt;
 	struct crypto_shash *child;
 };
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
 };
 
 struct cryptd_aead_ctx {
+	atomic_t refcnt;
 	struct crypto_aead *child;
 };
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 {
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_tfm *tfm;
+	atomic_t *refcnt;
+	bool may_backlog;
 
 	cpu = get_cpu();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+	refcnt = crypto_tfm_ctx(request->tfm);
+	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	if (err == -EBUSY && !may_backlog)
+		goto out_put_cpu;
+
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+
+	if (!atomic_read(refcnt))
+		goto out_put_cpu;
+
+	tfm = request->tfm;
+	atomic_inc(refcnt);
+
+out_put_cpu:
 	put_cpu();
 
 	return err;
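
The enqueue path above establishes the reference-counting protocol used throughout this patch: the owner's allocation holds one reference, and every request that enters a cryptd queue takes an extra one, dropped again by the completion handlers further down. Transforms instantiated internally through the cryptd template never have their refcount initialised, so the !atomic_read(refcnt) test leaves them out of the scheme; this reading is inferred from the diff, since only the cryptd_alloc_*() helpers below call atomic_set(&ctx->refcnt, 1). A minimal user-space model of the invariant, a sketch using C11 atomics in place of the kernel's atomic_t and deliberately ignoring the backlog and free-at-zero corner cases:

/* Hypothetical model, not kernel code: one owner reference plus one
 * reference per queued request. */
#include <stdatomic.h>
#include <stdbool.h>

struct tfm_model {
	atomic_int refcnt;
};

static void model_alloc(struct tfm_model *t)    { atomic_init(&t->refcnt, 1); }
static void model_enqueue(struct tfm_model *t)  { atomic_fetch_add(&t->refcnt, 1); }
static void model_complete(struct tfm_model *t) { atomic_fetch_sub(&t->refcnt, 1); }

/* Mirrors the cryptd_*_queued() helpers: any reference beyond the
 * owner's belongs to a request still pending in the queue. */
static bool model_queued(struct tfm_model *t)
{
	return atomic_load(&t->refcnt) - 1;
}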
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 					       unsigned int len))
 {
 	struct cryptd_blkcipher_request_ctx *rctx;
+	struct cryptd_blkcipher_ctx *ctx;
+	struct crypto_ablkcipher *tfm;
 	struct blkcipher_desc desc;
+	int refcnt;
 
 	rctx = ablkcipher_request_ctx(req);
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 	req->base.complete = rctx->complete;
 
 out:
+	tfm = crypto_ablkcipher_reqtfm(req);
+	ctx = crypto_ablkcipher_ctx(tfm);
+	refcnt = atomic_read(&ctx->refcnt);
+
 	local_bh_disable();
 	rctx->complete(&req->base, err);
 	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ablkcipher(tfm);
 }
 
 static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 	return cryptd_enqueue_request(queue, &req->base);
 }
 
+static void cryptd_hash_complete(struct ahash_request *req, int err)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	int refcnt = atomic_read(&ctx->refcnt);
+
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ahash(tfm);
+}
+
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req,
 			int (*crypt)(struct aead_request *req))
 {
 	struct cryptd_aead_request_ctx *rctx;
+	struct cryptd_aead_ctx *ctx;
 	crypto_completion_t compl;
+	struct crypto_aead *tfm;
+	int refcnt;
 
 	rctx = aead_request_ctx(req);
 	compl = rctx->complete;
@@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
+
 out:
+	tfm = crypto_aead_reqtfm(req);
+	ctx = crypto_aead_ctx(tfm);
+	refcnt = atomic_read(&ctx->refcnt);
+
 	local_bh_disable();
 	compl(&req->base, err);
 	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_aead(tfm);
 }
 
 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 						  u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_blkcipher_ctx *ctx;
 	struct crypto_tfm *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	ctx = crypto_tfm_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,9 +962,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
+{
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
+
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-	crypto_free_ablkcipher(&tfm->base);
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ablkcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
@@ -920,6 +983,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_hash_ctx *ctx;
 	struct crypto_ahash *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -933,6 +997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	ctx = crypto_ahash_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_ahash_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,9 +1019,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
+{
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
+
 void cryptd_free_ahash(struct cryptd_ahash *tfm)
 {
-	crypto_free_ahash(&tfm->base);
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
@@ -962,6 +1040,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 				      u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_aead_ctx *ctx;
 	struct crypto_aead *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 		crypto_free_aead(tfm);
 		return ERR_PTR(-EINVAL);
 	}
+
+	ctx = crypto_aead_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_aead_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_aead_child);
 
+bool cryptd_aead_queued(struct cryptd_aead *tfm)
+{
+	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_queued);
+
 void cryptd_free_aead(struct cryptd_aead *tfm)
 {
-	crypto_free_aead(&tfm->base);
+	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_aead(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_aead);
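
Note the changed lifetime rules in the three cryptd_free_*() functions above: they now merely drop the caller's reference, and the underlying transform is freed by whichever side reaches zero last, either the caller's free or the completion paths in cryptd_blkcipher_crypt(), cryptd_hash_complete() and cryptd_aead_crypt(). As far as the diff shows, a tfm can therefore be released while requests are still pending in the queue without a use-after-free.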
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
 struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 						  u32 type, u32 mask);
 struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
 
 struct cryptd_ahash {
@@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask);
 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
+/* Must be called without moving CPUs. */
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
 
 struct cryptd_aead {
@@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 				      u32 type, u32 mask);
 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_aead_queued(struct cryptd_aead *tfm);
 void cryptd_free_aead(struct cryptd_aead *tfm);