Commit 03882e04 authored by James Morris

[CRYPTO]: Cleanups and more consistency checks.

- Removed local_bh_disable() from kmap wrapper, not needed now with
two atomic kmaps.
- Nuked atomic flag, use in_softirq() instead.
- Converted crypto_kmap() and crypto_yield() to check in_softirq() (see the sketch below).
- Check CRYPTO_MAX_CIPHER_BLOCK_SIZE during alg init.
- Try to initialize as much at compile time as possible
(feedback from Christoph Hellwig).
- Clean up list handling a bit (feedback from Christoph Hellwig).
parent 3ba6853f
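
For readers skimming the diff, here is a minimal sketch of the two patterns this change moves to, assuming a 2.5-era kernel that provides the KM_CRYPTO_USER/KM_CRYPTO_SOFTIRQ atomic-kmap slots; the example_* names are hypothetical stand-ins for the real helpers shown in the crypto_kmap()/crypto_kunmap()/crypto_yield() hunk further down.

/*
 * Sketch only: assumes a 2.5-era kernel with KM_CRYPTO_USER/KM_CRYPTO_SOFTIRQ
 * kmap slots; the example_* names are hypothetical, not part of this commit.
 */
#include <linux/mm.h>        /* struct page */
#include <linux/highmem.h>   /* kmap_atomic(), kunmap_atomic() */
#include <linux/list.h>      /* LIST_HEAD_INIT */
#include <linux/sched.h>     /* cond_resched() */
#include <asm/hardirq.h>     /* in_softirq() */
#include <asm/softirq.h>

/* Pattern 1: pick the atomic-kmap slot from the execution context instead of
 * a per-tfm CRYPTO_TFM_REQ_ATOMIC flag; softirq and process context each own
 * a slot, so no local_bh_disable() is needed around the mapping. */
static inline void *example_kmap(struct page *page)
{
        return kmap_atomic(page, in_softirq() ?
                           KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
}

static inline void example_kunmap(void *vaddr)
{
        kunmap_atomic(vaddr, in_softirq() ?
                      KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
}

static inline void example_yield(void)
{
        /* only offer to reschedule when called from process context */
        if (!in_softirq())
                cond_resched();
}

/* Pattern 2: initialize a list head at compile time with LIST_HEAD_INIT
 * (the diff does this for each alg's .cra_list member) instead of calling
 * INIT_LIST_HEAD() from the module's init() function. */
static struct list_head example_list = LIST_HEAD_INIT(example_list);

Deriving the kmap slot from in_softirq() is what makes the per-tfm CRYPTO_TFM_REQ_ATOMIC flag redundant, which is why it disappears from the flag definitions at the end of the diff.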
@@ -23,7 +23,7 @@
 #include "internal.h"

 static LIST_HEAD(crypto_alg_list);
-static struct rw_semaphore crypto_alg_sem;
+static DECLARE_RWSEM(crypto_alg_sem);

 static inline int crypto_alg_get(struct crypto_alg *alg)
 {
@@ -38,23 +38,18 @@ static inline void crypto_alg_put(struct crypto_alg *alg)

 struct crypto_alg *crypto_alg_lookup(char *name)
 {
-        struct list_head *p;
-        struct crypto_alg *alg = NULL;
+        struct crypto_alg *q, *alg = NULL;

         down_read(&crypto_alg_sem);

-        list_for_each(p, &crypto_alg_list) {
-                struct crypto_alg *q =
-                        list_entry(p, struct crypto_alg, cra_list);
-
+        list_for_each_entry(q, &crypto_alg_list, cra_list) {
                 if (!(strcmp(q->cra_name, name))) {
                         if (crypto_alg_get(q))
                                 alg = q;
                         break;
                 }
         }

         up_read(&crypto_alg_sem);
         return alg;
 }
@@ -63,9 +58,6 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
 {
         tfm->crt_flags = 0;

-        if (flags & CRYPTO_TFM_REQ_ATOMIC)
-                tfm->crt_flags |= CRYPTO_TFM_REQ_ATOMIC;
-
         switch (crypto_tfm_alg_type(tfm)) {
         case CRYPTO_ALG_TYPE_CIPHER:
                 return crypto_init_cipher_flags(tfm, flags);
@@ -164,23 +156,35 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
         kfree(tfm);
 }

+static inline int crypto_alg_blocksize_check(struct crypto_alg *alg)
+{
+        return ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
+                == CRYPTO_ALG_TYPE_CIPHER &&
+                alg->cra_blocksize > CRYPTO_MAX_CIPHER_BLOCK_SIZE);
+}
+
 int crypto_register_alg(struct crypto_alg *alg)
 {
         int ret = 0;
-        struct list_head *p;
+        struct crypto_alg *q;

         down_write(&crypto_alg_sem);

-        list_for_each(p, &crypto_alg_list) {
-                struct crypto_alg *q =
-                        list_entry(p, struct crypto_alg, cra_list);
-
+        list_for_each_entry(q, &crypto_alg_list, cra_list) {
                 if (!(strcmp(q->cra_name, alg->cra_name))) {
                         ret = -EEXIST;
                         goto out;
                 }
         }

-        list_add_tail(&alg->cra_list, &crypto_alg_list);
+        if (crypto_alg_blocksize_check(alg)) {
+                printk(KERN_WARNING "%s: blocksize %Zd exceeds max. "
+                       "size %Zd\n", __FUNCTION__, alg->cra_blocksize,
+                       CRYPTO_MAX_CIPHER_BLOCK_SIZE);
+                ret = -EINVAL;
+        }
+        else
+                list_add_tail(&alg->cra_list, &crypto_alg_list);
 out:
         up_write(&crypto_alg_sem);
         return ret;
@@ -189,14 +193,14 @@ int crypto_register_alg(struct crypto_alg *alg)
 int crypto_unregister_alg(struct crypto_alg *alg)
 {
         int ret = -ENOENT;
-        struct list_head *p;
+        struct crypto_alg *q;

         BUG_ON(!alg->cra_module);

         down_write(&crypto_alg_sem);
-        list_for_each(p, &crypto_alg_list) {
-                if (alg == (void *)p) {
-                        list_del(p);
+        list_for_each_entry(q, &crypto_alg_list, cra_list) {
+                if (alg == q) {
+                        list_del(&alg->cra_list);
                         ret = 0;
                         goto out;
                 }
@@ -282,7 +286,6 @@ static int __init init_crypto(void)
         struct proc_dir_entry *proc;

         printk(KERN_INFO "Initializing Cryptographic API\n");
-        init_rwsem(&crypto_alg_sem);
         proc = create_proc_entry("crypto", 0, NULL);
         if (proc)
                 proc->proc_fops = &proc_crypto_ops;
......
@@ -71,14 +71,14 @@ static int copy_chunks(struct crypto_tfm *tfm, u8 *buf,
                         clen = rlen;
                 }

-                p = crypto_kmap(tfm, sg[i].page) + sg[i].offset + coff;
+                p = crypto_kmap(sg[i].page) + sg[i].offset + coff;

                 if (in)
                         memcpy(&buf[copied], p, clen);
                 else
                         memcpy(p, &buf[copied], clen);

-                crypto_kunmap(tfm, p);
+                crypto_kunmap(p);
                 *last = aligned ? 0 : clen;
                 copied += clen;
         }
@@ -118,7 +118,7 @@ static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
 {
         int i, coff;
         size_t bsize = crypto_tfm_alg_blocksize(tfm);
-        u8 tmp[CRYPTO_MAX_BLOCK_SIZE];
+        u8 tmp[CRYPTO_MAX_CIPHER_BLOCK_SIZE];

         if (sglen(sg, nsg) % bsize) {
                 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
@@ -128,11 +128,11 @@ static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
         for (i = 0, coff = 0; i < nsg; i++) {
                 int n = 0, boff = 0;
                 int len = sg[i].length - coff;
-                char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset + coff;
+                char *p = crypto_kmap(sg[i].page) + sg[i].offset + coff;

                 while (len) {
                         if (len < bsize) {
-                                crypto_kunmap(tfm, p);
+                                crypto_kunmap(p);
                                 n = gather_chunks(tfm, tmp, sg, i, len, &coff);
                                 prfn(tfm, tmp, crfn, enc);
                                 scatter_chunks(tfm, tmp, sg, i, len, &coff);
@@ -140,13 +140,13 @@ static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
                                 goto unmapped;
                         } else {
                                 prfn(tfm, p, crfn, enc);
-                                crypto_kunmap(tfm, p);
+                                crypto_kunmap(p);
                                 crypto_yield(tfm);

                                 /* remap and point to recalculated offset */
                                 boff += bsize;
-                                p = crypto_kmap(tfm, sg[i].page)
-                                    + sg[i].offset + coff + boff;
+                                p = crypto_kmap(sg[i].page)
+                                    + sg[i].offset + coff + boff;

                                 len -= bsize;
@@ -155,7 +155,7 @@ static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
                                 coff = 0;
                         }
                 }
-                crypto_kunmap(tfm, p);
+                crypto_kunmap(p);
 unmapped:
                 i += n;
@@ -172,7 +172,7 @@ static void cbc_process(struct crypto_tfm *tfm,
                 memcpy(tfm->crt_cipher.cit_iv, block,
                        crypto_tfm_alg_blocksize(tfm));
         } else {
-                u8 buf[CRYPTO_MAX_BLOCK_SIZE];
+                u8 buf[CRYPTO_MAX_CIPHER_BLOCK_SIZE];

                 fn(tfm->crt_ctx, buf, block);
                 xor_64(buf, tfm->crt_cipher.cit_iv);
......
@@ -1260,6 +1260,7 @@ static struct crypto_alg des_alg = {
         .cra_blocksize = DES_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct des_ctx),
         .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
         .cra_u = { .cipher = {
         .cia_keysize = DES_KEY_SIZE,
         .cia_ivsize = DES_BLOCK_SIZE,
@@ -1274,6 +1275,7 @@ static struct crypto_alg des3_ede_alg = {
         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct des3_ede_ctx),
         .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list),
         .cra_u = { .cipher = {
         .cia_keysize = DES3_EDE_KEY_SIZE,
         .cia_ivsize = DES3_EDE_BLOCK_SIZE,
@@ -1286,9 +1288,6 @@ static int __init init(void)
 {
         int ret = 0;

-        INIT_LIST_HEAD(&des_alg.cra_list);
-        INIT_LIST_HEAD(&des3_ede_alg.cra_list);
-
         ret = crypto_register_alg(&des_alg);
         if (ret < 0)
                 goto out;
......
@@ -34,10 +34,10 @@ static void update(struct crypto_tfm *tfm, struct scatterlist *sg, size_t nsg)
         int i;

         for (i = 0; i < nsg; i++) {
-                char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset;
+                char *p = crypto_kmap(sg[i].page) + sg[i].offset;
                 tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
                                                       p, sg[i].length);
-                crypto_kunmap(tfm, p);
+                crypto_kunmap(p);
                 crypto_yield(tfm);
         }
         return;
@@ -57,10 +57,10 @@ static void digest(struct crypto_tfm *tfm,
         tfm->crt_digest.dit_init(tfm);

         for (i = 0; i < nsg; i++) {
-                char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset;
+                char *p = crypto_kmap(sg[i].page) + sg[i].offset;
                 tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
                                                       p, sg[i].length);
-                crypto_kunmap(tfm, p);
+                crypto_kunmap(p);
                 crypto_yield(tfm);
         }
         crypto_digest_final(tfm, out);
......
@@ -17,31 +17,21 @@
 #include <asm/hardirq.h>
 #include <asm/softirq.h>

-static inline void *crypto_kmap(struct crypto_tfm *tfm, struct page *page)
+static inline void *crypto_kmap(struct page *page)
 {
-        if (tfm->crt_flags & CRYPTO_TFM_REQ_ATOMIC) {
-#ifdef CONFIG_HIGHMEM /* XXX: remove this after the api change */
-                local_bh_disable();
-#endif
-                return kmap_atomic(page, KM_CRYPTO_SOFTIRQ);
-        } else
-                return kmap_atomic(page, KM_CRYPTO_USER);
+        return kmap_atomic(page, in_softirq() ?
+                           KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
 }

-static inline void crypto_kunmap(struct crypto_tfm *tfm, void *vaddr)
+static inline void crypto_kunmap(void *vaddr)
 {
-        if (tfm->crt_flags & CRYPTO_TFM_REQ_ATOMIC) {
-                kunmap_atomic(vaddr, KM_CRYPTO_SOFTIRQ);
-#ifdef CONFIG_HIGHMEM /* XXX: remove this after the api change */
-                local_bh_enable();
-#endif
-        } else
-                kunmap_atomic(vaddr, KM_CRYPTO_USER);
+        return kunmap_atomic(vaddr, in_softirq() ?
+                             KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
 }

 static inline void crypto_yield(struct crypto_tfm *tfm)
 {
-        if (!(tfm->crt_flags & CRYPTO_TFM_REQ_ATOMIC))
+        if (!in_softirq())
                 cond_resched();
 }
......
@@ -226,6 +226,7 @@ static struct crypto_alg alg = {
         .cra_blocksize = MD4_HMAC_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct md4_ctx),
         .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(alg.cra_list),
         .cra_u = { .digest = {
         .dia_digestsize = MD4_DIGEST_SIZE,
         .dia_init = md4_init,
@@ -235,7 +236,6 @@ static struct crypto_alg alg = {
 static int __init init(void)
 {
-        INIT_LIST_HEAD(&alg.cra_list);
         return crypto_register_alg(&alg);
 }
......
@@ -219,6 +219,7 @@ static struct crypto_alg alg = {
         .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct md5_ctx),
         .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(alg.cra_list),
         .cra_u = { .digest = {
         .dia_digestsize = MD5_DIGEST_SIZE,
         .dia_init = md5_init,
@@ -228,7 +229,6 @@ static struct crypto_alg alg = {
 static int __init init(void)
 {
-        INIT_LIST_HEAD(&alg.cra_list);
         return crypto_register_alg(&alg);
 }
......
@@ -183,6 +183,7 @@ static struct crypto_alg alg = {
         .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct sha1_ctx),
         .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(alg.cra_list),
         .cra_u = { .digest = {
         .dia_digestsize = SHA1_DIGEST_SIZE,
         .dia_init = sha1_init,
@@ -192,7 +193,6 @@ static struct crypto_alg alg = {
 static int __init init(void)
 {
-        INIT_LIST_HEAD(&alg.cra_list);
         return crypto_register_alg(&alg);
 }
......
@@ -739,7 +739,7 @@ test_des(void)
          */
         i = 7;
         key = des_tv[i].key;
-        tfm->crt_flags = CRYPTO_TFM_REQ_ATOMIC;
+        tfm->crt_flags = 0;

         ret = crypto_cipher_setkey(tfm, key, 8);
         if (ret) {
@@ -985,7 +985,6 @@ test_des(void)
         for (i = 0; i < DES_CBC_ENC_TEST_VECTORS; i++) {
                 printk("test %d:\n", i + 1);

-                tfm->crt_flags |= CRYPTO_TFM_REQ_ATOMIC;
                 key = des_tv[i].key;
                 ret = crypto_cipher_setkey(tfm, key, 8);
......
@@ -37,8 +37,7 @@
 #define CRYPTO_TFM_MODE_CFB 0x00000004
 #define CRYPTO_TFM_MODE_CTR 0x00000008

-#define CRYPTO_TFM_REQ_ATOMIC 0x00000100
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000200
+#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100

 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
 #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
@@ -52,7 +51,7 @@
  */
 #define CRYPTO_UNSPEC 0
 #define CRYPTO_MAX_ALG_NAME 64
-#define CRYPTO_MAX_BLOCK_SIZE 16
+#define CRYPTO_MAX_CIPHER_BLOCK_SIZE 16

 struct scatterlist;
......