Commit b99b66de authored by James Morris

[CRYPTO]: Add initial crypto api subsystem.

parent ecf2c214
@@ -209,7 +209,7 @@ endif
include arch/$(ARCH)/Makefile
-core-y += kernel/ mm/ fs/ ipc/ security/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/
SUBDIRS += $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -398,4 +398,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -553,4 +553,5 @@ dep_bool ' Kernel low-level debugging messages via UART2' CONFIG_DEBUG_CLPS71
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -229,4 +229,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -477,6 +477,7 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
if [ "$CONFIG_SMP" = "y" ]; then
@@ -288,3 +288,4 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
@@ -547,4 +547,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -497,4 +497,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -251,4 +251,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -195,4 +195,5 @@ bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -625,4 +625,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
@@ -207,4 +207,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -76,4 +76,5 @@ bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -80,4 +80,5 @@ bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -369,4 +369,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -252,4 +252,5 @@ bool 'Spinlock debugging' CONFIG_DEBUG_SPINLOCK
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -295,4 +295,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
@@ -237,4 +237,5 @@ fi
endmenu
source security/Config.in
+source crypto/Config.in
source lib/Config.in
#
# Cryptographic API Components
#
CONFIG_CRYPTO
This option provides the core Cryptographic API.
CONFIG_CRYPTO_MD5
MD5 message digest algorithm (RFC1321), including HMAC (RFC2104, RFC2403).
CONFIG_CRYPTO_SHA1
SHA-1 secure hash standard (FIPS 180-1), including HMAC (RFC2104, RFC2404).
CONFIG_CRYPTO_DES
DES cipher algorithm (FIPS 46-2), and 3DES_EDE.
CONFIG_CRYPTO_TEST
Quick & dirty crypto test module.
#
# Cryptographic API Configuration
#
mainmenu_option next_comment
comment 'Cryptographic options'
bool 'Cryptographic API' CONFIG_CRYPTO
if [ "$CONFIG_CRYPTO" = "y" ]; then
tristate ' MD5 digest algorithm' CONFIG_CRYPTO_MD5
tristate ' SHA-1 digest algorithm' CONFIG_CRYPTO_SHA1
tristate ' DES/3DES cipher algorithms' CONFIG_CRYPTO_DES
tristate ' Testing module' CONFIG_CRYPTO_TEST
fi
endmenu
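For reference, with the core API built in and the algorithms as modules, the resulting .config fragment would look something like this (the y/m split is illustrative; CONFIG_CRYPTO itself is a bool, the rest are tristate):
CONFIG_CRYPTO=y
CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_TEST=m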
#
# Cryptographic API
#
export-objs := api.o
obj-$(CONFIG_CRYPTO) += api.o cipher.o digest.o compress.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1.o
obj-$(CONFIG_CRYPTO_DES) += des.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
include $(TOPDIR)/Rules.make
/*
* Scatterlist Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/crypto.h>
#include "internal.h"
static rwlock_t crypto_alg_lock = RW_LOCK_UNLOCKED;
static LIST_HEAD(crypto_alg_list);
static void *c_start(struct seq_file *m, loff_t *pos)
{
struct list_head *v;
loff_t n = *pos;
read_lock(&crypto_alg_lock);
list_for_each(v, &crypto_alg_list)
if (!n--)
return list_entry(v, struct crypto_alg, cra_list);
return NULL;
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
struct list_head *v = p;
(*pos)++;
v = v->next;
return (v == &crypto_alg_list) ?
NULL : list_entry(v, struct crypto_alg, cra_list);
}
static void c_stop(struct seq_file *m, void *p)
{
read_unlock(&crypto_alg_lock);
}
static int c_show(struct seq_file *m, void *p)
{
struct crypto_alg *alg = (struct crypto_alg *)p;
seq_printf(m, "name : %s\n", alg->cra_name);
seq_printf(m, "id : 0x%08x\n", alg->cra_id);
seq_printf(m, "blocksize : %d\n", alg->cra_blocksize);
switch (alg->cra_id & CRYPTO_TYPE_MASK) {
case CRYPTO_TYPE_CIPHER:
seq_printf(m, "keysize : %d\n", alg->cra_cipher.cia_keysize);
seq_printf(m, "ivsize : %d\n", alg->cra_cipher.cia_ivsize);
break;
case CRYPTO_TYPE_DIGEST:
seq_printf(m, "digestsize : %d\n",
alg->cra_digest.dia_digestsize);
break;
}
seq_putc(m, '\n');
return 0;
}
static struct seq_operations crypto_seq_ops = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = c_show
};
static int crypto_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &crypto_seq_ops);
}
struct file_operations proc_crypto_ops = {
.open = crypto_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
};
static inline void crypto_alg_get(struct crypto_alg *alg)
{
/* XXX: inc refcount */
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
/* XXX: dec refcount */
}
struct crypto_alg *crypto_alg_lookup(__u32 algid)
{
struct list_head *p;
struct crypto_alg *alg = NULL;
read_lock(&crypto_alg_lock);
list_for_each(p, &crypto_alg_list) {
if ((((struct crypto_alg *)p)->cra_id
& CRYPTO_ALG_MASK) == algid) {
alg = (struct crypto_alg *)p;
crypto_alg_get(alg);
break;
}
}
read_unlock(&crypto_alg_lock);
return alg;
}
static void crypto_init_ops(struct crypto_tfm *tfm)
{
switch (crypto_tfm_type(tfm) & CRYPTO_TYPE_MASK) {
case CRYPTO_TYPE_CIPHER:
crypto_init_cipher_ops(tfm);
break;
case CRYPTO_TYPE_DIGEST:
crypto_init_digest_ops(tfm);
break;
case CRYPTO_TYPE_COMP:
crypto_init_compress_ops(tfm);
break;
default:
BUG();
}
}
/*
* Todo: try and load the module if the lookup fails.
*/
struct crypto_tfm *crypto_alloc_tfm(__u32 id)
{
struct crypto_tfm *tfm = NULL;
struct crypto_alg *alg;
alg = crypto_alg_lookup(id & CRYPTO_ALG_MASK);
if (alg == NULL)
goto out;
tfm = kmalloc(sizeof(*tfm), GFP_KERNEL);
if (tfm == NULL)
goto out_put;
if (alg->cra_ctxsize) {
tfm->crt_ctx = kmalloc(alg->cra_ctxsize, GFP_KERNEL);
if (tfm->crt_ctx == NULL)
goto out_free_tfm;
}
if ((alg->cra_id & CRYPTO_TYPE_MASK) == CRYPTO_TYPE_CIPHER) {
if (alg->cra_cipher.cia_ivsize) {
tfm->crt_cipher.cit_iv =
kmalloc(alg->cra_cipher.cia_ivsize, GFP_KERNEL);
if (tfm->crt_cipher.cit_iv == NULL)
goto out_free_ctx;
}
tfm->crt_cipher.cit_mode = id & CRYPTO_MODE_MASK;
}
tfm->__crt_alg = alg;
crypto_init_ops(tfm);
goto out;
out_free_ctx:
if (alg->cra_ctxsize) /* tfm->__crt_alg is not yet set on this path */
kfree(tfm->crt_ctx);
out_free_tfm:
kfree(tfm);
out_put:
crypto_alg_put(alg);
out:
return tfm;
}
void crypto_free_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_ctxsize)
kfree(tfm->crt_ctx);
if (crypto_tfm_type(tfm) == CRYPTO_TYPE_CIPHER)
if (tfm->__crt_alg->cra_cipher.cia_ivsize)
kfree(tfm->crt_cipher.cit_iv);
crypto_alg_put(tfm->__crt_alg);
kfree(tfm);
}
int crypto_register_alg(struct crypto_alg *alg)
{
int ret = 0;
struct list_head *p;
write_lock(&crypto_alg_lock);
list_for_each(p, &crypto_alg_list) {
struct crypto_alg *q = (struct crypto_alg *)p;
if (q->cra_id == alg->cra_id) {
ret = -EEXIST;
goto out;
}
}
list_add_tail(&alg->cra_list, &crypto_alg_list);
out:
write_unlock(&crypto_alg_lock);
return ret;
}
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret = -ENOENT;
struct list_head *p;
write_lock(&crypto_alg_lock);
list_for_each(p, &crypto_alg_list) {
if (alg == (struct crypto_alg *)p) {
list_del(p);
ret = 0;
goto out;
}
}
out:
write_unlock(&crypto_alg_lock);
return ret;
}
static int __init init_crypto(void)
{
struct proc_dir_entry *proc;
printk(KERN_INFO "Initializing Cryptographic API\n");
proc = create_proc_entry("crypto", 0, NULL);
if (proc)
proc->proc_fops = &proc_crypto_ops;
return 0;
}
__initcall(init_crypto);
EXPORT_SYMBOL_GPL(crypto_register_alg);
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
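As a usage illustration (not part of this commit), a kernel-side caller might drive the API above roughly as follows; the helper name is hypothetical, and the scatterlist setup mirrors the one used by hmac() in digest.c:

#include <linux/crypto.h>
#include <linux/mm.h>		/* virt_to_page(), PAGE_MASK */
#include <linux/errno.h>
#include <asm/scatterlist.h>

/* Hypothetical sketch: hash a linear kernel buffer with MD5. */
static int example_md5(const void *data, size_t len, __u8 *out)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm = crypto_alloc_tfm(CRYPTO_ALG_MD5);

	if (tfm == NULL)
		return -ENOENT;	/* algorithm not registered */

	/* Describe the buffer as a single scatterlist fragment. */
	sg.page = virt_to_page(data);
	sg.offset = ((long)data & ~PAGE_MASK);
	sg.length = len;

	crypto_digest_digest(tfm, &sg, 1, out);
	crypto_free_tfm(tfm);
	return 0;
}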
/*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include "internal.h"
typedef void (cryptfn_t)(void *, __u8 *, __u8 *);
typedef void (procfn_t)(struct crypto_tfm *, __u8 *, cryptfn_t, int enc);
static inline void xor_64(__u8 *a, const __u8 *b)
{
((__u32 *)a)[0] ^= ((__u32 *)b)[0];
((__u32 *)a)[1] ^= ((__u32 *)b)[1];
}
static inline size_t sglen(struct scatterlist *sg, size_t nsg)
{
int i;
size_t n;
for (i = 0, n = 0; i < nsg; i++)
n += sg[i].length;
return n;
}
/*
* Do not call this unless the total length of all of the fragments
* has been verified as a multiple of the block size.
*/
static int copy_chunks(struct crypto_tfm *tfm, __u8 *buf,
struct scatterlist *sg, int sgidx,
int rlen, int *last, int in)
{
int i, copied, coff, aligned;
size_t bsize = crypto_tfm_blocksize(tfm);
for (i = sgidx, copied = 0, aligned = 0; copied < bsize; i++) {
int len = sg[i].length;
int clen;
char *p;
if (copied) {
coff = 0;
clen = min_t(int, len, bsize - copied);
if (len == bsize - copied)
aligned = 1; /* last + right aligned */
} else {
coff = len - rlen;
clen = rlen;
}
p = crypto_kmap(tfm, sg[i].page) + sg[i].offset + coff;
if (in)
memcpy(&buf[copied], p, clen);
else
memcpy(p, &buf[copied], clen);
crypto_kunmap(tfm, sg[i].page, p);
*last = aligned ? 0 : clen;
copied += clen;
}
return i - sgidx - 2 + aligned;
}
static inline int gather_chunks(struct crypto_tfm *tfm, __u8 *buf,
struct scatterlist *sg,
int sgidx, int rlen, int *last)
{
return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 1);
}
static inline int scatter_chunks(struct crypto_tfm *tfm, __u8 *buf,
struct scatterlist *sg,
int sgidx, int rlen, int *last)
{
return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 0);
}
/*
* Generic encrypt/decrypt wrapper for ciphers.
*
* If we find a remnant at the end of a frag, we have to encrypt or
* decrypt across possibly multiple page boundaries via a temporary
* block, then continue processing with a chunk offset until the end
* of a frag is block aligned.
*/
static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
size_t nsg, cryptfn_t crfn, procfn_t prfn, int enc)
{
int i, coff;
size_t bsize = crypto_tfm_blocksize(tfm);
__u8 tmp[CRYPTO_MAX_BLOCK_SIZE];
if (sglen(sg, nsg) % bsize) {
tfm->crt_flags |= CRYPTO_BAD_BLOCK_LEN;
return -EINVAL;
}
for (i = 0, coff = 0; i < nsg; i++) {
int n = 0;
int len = sg[i].length - coff;
char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset + coff;
while (len) {
if (len < bsize) {
crypto_kunmap(tfm, sg[i].page, p);
n = gather_chunks(tfm, tmp, sg, i, len, &coff);
prfn(tfm, tmp, crfn, enc);
scatter_chunks(tfm, tmp, sg, i, len, &coff);
crypto_yield(tfm);
goto unmapped;
} else {
prfn(tfm, p, crfn, enc);
crypto_yield(tfm);
/*
* Step to the next block in this frag.  The kmap()
* mapping can be held across a reschedule; in the
* CRYPTO_ATOMIC case crypto_yield() never sleeps.
*/
p += bsize;
len -= bsize;
/* End of frag with no remnant? */
if (coff && len == 0)
coff = 0;
}
}
crypto_kunmap(tfm, sg[i].page, p);
unmapped:
i += n;
}
return 0;
}
/*
* CBC chaining.  Note that xor_64() ties this to ciphers with a
* 64-bit block size; DES and 3DES are the only block ciphers here.
*/
static void cbc_process(struct crypto_tfm *tfm,
__u8 *block, cryptfn_t fn, int enc)
{
if (enc) {
xor_64(tfm->crt_cipher.cit_iv, block);
fn(tfm->crt_ctx, block, tfm->crt_cipher.cit_iv);
memcpy(tfm->crt_cipher.cit_iv, block,
crypto_tfm_blocksize(tfm));
} else {
__u8 buf[CRYPTO_MAX_BLOCK_SIZE];
fn(tfm->crt_ctx, buf, block);
xor_64(buf, tfm->crt_cipher.cit_iv);
memcpy(tfm->crt_cipher.cit_iv, block,
crypto_tfm_blocksize(tfm));
memcpy(block, buf, crypto_tfm_blocksize(tfm));
}
}
static void ecb_process(struct crypto_tfm *tfm, __u8 *block,
cryptfn_t fn, int enc)
{
fn(tfm->crt_ctx, block, block);
}
static int setkey(struct crypto_tfm *tfm, const __u8 *key, size_t keylen)
{
return tfm->__crt_alg->cra_cipher.cia_setkey(tfm->crt_ctx, key,
keylen, &tfm->crt_flags);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return crypt(tfm, sg, nsg,
tfm->__crt_alg->cra_cipher.cia_encrypt, ecb_process, 1);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return crypt(tfm, sg, nsg,
tfm->__crt_alg->cra_cipher.cia_decrypt, ecb_process, 0);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return crypt(tfm, sg, nsg,
tfm->__crt_alg->cra_cipher.cia_encrypt, cbc_process, 1);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return crypt(tfm, sg, nsg,
tfm->__crt_alg->cra_cipher.cia_decrypt, cbc_process, 0);
}
static int nocrypt(struct crypto_tfm *tfm, struct scatterlist *sg, size_t nsg)
{
return -ENOSYS;
}
void crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
struct cipher_tfm *ops = &tfm->crt_cipher;
ops->cit_setkey = setkey;
switch (tfm->crt_cipher.cit_mode) {
case CRYPTO_MODE_ECB:
ops->cit_encrypt = ecb_encrypt;
ops->cit_decrypt = ecb_decrypt;
break;
case CRYPTO_MODE_CBC:
ops->cit_encrypt = cbc_encrypt;
ops->cit_decrypt = cbc_decrypt;
break;
case CRYPTO_MODE_CFB:
ops->cit_encrypt = nocrypt;
ops->cit_decrypt = nocrypt;
break;
case CRYPTO_MODE_CTR:
ops->cit_encrypt = nocrypt;
ops->cit_decrypt = nocrypt;
break;
default:
BUG();
}
}
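To see how the mode dispatch above is exercised, here is a hypothetical caller (not part of this commit) encrypting a single 8-byte block in place with DES in CBC mode; the 8-byte key length assumes DES:

#include <linux/crypto.h>
#include <linux/mm.h>		/* virt_to_page(), PAGE_MASK */
#include <linux/errno.h>
#include <asm/scatterlist.h>

/* Hypothetical sketch: one-block DES-CBC encryption in place. */
static int example_des_cbc_encrypt(const __u8 *key, __u8 *iv, __u8 *block)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm = crypto_alloc_tfm(CRYPTO_ALG_DES_CBC);
	int ret;

	if (tfm == NULL)
		return -ENOENT;

	ret = crypto_cipher_setkey(tfm, key, 8);
	if (ret == 0) {
		crypto_cipher_copy_iv(tfm, iv, crypto_tfm_ivsize(tfm));
		sg.page = virt_to_page(block);
		sg.offset = ((long)block & ~PAGE_MASK);
		sg.length = 8;
		ret = crypto_cipher_encrypt(tfm, &sg, 1);
	}
	crypto_free_tfm(tfm);
	return ret;
}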
/*
* Cryptographic API.
*
* Compression operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/types.h>
#include <linux/list.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include "internal.h"
/*
* This code currently implements blazingly fast and
* lossless Quadruple ROT13 compression.
*/
static void crypto_compress(struct crypto_tfm *tfm)
{
return;
}
static void crypto_decompress(struct crypto_tfm *tfm)
{
return;
}
void crypto_init_compress_ops(struct crypto_tfm *tfm)
{
struct compress_tfm *ops = &tfm->crt_compress;
ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress;
}
/*
* Cryptographic API.
*
* Digest operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include "internal.h"
static void init(struct crypto_tfm *tfm)
{
tfm->__crt_alg->cra_digest.dia_init(tfm->crt_ctx);
return;
}
static void update(struct crypto_tfm *tfm, struct scatterlist *sg, size_t nsg)
{
int i;
for (i = 0; i < nsg; i++) {
char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
crypto_kunmap(tfm, sg[i].page, p);
crypto_yield(tfm);
}
return;
}
static void final(struct crypto_tfm *tfm, __u8 *out)
{
tfm->__crt_alg->cra_digest.dia_final(tfm->crt_ctx, out);
return;
}
static void digest(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg, __u8 *out)
{
int i;
tfm->crt_digest.dit_init(tfm);
for (i = 0; i < nsg; i++) {
char *p = crypto_kmap(tfm, sg[i].page) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
crypto_kunmap(tfm, sg[i].page, p);
crypto_yield(tfm);
}
crypto_digest_final(tfm, out);
return;
}
static void hmac(struct crypto_tfm *tfm, __u8 *key, size_t keylen,
struct scatterlist *sg, size_t nsg, __u8 *out)
{
int i;
struct scatterlist tmp;
char ipad[crypto_tfm_blocksize(tfm) + 1];
char opad[crypto_tfm_blocksize(tfm) + 1];
if (keylen > crypto_tfm_blocksize(tfm)) {
tmp.page = virt_to_page(key);
tmp.offset = ((long)key & ~PAGE_MASK);
tmp.length = keylen;
crypto_digest_digest(tfm, &tmp, 1, key);
keylen = crypto_tfm_digestsize(tfm);
}
memset(ipad, 0, sizeof(ipad));
memset(opad, 0, sizeof(opad));
memcpy(ipad, key, keylen);
memcpy(opad, key, keylen);
for (i = 0; i < crypto_tfm_blocksize(tfm); i++) {
ipad[i] ^= 0x36;
opad[i] ^= 0x5c;
}
tmp.page = virt_to_page(ipad);
tmp.offset = ((long)ipad & ~PAGE_MASK);
tmp.length = crypto_tfm_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_update(tfm, sg, nsg);
crypto_digest_final(tfm, out);
tmp.page = virt_to_page(opad);
tmp.offset = ((long)opad & ~PAGE_MASK);
tmp.length = crypto_tfm_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
tmp.page = virt_to_page(out);
tmp.offset = ((long)out & ~PAGE_MASK);
tmp.length = crypto_tfm_digestsize(tfm);
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_final(tfm, out);
return;
}
void crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct digest_tfm *ops = &tfm->crt_digest;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
ops->dit_hmac = hmac;
}
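The hmac() routine above is the RFC 2104 construction H(K XOR opad, H(K XOR ipad, text)). A hypothetical caller (not part of this commit) would reach it through the crypto_digest_hmac() wrapper:

#include <linux/crypto.h>
#include <linux/mm.h>		/* virt_to_page(), PAGE_MASK */
#include <linux/errno.h>
#include <asm/scatterlist.h>

/* Hypothetical sketch: HMAC-SHA1 over a linear buffer; out receives
 * crypto_tfm_digestsize(tfm) == 20 bytes.  Note that hmac() above
 * overwrites *key in place when keylen exceeds the block size. */
static int example_hmac_sha1(__u8 *key, size_t keylen,
			     const void *data, size_t len, __u8 *out)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm = crypto_alloc_tfm(CRYPTO_ALG_SHA1);

	if (tfm == NULL)
		return -ENOENT;

	sg.page = virt_to_page(data);
	sg.offset = ((long)data & ~PAGE_MASK);
	sg.length = len;

	crypto_digest_hmac(tfm, key, keylen, &sg, 1, out);
	crypto_free_tfm(tfm);
	return 0;
}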
/*
* Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_INTERNAL_H
#define _CRYPTO_INTERNAL_H
#include <linux/highmem.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
static inline void *crypto_kmap(struct crypto_tfm *tfm, struct page *page)
{
if (tfm->crt_flags & CRYPTO_ATOMIC) {
#ifdef CONFIG_HIGHMEM
local_bh_disable();
#endif
return kmap_atomic(page, KM_CRYPTO);
} else
return kmap(page);
}
static inline void crypto_kunmap(struct crypto_tfm *tfm,
struct page *page, void *vaddr)
{
if (tfm->crt_flags & CRYPTO_ATOMIC) {
kunmap_atomic(vaddr, KM_CRYPTO);
#ifdef CONFIG_HIGHMEM
local_bh_enable();
#endif
} else
kunmap(page);
}
static inline void crypto_yield(struct crypto_tfm *tfm)
{
if (!(tfm->crt_flags & CRYPTO_ATOMIC))
cond_resched();
}
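/*
 * Usage note (inferred from the checks above, not stated elsewhere in
 * this commit): a caller in softirq or other atomic context would set
 * CRYPTO_ATOMIC on its transform, e.g.
 *
 *	tfm->crt_flags |= CRYPTO_ATOMIC;
 *
 * so that kmap_atomic() is used and crypto_yield() never reschedules.
 */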
void crypto_init_digest_ops(struct crypto_tfm *tfm);
void crypto_init_cipher_ops(struct crypto_tfm *tfm);
void crypto_init_compress_ops(struct crypto_tfm *tfm);
#endif /* _CRYPTO_INTERNAL_H */
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
struct md5_ctx {
__u32 hash[MD5_HASH_WORDS];
__u32 block[MD5_BLOCK_WORDS];
__u64 byte_count;
};
static inline void md5_transform(__u32 *hash, __u32 const *in)
{
register __u32 a, b, c, d;
a = hash[0];
b = hash[1];
c = hash[2];
d = hash[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
hash[0] += a;
hash[1] += b;
hash[2] += c;
hash[3] += d;
}
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(__u32 *buf, unsigned words)
{
while (words--) {
__le32_to_cpus(buf);
buf++;
}
}
static inline void cpu_to_le32_array(__u32 *buf, unsigned words)
{
while (words--) {
__cpu_to_le32s(buf);
buf++;
}
}
static inline void md5_transform_helper(struct md5_ctx *ctx)
{
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(__u32));
md5_transform(ctx->hash, ctx->block);
}
static void md5_init(void *ctx)
{
struct md5_ctx *mctx = ctx;
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
mctx->hash[2] = 0x98badcfe;
mctx->hash[3] = 0x10325476;
mctx->byte_count = 0;
}
static void md5_update(void *ctx, const __u8 *data, size_t len)
{
struct md5_ctx *mctx = ctx;
const __u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
if (avail > len) {
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, len);
return;
}
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, avail);
md5_transform_helper(mctx);
data += avail;
len -= avail;
while (len >= sizeof(mctx->block)) {
memcpy(mctx->block, data, sizeof(mctx->block));
md5_transform_helper(mctx);
data += sizeof(mctx->block);
len -= sizeof(mctx->block);
}
memcpy(mctx->block, data, len);
}
static void md5_final(void *ctx, __u8 *out)
{
struct md5_ctx *mctx = ctx;
const int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (__u64));
md5_transform_helper(mctx);
p = (char *)mctx->block;
padding = 56;
}
memset(p, 0, padding);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
sizeof(__u64)) / sizeof(__u32));
md5_transform(mctx->hash, mctx->block);
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(__u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx)); /* sizeof(mctx) would only wipe a pointer's worth */
}
static struct crypto_alg alg = {
.cra_id = CRYPTO_ALG_MD5,
.cra_name = "md5",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct md5_ctx),
.cra_u = { .digest = {
.dia_digestsize = MD5_DIGEST_SIZE,
.dia_init = md5_init,
.dia_update = md5_update,
.dia_final = md5_final } }
};
static int __init init(void)
{
INIT_LIST_HEAD(&alg.cra_list);
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
/*
* Cryptographic API.
*
* SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface. Originally based on the public domain
* implementation written by Steve Reid.
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <asm/scatterlist.h>
#define SHA1_DIGEST_SIZE 20
#define SHA1_HMAC_BLOCK_SIZE 64
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
/* blk0() and blk() perform the initial expand. */
/* I got the idea of expanding during the round function from SSLeay */
#define blk0(i) block32[i]
#define blk(i) (block32[i&15] = rol(block32[(i+13)&15]^block32[(i+8)&15] \
^block32[(i+2)&15]^block32[i&15],1))
/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5); \
w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5); \
w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5); \
w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
struct sha1_ctx {
__u64 count;
__u32 state[5];
__u8 buffer[64];
};
/* Hash a single 512-bit block. This is the core of the algorithm. */
static void sha1_transform(__u32 *state, const char *in)
{
__u32 a, b, c, d, e;
__u32 block32[16];
/* convert/copy data to workspace */
for (a = 0; a < sizeof(block32)/sizeof(__u32); a++)
block32[a] = be32_to_cpu (((const __u32 *)in)[a]);
/* Copy context->state[] to working vars */
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
/* 4 rounds of 20 operations each. Loop unrolled. */
R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
/* Add the working vars back into context.state[] */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
state[4] += e;
/* Wipe variables */
a = b = c = d = e = 0;
memset (block32, 0x00, sizeof block32);
}
static void sha1_init(void *ctx)
{
struct sha1_ctx *sctx = ctx;
static const struct sha1_ctx initstate = {
0,
{ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
{ 0, }
};
*sctx = initstate;
}
static void sha1_update(void *ctx, const __u8 *data, size_t len)
{
struct sha1_ctx *sctx = ctx;
unsigned i, j;
j = (sctx->count >> 3) & 0x3f;
sctx->count += len << 3;
if ((j + len) > 63) {
memcpy(&sctx->buffer[j], data, (i = 64-j));
sha1_transform(sctx->state, sctx->buffer);
for ( ; i + 63 < len; i += 64) {
sha1_transform(sctx->state, &data[i]);
}
j = 0;
}
else i = 0;
memcpy(&sctx->buffer[j], &data[i], len - i);
}
/* Add padding and return the message digest. */
static void sha1_final(void* ctx, __u8 *out)
{
struct sha1_ctx *sctx = ctx;
__u32 i, j, index, padlen;
__u64 t;
__u8 bits[8] = { 0, };
static const __u8 padding[64] = { 0x80, };
t = sctx->count;
bits[7] = 0xff & t; t>>=8;
bits[6] = 0xff & t; t>>=8;
bits[5] = 0xff & t; t>>=8;
bits[4] = 0xff & t; t>>=8;
bits[3] = 0xff & t; t>>=8;
bits[2] = 0xff & t; t>>=8;
bits[1] = 0xff & t; t>>=8;
bits[0] = 0xff & t;
/* Pad out to 56 mod 64 */
index = (sctx->count >> 3) & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(sctx, padding, padlen);
/* Append length */
sha1_update(sctx, bits, sizeof bits);
/* Store state in digest */
for (i = j = 0; i < 5; i++, j += 4) {
__u32 t2 = sctx->state[i];
out[j+3] = t2 & 0xff; t2>>=8;
out[j+2] = t2 & 0xff; t2>>=8;
out[j+1] = t2 & 0xff; t2>>=8;
out[j ] = t2 & 0xff;
}
/* Wipe context */
memset(sctx, 0, sizeof *sctx);
}
static struct crypto_alg alg = {
.cra_id = CRYPTO_ALG_SHA1,
.cra_name = "sha1",
.cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha1_ctx),
.cra_u = { .digest = {
.dia_digestsize = SHA1_DIGEST_SIZE,
.dia_init = sha1_init,
.dia_update = sha1_update,
.dia_final = sha1_final } }
};
static int __init init(void)
{
INIT_LIST_HEAD(&alg.cra_list);
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
@@ -14,16 +14,17 @@
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_TYPE_NR
+D(2) KM_CRYPTO,
+D(3) KM_SKB_DATA_SOFTIRQ,
+D(4) KM_USER0,
+D(5) KM_USER1,
+D(6) KM_BIO_SRC_IRQ,
+D(7) KM_BIO_DST_IRQ,
+D(8) KM_PTE0,
+D(9) KM_PTE1,
+D(10) KM_IRQ0,
+D(11) KM_IRQ1,
+D(12) KM_TYPE_NR
};
#undef D
@@ -12,17 +12,18 @@
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_PTE2,
-D(10) KM_IRQ0,
-D(11) KM_IRQ1,
-D(12) KM_TYPE_NR
+D(2) KM_CRYPTO,
+D(3) KM_SKB_DATA_SOFTIRQ,
+D(4) KM_USER0,
+D(5) KM_USER1,
+D(6) KM_BIO_SRC_IRQ,
+D(7) KM_BIO_DST_IRQ,
+D(8) KM_PTE0,
+D(9) KM_PTE1,
+D(10) KM_PTE2,
+D(11) KM_IRQ0,
+D(12) KM_IRQ1,
+D(13) KM_TYPE_NR
};
#undef D
@@ -12,16 +12,17 @@
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_TYPE_NR
+D(2) KM_CRYPTO,
+D(3) KM_SKB_DATA_SOFTIRQ,
+D(4) KM_USER0,
+D(5) KM_USER1,
+D(6) KM_BIO_SRC_IRQ,
+D(7) KM_BIO_DST_IRQ,
+D(8) KM_PTE0,
+D(9) KM_PTE1,
+D(10) KM_IRQ0,
+D(11) KM_IRQ1,
+D(12) KM_TYPE_NR
};
#undef D
@@ -5,6 +5,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
@@ -5,6 +5,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
@@ -5,6 +5,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
@@ -4,6 +4,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
@@ -8,6 +8,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
@@ -4,6 +4,7 @@
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
+KM_CRYPTO,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
/*
* Scatterlist Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
/*
* Crypto context flags.
*/
#define CRYPTO_WEAK_KEY_CHECK 0x0001
#define CRYPTO_WEAK_KEY 0x0008
#define CRYPTO_BAD_KEY_LEN 0x0010
#define CRYPTO_BAD_BLOCK_LEN 0x0020
#define CRYPTO_ATOMIC 0x1000
/*
* Algorithm identifiers. These may be expanded later to 64 bits
* and include vendor id info, so we can have multiple versions
* (e.g. asm, various hardware versions etc).
*
* Todo: sadb translation.
*/
#define CRYPTO_TYPE_MASK 0xf0000000
#define CRYPTO_MODE_MASK 0x0ff00000
#define CRYPTO_ALG_MASK 0x000fffff
#define CRYPTO_TYPE_CIPHER 0x00000000
#define CRYPTO_TYPE_DIGEST 0x10000000
#define CRYPTO_TYPE_COMP 0x20000000
#define CRYPTO_MODE_ECB 0x00000000
#define CRYPTO_MODE_CBC 0x00100000
#define CRYPTO_MODE_CFB 0x00200000
#define CRYPTO_MODE_CTR 0x00400000
#define CRYPTO_ALG_NULL 0x00000000
#define CRYPTO_ALG_DES (0x00000001|CRYPTO_TYPE_CIPHER)
#define CRYPTO_ALG_DES_ECB (CRYPTO_ALG_DES|CRYPTO_MODE_ECB)
#define CRYPTO_ALG_DES_CBC (CRYPTO_ALG_DES|CRYPTO_MODE_CBC)
#define CRYPTO_ALG_3DES_EDE (0x00000002|CRYPTO_TYPE_CIPHER)
#define CRYPTO_ALG_3DES_EDE_ECB (CRYPTO_ALG_3DES_EDE|CRYPTO_MODE_ECB)
#define CRYPTO_ALG_3DES_EDE_CBC (CRYPTO_ALG_3DES_EDE|CRYPTO_MODE_CBC)
#define CRYPTO_ALG_MD5 (0x00000f00|CRYPTO_TYPE_DIGEST)
#define CRYPTO_ALG_SHA1 (0x00000f01|CRYPTO_TYPE_DIGEST)
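/*
 * Example of the layout: CRYPTO_ALG_DES_CBC == 0x00100001, i.e.
 *	(id & CRYPTO_TYPE_MASK) == CRYPTO_TYPE_CIPHER (0x00000000)
 *	(id & CRYPTO_MODE_MASK) == CRYPTO_MODE_CBC    (0x00100000)
 *	(id & CRYPTO_ALG_MASK)  == 0x00000001         (DES)
 */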
#define CRYPTO_MAX_ALG_NAME 64
#define CRYPTO_MAX_BLOCK_SIZE 16
struct scatterlist;
struct cipher_alg {
size_t cia_keysize;
size_t cia_ivsize;
int (*cia_setkey)(void *ctx, const __u8 *key, size_t keylen,
int *flags);
void (*cia_encrypt)(void *ctx, __u8 *dst, __u8 *src);
void (*cia_decrypt)(void *ctx, __u8 *dst, __u8 *src);
};
struct digest_alg {
size_t dia_digestsize;
void (*dia_init)(void *ctx);
void (*dia_update)(void *ctx, const __u8 *data, size_t len);
void (*dia_final)(void *ctx, __u8 *out);
};
struct compress_alg {
void (*coa_compress)(void);
void (*coa_decompress)(void);
};
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_compress cra_u.compress
struct crypto_alg {
struct list_head cra_list;
__u32 cra_id;
size_t cra_blocksize;
size_t cra_ctxsize;
char cra_name[CRYPTO_MAX_ALG_NAME];
union {
struct cipher_alg cipher;
struct digest_alg digest;
struct compress_alg compress;
} cra_u;
};
/*
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
struct crypto_tfm;
/*
* Transformations: user-instantiated algorithms.
*/
struct cipher_tfm {
void *cit_iv;
__u32 cit_mode;
int (*cit_setkey)(struct crypto_tfm *tfm, const __u8 *key,
size_t keylen);
int (*cit_encrypt)(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg);
int (*cit_decrypt)(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg);
};
struct digest_tfm {
void (*dit_init)(struct crypto_tfm *tfm);
void (*dit_update)(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg);
void (*dit_final)(struct crypto_tfm *tfm, __u8 *out);
void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
size_t nsg, __u8 *out);
void (*dit_hmac)(struct crypto_tfm *tfm, __u8 *key, size_t keylen,
struct scatterlist *sg, size_t nsg, __u8 *out);
};
struct compress_tfm {
void (*cot_compress)(struct crypto_tfm *tfm);
void (*cot_decompress)(struct crypto_tfm *tfm);
};
#define crt_cipher crt_u.cipher
#define crt_digest crt_u.digest
#define crt_compress crt_u.compress
struct crypto_tfm {
void *crt_ctx;
int crt_flags;
union {
struct cipher_tfm cipher;
struct digest_tfm digest;
struct compress_tfm compress;
} crt_u;
struct crypto_alg *__crt_alg;
};
/*
* Finds specified algorithm, allocates and returns a transform for it.
* Will try to load a module based on the name if not present
* in the kernel. Increments its algorithm refcount.
*/
struct crypto_tfm *crypto_alloc_tfm(__u32 id);
/*
* Frees the transform and decrements its algorithm's refcount.
*/
void crypto_free_tfm(struct crypto_tfm *tfm);
/*
* API wrappers.
*/
static inline void crypto_digest_init(struct crypto_tfm *tfm)
{
tfm->crt_digest.dit_init(tfm);
}
static inline void crypto_digest_update(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
tfm->crt_digest.dit_update(tfm, sg, nsg);
}
static inline void crypto_digest_final(struct crypto_tfm *tfm, __u8 *out)
{
tfm->crt_digest.dit_final(tfm, out);
}
static inline void crypto_digest_digest(struct crypto_tfm *tfm,
struct scatterlist *sg,
size_t nsg, __u8 *out)
{
tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
}
static inline void crypto_digest_hmac(struct crypto_tfm *tfm, __u8 *key,
size_t keylen, struct scatterlist *sg,
size_t nsg, __u8 *out)
{
tfm->crt_digest.dit_hmac(tfm, key, keylen, sg, nsg, out);
}
static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
const __u8 *key, size_t keylen)
{
return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
}
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return tfm->crt_cipher.cit_encrypt(tfm, sg, nsg);
}
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, size_t nsg)
{
return tfm->crt_cipher.cit_decrypt(tfm, sg, nsg);
}
static inline void crypto_cipher_copy_iv(struct crypto_tfm *tfm,
__u8 *src, size_t len)
{
memcpy(tfm->crt_cipher.cit_iv, src, len);
}
static inline void crypto_comp_compress(struct crypto_tfm *tfm)
{
tfm->crt_compress.cot_compress(tfm);
}
static inline void crypto_comp_decompress(struct crypto_tfm *tfm)
{
tfm->crt_compress.cot_decompress(tfm);
}
/*
* Transform helpers which allow the underlying algorithm to be queried.
*/
static inline int crypto_tfm_id(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_id;
}
static inline int crypto_tfm_alg(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_id & CRYPTO_ALG_MASK;
}
static inline char *crypto_tfm_name(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_name;
}
static inline __u32 crypto_tfm_type(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_id & CRYPTO_TYPE_MASK;
}
static inline size_t crypto_tfm_keysize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_cipher.cia_keysize;
}
static inline size_t crypto_tfm_ivsize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_cipher.cia_ivsize;
}
static inline size_t crypto_tfm_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
}
static inline size_t crypto_tfm_digestsize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_digest.dia_digestsize;
}
#endif /* _LINUX_CRYPTO_H */