Commit 065ce327 authored by Herbert Xu

crypto: glue_helper - Add skcipher xts helpers

This patch adds xts helpers that use the skcipher interface rather
than blkcipher.  This will be used by aesni_intel.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 043a4400
...@@ -27,10 +27,10 @@ ...@@ -27,10 +27,10 @@
#include <linux/module.h> #include <linux/module.h>
#include <crypto/b128ops.h> #include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h> #include <crypto/lrw.h>
#include <crypto/xts.h> #include <crypto/xts.h>
#include <asm/crypto/glue_helper.h> #include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct blkcipher_desc *desc,
...@@ -339,6 +339,41 @@ static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, ...@@ -339,6 +339,41 @@ static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
return nbytes; return nbytes;
} }
/*
 * Run the XTS data path over the chunk currently mapped by @walk.
 *
 * The routines in @gctx are ordered by decreasing parallel batch size;
 * each one is used for as long as a full batch of 128-bit blocks
 * remains, then the next (narrower) routine mops up the tail.
 *
 * Returns the number of bytes of this chunk left unprocessed (less
 * than one 128-bit block).
 */
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int remain = walk->nbytes;
	unsigned int i;

	/* Widest batch first; fall through to narrower ones for the tail. */
	for (i = 0; i < gctx->num_funcs; i++) {
		unsigned int blocks = gctx->funcs[i].num_blocks;
		unsigned int chunk = bsize * blocks;

		if (remain >= chunk) {
			do {
				/* walk->iv carries the running tweak T */
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);
				src += blocks;
				dst += blocks;
				remain -= chunk;
			} while (remain >= chunk);

			if (remain < bsize)
				break;
		}
	}

	return remain;
}
/* for implementations implementing faster XTS IV generator */ /* for implementations implementing faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst, struct blkcipher_desc *desc, struct scatterlist *dst,
...@@ -379,6 +414,43 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, ...@@ -379,6 +414,43 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
} }
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
/*
 * XTS en/decrypt a whole skcipher request using the parallel glue
 * implementations in @gctx.
 *
 * @gctx:      table of XTS routines ordered by decreasing batch size,
 *             plus the FPU threshold (fpu_blocks_limit)
 * @req:       the skcipher request to process
 * @tweak_fn:  single-block cipher used to turn the IV into the first
 *             tweak value T
 * @tweak_ctx: context passed to @tweak_fn
 * @crypt_ctx: context passed to the data-path routines in @gctx
 *
 * Returns 0 on success or a negative errno from the walk helpers.
 *
 * NOTE(review): the FPU-enable decision is made once, up front, from
 * the first chunk's length — presumably fpu_blocks_limit is chosen so
 * this is the right call for the whole request; confirm against the
 * users in aesni_intel.
 */
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
bool fpu_enabled = false;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
nbytes = walk.nbytes;
/* empty request, or skcipher_walk_virt() failed: nothing to do */
if (!nbytes)
return err;
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
&walk, fpu_enabled,
nbytes < bsize ? bsize : nbytes);
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
/*
 * Process one mapped chunk per iteration; __glue_xts_req_128bit()
 * returns the unprocessed remainder, and skcipher_walk_done()
 * advances the walk and refreshes walk.nbytes for the next chunk.
 */
while (nbytes) {
nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);
err = skcipher_walk_done(&walk, nbytes);
nbytes = walk.nbytes;
}
/* no-op if the FPU was never enabled above */
glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
common_glue_func_t fn) common_glue_func_t fn)
{ {
......
...@@ -5,8 +5,8 @@ ...@@ -5,8 +5,8 @@
#ifndef _CRYPTO_GLUE_HELPER_H #ifndef _CRYPTO_GLUE_HELPER_H
#define _CRYPTO_GLUE_HELPER_H #define _CRYPTO_GLUE_HELPER_H
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/crypto.h>
#include <asm/fpu/api.h> #include <asm/fpu/api.h>
#include <crypto/b128ops.h> #include <crypto/b128ops.h>
...@@ -69,6 +69,31 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, ...@@ -69,6 +69,31 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
return true; return true;
} }
/*
 * Decide whether the FPU should be (or stay) enabled for this walk.
 *
 * Returns the new fpu_enabled state.  A negative @fpu_blocks_limit
 * means the implementation never touches vector registers, so the
 * answer is always false.  Otherwise the FPU is switched on only once
 * the chunk is at least fpu_blocks_limit blocks long, amortizing the
 * save/restore cost.
 */
static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
					 int fpu_blocks_limit,
					 struct skcipher_walk *walk,
					 bool fpu_enabled, unsigned int nbytes)
{
	if (likely(fpu_blocks_limit < 0))
		return false;

	if (!fpu_enabled) {
		/*
		 * Vector registers only pay off on chunks large enough,
		 * so do not enable the FPU until it is necessary.
		 */
		if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
			return false;

		/* prevent sleeping if FPU is in use */
		skcipher_walk_atomise(walk);
		kernel_fpu_begin();
	}

	return true;
}
static inline void glue_fpu_end(bool fpu_enabled) static inline void glue_fpu_end(bool fpu_enabled)
{ {
if (fpu_enabled) if (fpu_enabled)
...@@ -139,6 +164,18 @@ extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, ...@@ -139,6 +164,18 @@ extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
common_glue_func_t tweak_fn, void *tweak_ctx, common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx); void *crypt_ctx);
extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx);
extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx);
extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
le128 *iv, common_glue_func_t fn); le128 *iv, common_glue_func_t fn);
......
...@@ -253,7 +253,7 @@ config CRYPTO_SIMD ...@@ -253,7 +253,7 @@ config CRYPTO_SIMD
config CRYPTO_GLUE_HELPER_X86 config CRYPTO_GLUE_HELPER_X86
tristate tristate
depends on X86 depends on X86
select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER
config CRYPTO_ENGINE config CRYPTO_ENGINE
tristate tristate
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment