Commit 70613783, authored by Herbert Xu, committed by David S. Miller

[CRYPTO] blkcipher: Remove alignment restriction on block size

Previously we assumed for convenience that the block size is a multiple of
the algorithm's required alignment.  With the pending addition of CTR this
will no longer be the case as the block size will be 1 due to it being a
stream cipher.  However, the alignment requirement will be that of the
underlying implementation which will most likely be greater than 1.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e4c5c6c9
@@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
 
-	if (alg->cra_alignmask & alg->cra_blocksize)
-		return -EINVAL;
-
 	if (alg->cra_blocksize > PAGE_SIZE / 8)
 		return -EINVAL;
 
@@ -149,6 +149,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 				      unsigned int alignmask)
 {
 	unsigned int n;
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
 
 	if (walk->buffer)
 		goto ok;
@@ -167,8 +168,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
 					  alignmask + 1);
 	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
-	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
-						 bsize);
+	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
+						 aligned_bsize, bsize);
 
 	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
@@ -278,7 +279,9 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 {
 	unsigned bs = crypto_blkcipher_blocksize(tfm);
 	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
-	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
 	u8 *iv;
 
 	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
@@ -287,8 +290,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 		return -ENOMEM;
 
 	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
-	iv = blkcipher_get_spot(iv, bs) + bs;
-	iv = blkcipher_get_spot(iv, bs) + bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
 	iv = blkcipher_get_spot(iv, ivsize);
 
 	walk->iv = memcpy(iv, walk->iv, ivsize);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment