Commit c778f96b authored by Ondrej Mosnacek, committed by Herbert Xu

crypto: lrw - Optimize tweak computation

This patch rewrites the tweak computation to a slightly simpler method
that performs fewer bswaps. Based on performance measurements, the new
code seems to provide slightly better performance than the old one.

PERFORMANCE MEASUREMENTS (x86_64)
Performed using: https://gitlab.com/omos/linux-crypto-bench
Crypto driver used: lrw(ecb-aes-aesni)

Before:
       ALGORITHM KEY (b)        DATA (B)   TIME ENC (ns)   TIME DEC (ns)
        lrw(aes)     256              64             204             286
        lrw(aes)     320              64             227             203
        lrw(aes)     384              64             208             204
        lrw(aes)     256             512             441             439
        lrw(aes)     320             512             456             455
        lrw(aes)     384             512             469             483
        lrw(aes)     256            4096            2136            2190
        lrw(aes)     320            4096            2161            2213
        lrw(aes)     384            4096            2295            2369
        lrw(aes)     256           16384            7692            7868
        lrw(aes)     320           16384            8230            8691
        lrw(aes)     384           16384            8971            8813
        lrw(aes)     256           32768           15336           15560
        lrw(aes)     320           32768           16410           16346
        lrw(aes)     384           32768           18023           17465

After:
       ALGORITHM KEY (b)        DATA (B)   TIME ENC (ns)   TIME DEC (ns)
        lrw(aes)     256              64             200             203
        lrw(aes)     320              64             202             204
        lrw(aes)     384              64             204             205
        lrw(aes)     256             512             415             415
        lrw(aes)     320             512             432             440
        lrw(aes)     384             512             449             451
        lrw(aes)     256            4096            1838            1995
        lrw(aes)     320            4096            2123            1980
        lrw(aes)     384            4096            2100            2119
        lrw(aes)     256           16384            7183            6954
        lrw(aes)     320           16384            7844            7631
        lrw(aes)     384           16384            8256            8126
        lrw(aes)     256           32768           14772           14484
        lrw(aes)     320           32768           15281           15431
        lrw(aes)     384           32768           16469           16293
Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent dc6d6d5a
...@@ -120,27 +120,28 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, ...@@ -120,27 +120,28 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
return 0; return 0;
} }
static inline void inc(be128 *iv) /*
{ * Returns the number of trailing '1' bits in the words of the counter, which is
be64_add_cpu(&iv->b, 1); * represented by 4 32-bit words, arranged from least to most significant.
if (!iv->b) * At the same time, increments the counter by one.
be64_add_cpu(&iv->a, 1); *
} * For example:
*
/* this returns the number of consequative 1 bits starting * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
* from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ * int i = next_index(&counter);
static inline int get_index128(be128 *block) * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
*/
static int next_index(u32 *counter)
{ {
int x; int i, res = 0;
__be32 *p = (__be32 *) block;
for (p += 3, x = 0; x < 128; p--, x += 32) { for (i = 0; i < 4; i++) {
u32 val = be32_to_cpup(p); if (counter[i] + 1 != 0) {
res += ffz(counter[i]++);
if (!~val) break;
continue; }
counter[i] = 0;
return x + ffz(val); res += 32;
} }
/* /*
...@@ -214,8 +215,9 @@ static int pre_crypt(struct skcipher_request *req) ...@@ -214,8 +215,9 @@ static int pre_crypt(struct skcipher_request *req)
struct scatterlist *sg; struct scatterlist *sg;
unsigned cryptlen; unsigned cryptlen;
unsigned offset; unsigned offset;
be128 *iv;
bool more; bool more;
__be32 *iv;
u32 counter[4];
int err; int err;
subreq = &rctx->subreq; subreq = &rctx->subreq;
...@@ -230,7 +232,12 @@ static int pre_crypt(struct skcipher_request *req) ...@@ -230,7 +232,12 @@ static int pre_crypt(struct skcipher_request *req)
cryptlen, req->iv); cryptlen, req->iv);
err = skcipher_walk_virt(&w, subreq, false); err = skcipher_walk_virt(&w, subreq, false);
iv = w.iv; iv = (__be32 *)w.iv;
counter[0] = be32_to_cpu(iv[3]);
counter[1] = be32_to_cpu(iv[2]);
counter[2] = be32_to_cpu(iv[1]);
counter[3] = be32_to_cpu(iv[0]);
while (w.nbytes) { while (w.nbytes) {
unsigned int avail = w.nbytes; unsigned int avail = w.nbytes;
...@@ -247,10 +254,16 @@ static int pre_crypt(struct skcipher_request *req) ...@@ -247,10 +254,16 @@ static int pre_crypt(struct skcipher_request *req)
/* T <- I*Key2, using the optimization /* T <- I*Key2, using the optimization
* discussed in the specification */ * discussed in the specification */
be128_xor(&rctx->t, &rctx->t, be128_xor(&rctx->t, &rctx->t,
&ctx->mulinc[get_index128(iv)]); &ctx->mulinc[next_index(counter)]);
inc(iv);
} while ((avail -= bs) >= bs); } while ((avail -= bs) >= bs);
if (w.nbytes == w.total) {
iv[0] = cpu_to_be32(counter[3]);
iv[1] = cpu_to_be32(counter[2]);
iv[2] = cpu_to_be32(counter[1]);
iv[3] = cpu_to_be32(counter[0]);
}
err = skcipher_walk_done(&w, avail); err = skcipher_walk_done(&w, avail);
} }
...@@ -548,7 +561,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ...@@ -548,7 +561,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask | inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
(__alignof__(u64) - 1); (__alignof__(__be32) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE; inst->alg.ivsize = LRW_BLOCK_SIZE;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment