Commit 35591285 authored by Stephan Mueller, committed by Herbert Xu

crypto: drbg - use CTR AES instead of ECB AES

The CTR DRBG derives its random data from the CTR that is encrypted with
AES.

This patch changes the CTR DRBG implementation such that the
CTR AES mode is employed. This allows the use of streamlined CTR AES
implementations such as ctr-aes-aesni.

Unfortunately, the following subtle changes need to be applied
when using the CTR AES mode:

- the CTR mode increments the counter after the cipher operation, but
  the CTR DRBG requires the increment before the cipher operation. Hence,
  crypto_inc is applied to the counter (drbg->V) once it is
  recalculated.

- the CTR mode wants to encrypt data, but the CTR DRBG is interested in
  the encrypted counter only. The full CTR mode output is the XOR of the
  encrypted counter with the plaintext data. To access the encrypted
  counter, the patch uses a null (all-zero) data vector as the plaintext
  to be "encrypted" (see the sketch below).
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e123be16
@@ -1567,6 +1567,7 @@ config CRYPTO_DRBG_HASH
 config CRYPTO_DRBG_CTR
         bool "Enable CTR DRBG"
         select CRYPTO_AES
+        depends on CRYPTO_CTR
         help
           Enable the CTR DRBG variant as defined in NIST SP800-90A.
...
@@ -258,6 +258,7 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
                           const struct drbg_string *in);
 static int drbg_init_sym_kernel(struct drbg_state *drbg);
 static int drbg_fini_sym_kernel(struct drbg_state *drbg);
+static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen);
 
 /* BCC function for CTR DRBG as defined in 10.4.3 */
 static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -482,35 +483,36 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
                          drbg_blocklen(drbg);
         unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
         unsigned int len = 0;
-        struct drbg_string cipherin;
 
         if (3 > reseed)
                 memset(df_data, 0, drbg_statelen(drbg));
 
+        if (!reseed) {
+                /*
+                 * The DRBG uses the CTR mode of the underlying AES cipher. The
+                 * CTR mode increments the counter value after the AES operation
+                 * but SP800-90A requires that the counter is incremented before
+                 * the AES operation. Hence, we increment it at the time we set
+                 * it by one.
+                 */
+                crypto_inc(drbg->V, drbg_blocklen(drbg));
+
+                ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
+                                             drbg_keylen(drbg));
+                if (ret)
+                        goto out;
+        }
+
         /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
         if (seed) {
                 ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
                 if (ret)
                         goto out;
-                drbg_kcapi_symsetkey(drbg, drbg->C);
         }
 
-        drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg));
-        /*
-         * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation
-         * zeroizes all memory during initialization
-         */
-        while (len < (drbg_statelen(drbg))) {
-                /* 10.2.1.2 step 2.1 */
-                crypto_inc(drbg->V, drbg_blocklen(drbg));
-                /*
-                 * 10.2.1.2 step 2.2 */
-                ret = drbg_kcapi_sym(drbg, temp + len, &cipherin);
-                if (ret)
-                        goto out;
-                /* 10.2.1.2 step 2.3 and 3 */
-                len += drbg_blocklen(drbg);
-        }
+        ret = drbg_kcapi_sym_ctr(drbg, temp, drbg_statelen(drbg));
+        if (ret)
+                return ret;
 
         /* 10.2.1.2 step 4 */
         temp_p = temp;
@@ -522,9 +524,14 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
         /* 10.2.1.2 step 5 */
         memcpy(drbg->C, temp, drbg_keylen(drbg));
-        drbg_kcapi_symsetkey(drbg, drbg->C);
+        ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
+                                     drbg_keylen(drbg));
+        if (ret)
+                goto out;
 
         /* 10.2.1.2 step 6 */
         memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
+        /* See above: increment counter by one to compensate timing of CTR op */
+        crypto_inc(drbg->V, drbg_blocklen(drbg));
         ret = 0;
 
 out:
@@ -543,46 +550,26 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
                              unsigned char *buf, unsigned int buflen,
                              struct list_head *addtl)
 {
-        int len = 0;
-        int ret = 0;
-        struct drbg_string data;
+        int ret;
+        int len = min_t(int, buflen, INT_MAX);
 
         /* 10.2.1.5.2 step 2 */
         if (addtl && !list_empty(addtl)) {
                 ret = drbg_ctr_update(drbg, addtl, 2);
                 if (ret)
                         return 0;
-                drbg_kcapi_symsetkey(drbg, drbg->C);
         }
 
         /* 10.2.1.5.2 step 4.1 */
-        crypto_inc(drbg->V, drbg_blocklen(drbg));
-        drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
-        while (len < buflen) {
-                int outlen = 0;
-                /* 10.2.1.5.2 step 4.2 */
-                ret = drbg_kcapi_sym(drbg, drbg->scratchpad, &data);
-                if (ret) {
-                        len = ret;
-                        goto out;
-                }
-                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
-                          drbg_blocklen(drbg) : (buflen - len);
-                /* 10.2.1.5.2 step 4.3 */
-                memcpy(buf + len, drbg->scratchpad, outlen);
-                len += outlen;
-                /* 10.2.1.5.2 step 6 */
-                if (len < buflen)
-                        crypto_inc(drbg->V, drbg_blocklen(drbg));
-        }
+        ret = drbg_kcapi_sym_ctr(drbg, buf, len);
+        if (ret)
+                return ret;
 
         /* 10.2.1.5.2 step 6 */
         ret = drbg_ctr_update(drbg, NULL, 3);
         if (ret)
                 len = ret;
 
-out:
-        memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
         return len;
 }
@@ -1634,10 +1621,46 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
 #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
 
 #ifdef CONFIG_CRYPTO_DRBG_CTR
+static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+{
+        struct crypto_cipher *tfm =
+                (struct crypto_cipher *)drbg->priv_data;
+        if (tfm)
+                crypto_free_cipher(tfm);
+        drbg->priv_data = NULL;
+
+        if (drbg->ctr_handle)
+                crypto_free_skcipher(drbg->ctr_handle);
+        drbg->ctr_handle = NULL;
+
+        if (drbg->ctr_req)
+                skcipher_request_free(drbg->ctr_req);;
+        drbg->ctr_req = NULL;
+
+        kfree(drbg->ctr_null_value_buf);
+        drbg->ctr_null_value = NULL;
+
+        return 0;
+}
+
+static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
+{
+        struct drbg_state *drbg = req->data;
+
+        if (error == -EINPROGRESS)
+                return;
+        drbg->ctr_async_err = error;
+        complete(&drbg->ctr_completion);
+}
+
+#define DRBG_CTR_NULL_LEN 128
+
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
-        int ret = 0;
         struct crypto_cipher *tfm;
+        struct crypto_skcipher *sk_tfm;
+        struct skcipher_request *req;
+        unsigned int alignmask;
+        char ctr_name[CRYPTO_MAX_ALG_NAME];
 
         tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
         if (IS_ERR(tfm)) {
@@ -1647,16 +1670,41 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
         }
         BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
         drbg->priv_data = tfm;
-        return ret;
-}
 
-static int drbg_fini_sym_kernel(struct drbg_state *drbg)
-{
-        struct crypto_cipher *tfm =
-                (struct crypto_cipher *)drbg->priv_data;
-        if (tfm)
-                crypto_free_cipher(tfm);
-        drbg->priv_data = NULL;
+        if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
+            drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
+                drbg_fini_sym_kernel(drbg);
+                return -EINVAL;
+        }
+        sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
+        if (IS_ERR(sk_tfm)) {
+                pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
+                        ctr_name);
+                drbg_fini_sym_kernel(drbg);
+                return PTR_ERR(sk_tfm);
+        }
+        drbg->ctr_handle = sk_tfm;
+
+        req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
+        if (!req) {
+                pr_info("DRBG: could not allocate request queue\n");
+                drbg_fini_sym_kernel(drbg);
+                return PTR_ERR(req);
+        }
+        drbg->ctr_req = req;
+        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                      drbg_skcipher_cb, drbg);
+
+        alignmask = crypto_skcipher_alignmask(sk_tfm);
+        drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
+                                           GFP_KERNEL);
+        if (!drbg->ctr_null_value_buf) {
+                drbg_fini_sym_kernel(drbg);
+                return -ENOMEM;
+        }
+        drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
+                                               alignmask + 1);
+
         return 0;
 }
@@ -1680,6 +1728,43 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
         crypto_cipher_encrypt_one(tfm, outval, in->buf);
         return 0;
 }
+
+static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen)
+{
+        struct scatterlist sg_in;
+
+        sg_init_one(&sg_in, drbg->ctr_null_value, DRBG_CTR_NULL_LEN);
+
+        while (outlen) {
+                u32 cryptlen = min_t(u32, outlen, DRBG_CTR_NULL_LEN);
+                struct scatterlist sg_out;
+                int ret;
+
+                sg_init_one(&sg_out, outbuf, cryptlen);
+                skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+                                           cryptlen, drbg->V);
+                ret = crypto_skcipher_encrypt(drbg->ctr_req);
+                switch (ret) {
+                case 0:
+                        break;
+                case -EINPROGRESS:
+                case -EBUSY:
+                        ret = wait_for_completion_interruptible(
+                                &drbg->ctr_completion);
+                        if (!ret && !drbg->ctr_async_err) {
+                                reinit_completion(&drbg->ctr_completion);
+                                break;
+                        }
+                default:
+                        return ret;
+                }
+                init_completion(&drbg->ctr_completion);
+
+                outlen -= cryptlen;
+        }
+
+        return 0;
+}
 #endif /* CONFIG_CRYPTO_DRBG_CTR */
 
 /***************************************************************
...
@@ -43,6 +43,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -115,6 +116,14 @@ struct drbg_state {
         /* some memory the DRBG can use for its operation */
         unsigned char *scratchpad;
         void *priv_data;        /* Cipher handle */
+        struct crypto_skcipher *ctr_handle;     /* CTR mode cipher handle */
+        struct skcipher_request *ctr_req;       /* CTR mode request handle */
+        __u8 *ctr_null_value_buf;               /* CTR mode unaligned buffer */
+        __u8 *ctr_null_value;                   /* CTR mode aligned zero buf */
+        struct completion ctr_completion;       /* CTR mode async handler */
+        int ctr_async_err;                      /* CTR mode async error */
         bool seeded;            /* DRBG fully seeded? */
         bool pr;                /* Prediction resistance enabled? */
         struct work_struct seed_work;   /* asynchronous seeding support */
...