Commit 36eb2caa authored by Jan Glauber, committed by Martin Schwidefsky

s390/crypto: Don't panic after crypto instruction failures

Remove the BUG_ONs that check for failed or incomplete
results of the s390 hardware crypto instructions.
Instead, report these errors as -EIO to the crypto layer.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent ce1d8014
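The change follows a single pattern throughout: every BUG_ON() that asserted a complete result from a crypt_s390_* wrapper becomes a check that returns -EIO, so a failing or partial hardware operation is reported to the crypto layer instead of panicking the machine. The one structural change is ghash_flush(), which now returns int so that ghash_final() can propagate its error. Below is a minimal user-space sketch of that pattern, not kernel code; crypt_hw_op() and its XOR placeholder "cipher" are hypothetical stand-ins for the crypt_s390_* wrappers, which return the number of bytes processed or a negative value on failure.

/*
 * Minimal user-space sketch of the error-handling pattern applied by
 * this commit.  crypt_hw_op() is a hypothetical stand-in for the
 * crypt_s390_* wrappers: it returns the number of bytes processed on
 * success or a negative value on failure.
 */
#include <errno.h>
#include <stdio.h>

/* hypothetical wrapper around a hardware crypto instruction */
static int crypt_hw_op(unsigned char *out, const unsigned char *in,
                       unsigned int n)
{
        unsigned int i;

        /* pretend the instruction processed only half of the request */
        for (i = 0; i < n / 2; i++)
                out[i] = in[i] ^ 0x5a;
        return n / 2;
}

static int crypt_blocks(unsigned char *out, const unsigned char *in,
                        unsigned int n)
{
        int ret = crypt_hw_op(out, in, n);

        /*
         * Old style: BUG_ON(ret < 0 || ret != n) would crash the kernel.
         * New style: treat a failed or partial result as an I/O error
         * and let the caller deal with it.
         */
        if (ret < 0 || (unsigned int)ret != n)
                return -EIO;
        return 0;
}

int main(void)
{
        unsigned char in[16] = { 0 }, out[16];

        /* prints a negative errno (-EIO) because the "hardware" op was partial */
        printf("crypt_blocks: %d\n", crypt_blocks(out, in, sizeof(in)));
        return 0;
}

In the kernel, the -EIO returned here travels back through the blkcipher and shash interfaces, which already expect a negative error code from these callbacks.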
@@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
                 u8 *in = walk->src.virt.addr;

                 ret = crypt_s390_km(func, param, out, in, n);
-                BUG_ON((ret < 0) || (ret != n));
+                if (ret < 0 || ret != n)
+                        return -EIO;

                 nbytes &= AES_BLOCK_SIZE - 1;
                 ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
                 u8 *in = walk->src.virt.addr;

                 ret = crypt_s390_kmc(func, param, out, in, n);
-                BUG_ON((ret < 0) || (ret != n));
+                if (ret < 0 || ret != n)
+                        return -EIO;

                 nbytes &= AES_BLOCK_SIZE - 1;
                 ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
         memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
         param = xts_ctx->pcc.key + offset;
         ret = crypt_s390_pcc(func, param);
-        BUG_ON(ret < 0);
+        if (ret < 0)
+                return -EIO;

         memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
         param = xts_ctx->key + offset;
@@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                 in = walk->src.virt.addr;

                 ret = crypt_s390_km(func, param, out, in, n);
-                BUG_ON(ret < 0 || ret != n);
+                if (ret < 0 || ret != n)
+                        return -EIO;

                 nbytes &= AES_BLOCK_SIZE - 1;
                 ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
                 }
                 ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-                BUG_ON(ret < 0 || ret != n);
+                if (ret < 0 || ret != n)
+                        return -EIO;
                 if (n > AES_BLOCK_SIZE)
                         memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
                                AES_BLOCK_SIZE);
@@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                 in = walk->src.virt.addr;
                 ret = crypt_s390_kmctr(func, sctx->key, buf, in,
                                        AES_BLOCK_SIZE, ctrblk);
-                BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+                if (ret < 0 || ret != AES_BLOCK_SIZE)
+                        return -EIO;
                 memcpy(out, buf, nbytes);
                 crypto_inc(ctrblk, AES_BLOCK_SIZE);
                 ret = blkcipher_walk_done(desc, walk, 0);
@@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
                 u8 *in = walk->src.virt.addr;

                 ret = crypt_s390_km(func, key, out, in, n);
-                BUG_ON((ret < 0) || (ret != n));
+                if (ret < 0 || ret != n)
+                        return -EIO;

                 nbytes &= DES_BLOCK_SIZE - 1;
                 ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
                 u8 *in = walk->src.virt.addr;

                 ret = crypt_s390_kmc(func, iv, out, in, n);
-                BUG_ON((ret < 0) || (ret != n));
+                if (ret < 0 || ret != n)
+                        return -EIO;

                 nbytes &= DES_BLOCK_SIZE - 1;
                 ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
                         crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
                 }
                 ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-                BUG_ON((ret < 0) || (ret != n));
+                if (ret < 0 || ret != n)
+                        return -EIO;
                 if (n > DES_BLOCK_SIZE)
                         memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
                                DES_BLOCK_SIZE);
@@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
                 in = walk->src.virt.addr;
                 ret = crypt_s390_kmctr(func, ctx->key, buf, in,
                                        DES_BLOCK_SIZE, ctrblk);
-                BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+                if (ret < 0 || ret != DES_BLOCK_SIZE)
+                        return -EIO;
                 memcpy(out, buf, nbytes);
                 crypto_inc(ctrblk, DES_BLOCK_SIZE);
                 ret = blkcipher_walk_done(desc, walk, 0);
@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
                 if (!dctx->bytes) {
                         ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
                                               GHASH_BLOCK_SIZE);
-                        BUG_ON(ret != GHASH_BLOCK_SIZE);
+                        if (ret != GHASH_BLOCK_SIZE)
+                                return -EIO;
                 }
         }

         n = srclen & ~(GHASH_BLOCK_SIZE - 1);
         if (n) {
                 ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
-                BUG_ON(ret != n);
+                if (ret != n)
+                        return -EIO;
                 src += n;
                 srclen -= n;
         }
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
         return 0;
 }

-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 {
         u8 *buf = dctx->buffer;
         int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
                 memset(pos, 0, dctx->bytes);

                 ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
-                BUG_ON(ret != GHASH_BLOCK_SIZE);
+                if (ret != GHASH_BLOCK_SIZE)
+                        return -EIO;
         }

         dctx->bytes = 0;
+        return 0;
 }

 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
         struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
         struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+        int ret;

-        ghash_flush(ctx, dctx);
-        memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
-        return 0;
+        ret = ghash_flush(ctx, dctx);
+        if (!ret)
+                memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+        return ret;
 }

 static struct shash_alg ghash_alg = {
@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
         if (index) {
                 memcpy(ctx->buf + index, data, bsize - index);
                 ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
-                BUG_ON(ret != bsize);
+                if (ret != bsize)
+                        return -EIO;
                 data += bsize - index;
                 len -= bsize - index;
                 index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
         if (len >= bsize) {
                 ret = crypt_s390_kimd(ctx->func, ctx->state, data,
                                       len & ~(bsize - 1));
-                BUG_ON(ret != (len & ~(bsize - 1)));
+                if (ret != (len & ~(bsize - 1)))
+                        return -EIO;
                 data += ret;
                 len -= ret;
         }
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
         memcpy(ctx->buf + end - 8, &bits, sizeof(bits));

         ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
-        BUG_ON(ret != end);
+        if (ret != end)
+                return -EIO;

         /* copy digest to out */
         memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));