Commit fffdaef2 authored by Kevin Coffman, committed by Trond Myklebust

gss_krb5: Add support for rc4-hmac encryption

Add kernel support for the rc4-hmac Kerberos encryption type used by
Microsoft and described in RFC 4757.
Signed-off-by: Kevin Coffman <kwc@citi.umich.edu>
Signed-off-by: Steve Dickson <steved@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 5af46547
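
Reviewer background: RFC 4757 builds the 8-byte token checksum in two stages: an unkeyed MD5 over a 4-byte little-endian usage value (the "salt"), the token header, and the message body, followed by a keyed HMAC-MD5 under the signing key Ksign, truncated to 8 bytes. The sketch below is a minimal userspace rendering of that construction using OpenSSL; the function name and the assumption that the caller already holds Ksign are ours, not part of the patch.

	#include <string.h>
	#include <openssl/evp.h>
	#include <openssl/hmac.h>
	#include <openssl/md5.h>

	/* usage values from the patch: 15 = sign (MIC), 13 = seal (wrap) */
	static void rc4_hmac_md5_checksum(const unsigned char ksign[16],
					  unsigned int ms_usage,
					  const unsigned char *hdr, size_t hdrlen,
					  const unsigned char *body, size_t bodylen,
					  unsigned char out[8])
	{
		unsigned char salt[4], digest[MD5_DIGEST_LENGTH];
		unsigned char hmacbuf[MD5_DIGEST_LENGTH];
		unsigned int len = sizeof(hmacbuf);
		MD5_CTX md5;

		/* 4-byte little-endian usage "salt" */
		salt[0] = (ms_usage >> 0) & 0xff;
		salt[1] = (ms_usage >> 8) & 0xff;
		salt[2] = (ms_usage >> 16) & 0xff;
		salt[3] = (ms_usage >> 24) & 0xff;

		/* stage 1: plain MD5 over salt || token header || message body */
		MD5_Init(&md5);
		MD5_Update(&md5, salt, 4);
		MD5_Update(&md5, hdr, hdrlen);
		MD5_Update(&md5, body, bodylen);
		MD5_Final(digest, &md5);

		/* stage 2: HMAC-MD5 under Ksign, truncated to 8 bytes */
		HMAC(EVP_md5(), ksign, 16, digest, sizeof(digest), hmacbuf, &len);
		memcpy(out, hmacbuf, 8);
	}

make_checksum_hmac_md5() in the first gss_krb5_crypto.c hunk below implements the same two-stage construction with the kernel crypto API.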
@@ -317,5 +317,14 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, u32 *plainoffset,
u32 *plainlen);
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
struct crypto_blkcipher *cipher,
unsigned char *cksum);
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
struct crypto_blkcipher *cipher,
s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
@@ -124,6 +124,114 @@ checksummer(struct scatterlist *sg, void *data)
return crypto_hash_update(desc, sg, sg->length);
}
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
unsigned int ms_usage;
switch (usage) {
case KG_USAGE_SIGN:
ms_usage = 15;
break;
case KG_USAGE_SEAL:
ms_usage = 13;
break;
default:
return -EINVAL;
}
salt[0] = (ms_usage >> 0) & 0xff;
salt[1] = (ms_usage >> 8) & 0xff;
salt[2] = (ms_usage >> 16) & 0xff;
salt[3] = (ms_usage >> 24) & 0xff;
return 0;
}
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
u8 rc4salt[4];
struct crypto_hash *md5;
struct crypto_hash *hmac_md5;
if (cksumkey == NULL)
return GSS_S_FAILURE;
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
dprintk("%s: invalid usage value %u\n", __func__, usage);
return GSS_S_FAILURE;
}
md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(md5))
return GSS_S_FAILURE;
hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_md5)) {
crypto_free_hash(md5);
return GSS_S_FAILURE;
}
desc.tfm = md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_init_one(sg, rc4salt, 4);
err = crypto_hash_update(&desc, sg, 4);
if (err)
goto out;
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
desc.tfm = hmac_md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
checksumdata);
if (err)
goto out;
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
cksumout->len = kctx->gk5e->cksumlength;
out:
crypto_free_hash(md5);
crypto_free_hash(hmac_md5);
return err ? GSS_S_FAILURE : 0;
}
/*
* checksum the plaintext data and hdrlen bytes of the token header
* The checksum is performed over the first 8 bytes of the
@@ -140,6 +248,11 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
unsigned int checksumlen;
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
return make_checksum_hmac_md5(kctx, header, hdrlen,
body, body_offset,
cksumkey, usage, cksumout);
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
@@ -733,3 +846,145 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
ret = GSS_S_FAILURE;
return ret;
}
/*
* Compute Kseq given the initial session key and the checksum.
* Set the key of the given cipher.
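* Per RFC 4757: Kseq = HMAC-MD5(Ksess, 0x00000000), then
* Kseq = HMAC-MD5(Kseq, checksum), exactly as coded below.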
*/
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
unsigned char *cksum)
{
struct crypto_hash *hmac;
struct hash_desc desc;
struct scatterlist sg[1];
u8 Kseq[GSS_KRB5_MAX_KEYLEN];
u32 zeroconstant = 0;
int err;
dprintk("%s: entered\n", __func__);
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld, allocating hash '%s'\n",
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
return PTR_ERR(hmac);
}
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err;
/* Compute intermediate Kseq from session key */
err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_init_table(sg, 1);
sg_set_buf(sg, &zeroconstant, 4);
err = crypto_hash_digest(&desc, sg, 4, Kseq);
if (err)
goto out_err;
/* Compute final Kseq from the checksum and intermediate Kseq */
err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_set_buf(sg, cksum, 8);
err = crypto_hash_digest(&desc, sg, 8, Kseq);
if (err)
goto out_err;
err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
err = 0;
out_err:
crypto_free_hash(hmac);
dprintk("%s: returning %d\n", __func__, err);
return err;
}
/*
* Compute Kcrypt given the initial session key and the plaintext seqnum.
* Set the key of cipher kctx->enc.
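* Per RFC 4757: Klocal = Ksess with each byte XORed with 0xf0;
* Kcrypt = HMAC-MD5(Klocal, 0x00000000), then
* Kcrypt = HMAC-MD5(Kcrypt, big-endian seqnum).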
*/
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
s32 seqnum)
{
struct crypto_hash *hmac;
struct hash_desc desc;
struct scatterlist sg[1];
u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
u8 zeroconstant[4] = {0};
u8 seqnumarray[4];
int err, i;
dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld, allocating hash '%s'\n",
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
return PTR_ERR(hmac);
}
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err;
/* Compute intermediate Kcrypt from session key */
for (i = 0; i < kctx->gk5e->keylength; i++)
Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_init_table(sg, 1);
sg_set_buf(sg, zeroconstant, 4);
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
if (err)
goto out_err;
/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
sg_set_buf(sg, seqnumarray, 4);
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
if (err)
goto out_err;
err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
err = 0;
out_err:
crypto_free_hash(hmac);
dprintk("%s: returning %d\n", __func__, err);
return err;
}
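
The two helpers above can be cross-checked in userspace. This OpenSSL sketch (helper names are ours, not the kernel's) mirrors the derivation steps, assuming the 16-byte session key of this enctype:

	#include <openssl/evp.h>
	#include <openssl/hmac.h>

	/* Kseq keys the RC4 cipher that protects the 8-byte seqnum field */
	static void rc4_kseq(const unsigned char ksess[16],
			     const unsigned char cksum[8], unsigned char kseq[16])
	{
		static const unsigned char zero[4];	/* 0x00000000 constant */
		unsigned char tmp[16];
		unsigned int len = 16;

		HMAC(EVP_md5(), ksess, 16, zero, 4, tmp, &len);	/* intermediate */
		HMAC(EVP_md5(), tmp, 16, cksum, 8, kseq, &len);	/* final Kseq */
	}

	/* Kcrypt keys the RC4 cipher that protects the payload */
	static void rc4_kcrypt(const unsigned char ksess[16],
			       unsigned int seqnum, unsigned char kcrypt[16])
	{
		static const unsigned char zero[4];
		unsigned char klocal[16], tmp[16], seq[4];
		unsigned int len = 16;
		int i;

		for (i = 0; i < 16; i++)		/* Klocal = Ksess ^ 0xf0 */
			klocal[i] = ksess[i] ^ 0xf0;
		HMAC(EVP_md5(), klocal, 16, zero, 4, tmp, &len);
		seq[0] = (seqnum >> 24) & 0xff;		/* big-endian seqnum */
		seq[1] = (seqnum >> 16) & 0xff;
		seq[2] = (seqnum >> 8) & 0xff;
		seq[3] = seqnum & 0xff;
		HMAC(EVP_md5(), tmp, 16, seq, 4, kcrypt, &len);
	}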
@@ -72,6 +72,27 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.cksumlength = 8,
.keyed_cksum = 0,
},
/*
* RC4-HMAC
*/
{
.etype = ENCTYPE_ARCFOUR_HMAC,
.ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
.name = "rc4-hmac",
.encrypt_name = "ecb(arc4)",
.cksum_name = "hmac(md5)",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = NULL,
.signalg = SGN_ALG_HMAC_MD5,
.sealalg = SEAL_ALG_MICROSOFT_RC4,
.keybytes = 16,
.keylength = 16,
.blocksize = 1,
.conflen = 8,
.cksumlength = 8,
.keyed_cksum = 1,
},
/*
* 3DES
*/
@@ -392,6 +413,79 @@ context_derive_keys_des3(struct krb5_ctx *ctx)
return -EINVAL;
}
/*
* Note that RC4 depends on deriving keys using the sequence
* number or the checksum of a token. Therefore, the final keys
* cannot be calculated until the token is being constructed!
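* Only the signing key (Ksign = HMAC-MD5(Ksess, "signaturekey\0"),
* stored in ctx->cksum) is derived at context-import time.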
*/
static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
int err;
dprintk("RPC: %s: entered\n", __func__);
/*
* derive cksum (aka Ksign) key
*/
hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld allocating hash '%s'\n",
__func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
err = PTR_ERR(hmac);
goto out_err;
}
err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
if (err)
goto out_err_free_hmac;
sg_init_table(sg, 1);
sg_set_buf(sg, sigkeyconstant, slen);
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err_free_hmac;
err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
if (err)
goto out_err_free_hmac;
/*
* allocate hash, and blkciphers for data and seqnum encryption
*/
ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->enc)) {
err = PTR_ERR(ctx->enc);
goto out_err_free_hmac;
}
ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->seq)) {
crypto_free_blkcipher(ctx->enc);
err = PTR_ERR(ctx->seq);
goto out_err_free_hmac;
}
dprintk("RPC: %s: returning success\n", __func__);
err = 0;
out_err_free_hmac:
crypto_free_hash(hmac);
out_err:
dprintk("RPC: %s: returning %d\n", __func__, err);
return err;
}
static int
context_derive_keys_new(struct krb5_ctx *ctx)
{
@@ -561,6 +655,8 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx)
switch (ctx->enctype) {
case ENCTYPE_DES3_CBC_RAW:
return context_derive_keys_des3(ctx);
case ENCTYPE_ARCFOUR_HMAC:
return context_derive_keys_rc4(ctx);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return context_derive_keys_new(ctx);
...
@@ -213,6 +213,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
return gss_get_mic_v1(ctx, text, token);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
...
@@ -39,6 +39,38 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
static s32
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
struct crypto_blkcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
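/* 8-byte plaintext: big-endian seqnum, then four copies of the
* direction byte (0x00 initiator, 0xff acceptor) */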
plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
plain[4] = direction;
plain[5] = direction;
plain[6] = direction;
plain[7] = direction;
code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
if (code)
goto out;
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
crypto_free_blkcipher(cipher);
return code;
}
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
struct crypto_blkcipher *key,
@@ -48,6 +80,10 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
{
unsigned char plain[8];
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_make_rc4_seq_num(kctx, direction, seqnum,
cksum, buf);
plain[0] = (unsigned char) (seqnum & 0xff);
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -61,6 +97,43 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
return krb5_encrypt(key, cksum, plain, buf, 8);
}
static s32
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, s32 *seqnum)
{
struct crypto_blkcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
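/* Kseq depends only on the (plaintext) token checksum, so the
* receiver can derive it before decrypting the seqnum field */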
code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
if (code)
goto out;
code = krb5_decrypt(cipher, cksum, buf, plain, 8);
if (code)
goto out;
if ((plain[4] != plain[5]) || (plain[4] != plain[6])
|| (plain[4] != plain[7])) {
code = (s32)KG_BAD_SEQ;
goto out;
}
*direction = plain[4];
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
(plain[2] << 8) | (plain[3]));
out:
crypto_free_blkcipher(cipher);
return code;
}
s32
krb5_get_seq_num(struct krb5_ctx *kctx,
unsigned char *cksum,
@@ -73,6 +146,10 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
dprintk("RPC: krb5_get_seq_num:\n");
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_get_rc4_seq_num(kctx, cksum, buf,
direction, seqnum);
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
return code;
...
@@ -216,6 +216,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
return gss_verify_mic_v1(ctx, message_buffer, read_token);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
...
@@ -232,9 +232,26 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
return GSS_S_FAILURE;
- if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - conflen,
- pages))
- return GSS_S_FAILURE;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
struct crypto_blkcipher *cipher;
int err;
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
err = gss_encrypt_xdr_buf(cipher, buf,
offset + headlen - conflen, pages);
crypto_free_blkcipher(cipher);
if (err)
return GSS_S_FAILURE;
} else {
if (gss_encrypt_xdr_buf(kctx->enc, buf,
offset + headlen - conflen, pages))
return GSS_S_FAILURE;
}
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
@@ -291,8 +308,37 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
*/
crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
(unsigned char *)buf->head[0].iov_base;
- if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
- return GSS_S_DEFECTIVE_TOKEN;
/*
* Need plaintext seqnum to derive encryption key for arcfour-hmac
*/
if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
ptr + 8, &direction, &seqnum))
return GSS_S_BAD_SIG;
if ((kctx->initiate && direction != 0xff) ||
(!kctx->initiate && direction != 0))
return GSS_S_BAD_SIG;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
struct crypto_blkcipher *cipher;
int err;
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
crypto_free_blkcipher(cipher);
if (err)
return GSS_S_DEFECTIVE_TOKEN;
} else {
if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
return GSS_S_DEFECTIVE_TOKEN;
}
if (kctx->gk5e->keyed_cksum)
cksumkey = kctx->cksum;
@@ -316,14 +362,6 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
/* do sequencing checks */
- if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
- ptr + 8, &direction, &seqnum))
- return GSS_S_BAD_SIG;
- if ((kctx->initiate && direction != 0xff) ||
- (!kctx->initiate && direction != 0))
- return GSS_S_BAD_SIG;
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
@@ -521,6 +559,7 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
@@ -538,6 +577,7 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
return gss_unwrap_kerberos_v1(kctx, offset, buf);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
...