Commit 8db44ab2 authored by Sabrina Dubroca, committed by Jakub Kicinski

tls: rename tls_cipher_size_desc to tls_cipher_desc

We're going to add other fields to it to fully describe a cipher, so
the "_size" name won't match the contents.
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Link: https://lore.kernel.org/r/76ca6c7686bd6d1534dfa188fb0f1f6fabebc791.1692977948.git.sd@queasysnail.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 037303d6
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
#define TLS_DEC_STATS(net, field) \ #define TLS_DEC_STATS(net, field) \
SNMP_DEC_STATS((net)->mib.tls_statistics, field) SNMP_DEC_STATS((net)->mib.tls_statistics, field)
struct tls_cipher_size_desc { struct tls_cipher_desc {
unsigned int iv; unsigned int iv;
unsigned int key; unsigned int key;
unsigned int salt; unsigned int salt;
...@@ -61,14 +61,14 @@ struct tls_cipher_size_desc { ...@@ -61,14 +61,14 @@ struct tls_cipher_size_desc {
#define TLS_CIPHER_MIN TLS_CIPHER_AES_GCM_128 #define TLS_CIPHER_MIN TLS_CIPHER_AES_GCM_128
#define TLS_CIPHER_MAX TLS_CIPHER_ARIA_GCM_256 #define TLS_CIPHER_MAX TLS_CIPHER_ARIA_GCM_256
extern const struct tls_cipher_size_desc tls_cipher_size_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN]; extern const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN];
static inline const struct tls_cipher_size_desc *get_cipher_size_desc(u16 cipher_type) static inline const struct tls_cipher_desc *get_cipher_desc(u16 cipher_type)
{ {
if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX) if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX)
return NULL; return NULL;
return &tls_cipher_size_desc[cipher_type - TLS_CIPHER_MIN]; return &tls_cipher_desc[cipher_type - TLS_CIPHER_MIN];
} }
......
...@@ -884,7 +884,7 @@ static int ...@@ -884,7 +884,7 @@ static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{ {
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
const struct tls_cipher_size_desc *cipher_sz; const struct tls_cipher_desc *cipher_desc;
int err, offset, copy, data_len, pos; int err, offset, copy, data_len, pos;
struct sk_buff *skb, *skb_iter; struct sk_buff *skb, *skb_iter;
struct scatterlist sg[1]; struct scatterlist sg[1];
...@@ -898,10 +898,10 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) ...@@ -898,10 +898,10 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
default: default:
return -EINVAL; return -EINVAL;
} }
cipher_sz = get_cipher_size_desc(tls_ctx->crypto_recv.info.cipher_type); cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
rxm = strp_msg(tls_strp_msg(sw_ctx)); rxm = strp_msg(tls_strp_msg(sw_ctx));
orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv, orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
sk->sk_allocation); sk->sk_allocation);
if (!orig_buf) if (!orig_buf)
return -ENOMEM; return -ENOMEM;
...@@ -917,8 +917,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) ...@@ -917,8 +917,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
sg_init_table(sg, 1); sg_init_table(sg, 1);
sg_set_buf(&sg[0], buf, sg_set_buf(&sg[0], buf,
rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv); rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv); err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
if (err) if (err)
goto free_buf; goto free_buf;
...@@ -929,7 +929,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) ...@@ -929,7 +929,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
else else
err = 0; err = 0;
data_len = rxm->full_len - cipher_sz->tag; data_len = rxm->full_len - cipher_desc->tag;
if (skb_pagelen(skb) > offset) { if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len); copy = min_t(int, skb_pagelen(skb) - offset, data_len);
...@@ -1046,7 +1046,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -1046,7 +1046,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{ {
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_prot_info *prot = &tls_ctx->prot_info;
const struct tls_cipher_size_desc *cipher_sz; const struct tls_cipher_desc *cipher_desc;
struct tls_record_info *start_marker_record; struct tls_record_info *start_marker_record;
struct tls_offload_context_tx *offload_ctx; struct tls_offload_context_tx *offload_ctx;
struct tls_crypto_info *crypto_info; struct tls_crypto_info *crypto_info;
...@@ -1094,31 +1094,31 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -1094,31 +1094,31 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
rc = -EINVAL; rc = -EINVAL;
goto release_netdev; goto release_netdev;
} }
cipher_sz = get_cipher_size_desc(crypto_info->cipher_type); cipher_desc = get_cipher_desc(crypto_info->cipher_type);
/* Sanity-check the rec_seq_size for stack allocations */ /* Sanity-check the rec_seq_size for stack allocations */
if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) { if (cipher_desc->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
rc = -EINVAL; rc = -EINVAL;
goto release_netdev; goto release_netdev;
} }
prot->version = crypto_info->version; prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type; prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv; prot->prepend_size = TLS_HEADER_SIZE + cipher_desc->iv;
prot->tag_size = cipher_sz->tag; prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size + prot->tag_size; prot->overhead_size = prot->prepend_size + prot->tag_size;
prot->iv_size = cipher_sz->iv; prot->iv_size = cipher_desc->iv;
prot->salt_size = cipher_sz->salt; prot->salt_size = cipher_desc->salt;
ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL); ctx->tx.iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
if (!ctx->tx.iv) { if (!ctx->tx.iv) {
rc = -ENOMEM; rc = -ENOMEM;
goto release_netdev; goto release_netdev;
} }
memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv); memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
prot->rec_seq_size = cipher_sz->rec_seq; prot->rec_seq_size = cipher_desc->rec_seq;
ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL); ctx->tx.rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
if (!ctx->tx.rec_seq) { if (!ctx->tx.rec_seq) {
rc = -ENOMEM; rc = -ENOMEM;
goto free_iv; goto free_iv;
......
...@@ -55,7 +55,7 @@ static int tls_enc_record(struct aead_request *aead_req, ...@@ -55,7 +55,7 @@ static int tls_enc_record(struct aead_request *aead_req,
struct tls_prot_info *prot) struct tls_prot_info *prot)
{ {
unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE]; unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
const struct tls_cipher_size_desc *cipher_sz; const struct tls_cipher_desc *cipher_desc;
struct scatterlist sg_in[3]; struct scatterlist sg_in[3];
struct scatterlist sg_out[3]; struct scatterlist sg_out[3];
unsigned int buf_size; unsigned int buf_size;
...@@ -69,9 +69,9 @@ static int tls_enc_record(struct aead_request *aead_req, ...@@ -69,9 +69,9 @@ static int tls_enc_record(struct aead_request *aead_req,
default: default:
return -EINVAL; return -EINVAL;
} }
cipher_sz = get_cipher_size_desc(prot->cipher_type); cipher_desc = get_cipher_desc(prot->cipher_type);
buf_size = TLS_HEADER_SIZE + cipher_sz->iv; buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
len = min_t(int, *in_len, buf_size); len = min_t(int, *in_len, buf_size);
scatterwalk_copychunks(buf, in, len, 0); scatterwalk_copychunks(buf, in, len, 0);
...@@ -85,11 +85,11 @@ static int tls_enc_record(struct aead_request *aead_req, ...@@ -85,11 +85,11 @@ static int tls_enc_record(struct aead_request *aead_req,
scatterwalk_pagedone(out, 1, 1); scatterwalk_pagedone(out, 1, 1);
len = buf[4] | (buf[3] << 8); len = buf[4] | (buf[3] << 8);
len -= cipher_sz->iv; len -= cipher_desc->iv;
tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot); tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);
memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv); memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);
sg_init_table(sg_in, ARRAY_SIZE(sg_in)); sg_init_table(sg_in, ARRAY_SIZE(sg_in));
sg_init_table(sg_out, ARRAY_SIZE(sg_out)); sg_init_table(sg_out, ARRAY_SIZE(sg_out));
...@@ -100,7 +100,7 @@ static int tls_enc_record(struct aead_request *aead_req, ...@@ -100,7 +100,7 @@ static int tls_enc_record(struct aead_request *aead_req,
*in_len -= len; *in_len -= len;
if (*in_len < 0) { if (*in_len < 0) {
*in_len += cipher_sz->tag; *in_len += cipher_desc->tag;
/* the input buffer doesn't contain the entire record. /* the input buffer doesn't contain the entire record.
* trim len accordingly. The resulting authentication tag * trim len accordingly. The resulting authentication tag
* will contain garbage, but we don't care, so we won't * will contain garbage, but we don't care, so we won't
...@@ -121,7 +121,7 @@ static int tls_enc_record(struct aead_request *aead_req, ...@@ -121,7 +121,7 @@ static int tls_enc_record(struct aead_request *aead_req,
scatterwalk_pagedone(out, 1, 1); scatterwalk_pagedone(out, 1, 1);
} }
len -= cipher_sz->tag; len -= cipher_desc->tag;
aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv); aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
rc = crypto_aead_encrypt(aead_req); rc = crypto_aead_encrypt(aead_req);
...@@ -309,14 +309,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf, ...@@ -309,14 +309,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
int sync_size, int sync_size,
void *dummy_buf) void *dummy_buf)
{ {
const struct tls_cipher_size_desc *cipher_sz = const struct tls_cipher_desc *cipher_desc =
get_cipher_size_desc(tls_ctx->crypto_send.info.cipher_type); get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
sg_set_buf(&sg_out[0], dummy_buf, sync_size); sg_set_buf(&sg_out[0], dummy_buf, sync_size);
sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len); sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
/* Add room for authentication tag produced by crypto */ /* Add room for authentication tag produced by crypto */
dummy_buf += sync_size; dummy_buf += sync_size;
sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag); sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
} }
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
...@@ -328,7 +328,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, ...@@ -328,7 +328,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tcp_payload_offset = skb_tcp_all_headers(skb); int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset; int payload_len = skb->len - tcp_payload_offset;
const struct tls_cipher_size_desc *cipher_sz; const struct tls_cipher_desc *cipher_desc;
void *buf, *iv, *aad, *dummy_buf, *salt; void *buf, *iv, *aad, *dummy_buf, *salt;
struct aead_request *aead_req; struct aead_request *aead_req;
struct sk_buff *nskb = NULL; struct sk_buff *nskb = NULL;
...@@ -348,16 +348,16 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, ...@@ -348,16 +348,16 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
default: default:
goto free_req; goto free_req;
} }
cipher_sz = get_cipher_size_desc(tls_ctx->crypto_send.info.cipher_type); cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE + buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
sync_size + cipher_sz->tag; sync_size + cipher_desc->tag;
buf = kmalloc(buf_len, GFP_ATOMIC); buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf) if (!buf)
goto free_req; goto free_req;
iv = buf; iv = buf;
memcpy(iv, salt, cipher_sz->salt); memcpy(iv, salt, cipher_desc->salt);
aad = buf + cipher_sz->salt + cipher_sz->iv; aad = buf + cipher_desc->salt + cipher_desc->iv;
dummy_buf = aad + TLS_AAD_SPACE_SIZE; dummy_buf = aad + TLS_AAD_SPACE_SIZE;
nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC); nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
...@@ -471,7 +471,7 @@ int tls_sw_fallback_init(struct sock *sk, ...@@ -471,7 +471,7 @@ int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx, struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info) struct tls_crypto_info *crypto_info)
{ {
const struct tls_cipher_size_desc *cipher_sz; const struct tls_cipher_desc *cipher_desc;
const u8 *key; const u8 *key;
int rc; int rc;
...@@ -495,13 +495,13 @@ int tls_sw_fallback_init(struct sock *sk, ...@@ -495,13 +495,13 @@ int tls_sw_fallback_init(struct sock *sk,
rc = -EINVAL; rc = -EINVAL;
goto free_aead; goto free_aead;
} }
cipher_sz = get_cipher_size_desc(crypto_info->cipher_type); cipher_desc = get_cipher_desc(crypto_info->cipher_type);
rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key); rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_desc->key);
if (rc) if (rc)
goto free_aead; goto free_aead;
rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag); rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
if (rc) if (rc)
goto free_aead; goto free_aead;
......
...@@ -58,7 +58,7 @@ enum { ...@@ -58,7 +58,7 @@ enum {
TLS_NUM_PROTS, TLS_NUM_PROTS,
}; };
#define CIPHER_SIZE_DESC(cipher) [cipher - TLS_CIPHER_MIN] = { \ #define CIPHER_DESC(cipher) [cipher - TLS_CIPHER_MIN] = { \
.iv = cipher ## _IV_SIZE, \ .iv = cipher ## _IV_SIZE, \
.key = cipher ## _KEY_SIZE, \ .key = cipher ## _KEY_SIZE, \
.salt = cipher ## _SALT_SIZE, \ .salt = cipher ## _SALT_SIZE, \
...@@ -66,15 +66,15 @@ enum { ...@@ -66,15 +66,15 @@ enum {
.rec_seq = cipher ## _REC_SEQ_SIZE, \ .rec_seq = cipher ## _REC_SEQ_SIZE, \
} }
const struct tls_cipher_size_desc tls_cipher_size_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = { const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128), CIPHER_DESC(TLS_CIPHER_AES_GCM_128),
CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256), CIPHER_DESC(TLS_CIPHER_AES_GCM_256),
CIPHER_SIZE_DESC(TLS_CIPHER_AES_CCM_128), CIPHER_DESC(TLS_CIPHER_AES_CCM_128),
CIPHER_SIZE_DESC(TLS_CIPHER_CHACHA20_POLY1305), CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305),
CIPHER_SIZE_DESC(TLS_CIPHER_SM4_GCM), CIPHER_DESC(TLS_CIPHER_SM4_GCM),
CIPHER_SIZE_DESC(TLS_CIPHER_SM4_CCM), CIPHER_DESC(TLS_CIPHER_SM4_CCM),
CIPHER_SIZE_DESC(TLS_CIPHER_ARIA_GCM_128), CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128),
CIPHER_SIZE_DESC(TLS_CIPHER_ARIA_GCM_256), CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256),
}; };
static const struct proto *saved_tcpv6_prot; static const struct proto *saved_tcpv6_prot;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment