Commit 5bc8f147 authored by Jens Axboe's avatar Jens Axboe

Merge tag 'nvme-6.7-2023-10-17' of git://git.infradead.org/nvme into for-6.7/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.7

 - nvme-auth updates (Mark)
 - nvme-tcp tls (Hannes)
 - nvme-fc annotations (Kees)"

* tag 'nvme-6.7-2023-10-17' of git://git.infradead.org/nvme: (24 commits)
  nvme-auth: allow mixing of secret and hash lengths
  nvme-auth: use transformed key size to create resp
  nvme-auth: alloc nvme_dhchap_key as single buffer
  nvmet-tcp: use 'spin_lock_bh' for state_lock()
  nvme: rework NVME_AUTH Kconfig selection
  nvmet-tcp: peek icreq before starting TLS
  nvmet-tcp: control messages for recvmsg()
  nvmet-tcp: enable TLS handshake upcall
  nvmet: Set 'TREQ' to 'required' when TLS is enabled
  nvmet-tcp: allocate socket file
  nvmet-tcp: make nvmet_tcp_alloc_queue() a void function
  nvmet: make TCP sectype settable via configfs
  nvme-fabrics: parse options 'keyring' and 'tls_key'
  nvme-tcp: improve icreq/icresp logging
  nvme-tcp: control message handling for recvmsg()
  nvme-tcp: enable TLS handshake upcall
  nvme-tcp: allocate socket file
  security/keys: export key_lookup()
  nvme-keyring: implement nvme_tls_psk_default()
  nvme-tcp: add definitions for TLS cipher suites
  ...
parents ec8cf230 32445526
...@@ -2,3 +2,16 @@ ...@@ -2,3 +2,16 @@
config NVME_COMMON config NVME_COMMON
tristate tristate
config NVME_KEYRING
bool
select KEYS
config NVME_AUTH
bool
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
...@@ -4,4 +4,5 @@ ccflags-y += -I$(src) ...@@ -4,4 +4,5 @@ ccflags-y += -I$(src)
obj-$(CONFIG_NVME_COMMON) += nvme-common.o obj-$(CONFIG_NVME_COMMON) += nvme-common.o
nvme-common-y += auth.o nvme-common-$(CONFIG_NVME_AUTH) += auth.o
nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o
...@@ -150,6 +150,14 @@ size_t nvme_auth_hmac_hash_len(u8 hmac_id) ...@@ -150,6 +150,14 @@ size_t nvme_auth_hmac_hash_len(u8 hmac_id)
} }
EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len); EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
/*
 * nvme_auth_key_struct_size - size in bytes of a struct nvme_dhchap_key
 * @key_len: number of key-material bytes stored in the trailing 'key' array
 *
 * Used so the struct and its key material can be allocated as a single
 * buffer (see nvme_auth_alloc_key()).
 */
u32 nvme_auth_key_struct_size(u32 key_len)
{
	struct nvme_dhchap_key key;
	/* struct_size() = header size + key_len trailing bytes, overflow-safe */
	return struct_size(&key, key, key_len);
}
EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash) u8 key_hash)
{ {
...@@ -163,14 +171,9 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, ...@@ -163,14 +171,9 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
p = strrchr(secret, ':'); p = strrchr(secret, ':');
if (p) if (p)
allocated_len = p - secret; allocated_len = p - secret;
key = kzalloc(sizeof(*key), GFP_KERNEL); key = nvme_auth_alloc_key(allocated_len, 0);
if (!key) if (!key)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
key->key = kzalloc(allocated_len, GFP_KERNEL);
if (!key->key) {
ret = -ENOMEM;
goto out_free_key;
}
key_len = base64_decode(secret, allocated_len, key->key); key_len = base64_decode(secret, allocated_len, key->key);
if (key_len < 0) { if (key_len < 0) {
...@@ -187,14 +190,6 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, ...@@ -187,14 +190,6 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
goto out_free_secret; goto out_free_secret;
} }
if (key_hash > 0 &&
(key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
pr_err("Mismatched key len %d for %s\n", key_len,
nvme_auth_hmac_name(key_hash));
ret = -EINVAL;
goto out_free_secret;
}
/* The last four bytes is the CRC in little-endian format */ /* The last four bytes is the CRC in little-endian format */
key_len -= 4; key_len -= 4;
/* /*
...@@ -213,37 +208,51 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, ...@@ -213,37 +208,51 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
key->hash = key_hash; key->hash = key_hash;
return key; return key;
out_free_secret: out_free_secret:
kfree_sensitive(key->key); nvme_auth_free_key(key);
out_free_key:
kfree(key);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(nvme_auth_extract_key); EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
/*
 * nvme_auth_alloc_key - allocate a DH-HMAC-CHAP key as one buffer
 * @len: number of key-material bytes to reserve
 * @hash: HMAC hash identifier to record in the key
 *
 * Returns a zeroed key with ->len and ->hash initialized, or NULL on
 * allocation failure.  Free with nvme_auth_free_key().
 */
struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash)
{
	struct nvme_dhchap_key *key;

	key = kzalloc(nvme_auth_key_struct_size(len), GFP_KERNEL);
	if (!key)
		return NULL;

	key->len = len;
	key->hash = hash;
	return key;
}
EXPORT_SYMBOL_GPL(nvme_auth_alloc_key);
void nvme_auth_free_key(struct nvme_dhchap_key *key) void nvme_auth_free_key(struct nvme_dhchap_key *key)
{ {
if (!key) if (!key)
return; return;
kfree_sensitive(key->key); kfree_sensitive(key);
kfree(key);
} }
EXPORT_SYMBOL_GPL(nvme_auth_free_key); EXPORT_SYMBOL_GPL(nvme_auth_free_key);
u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) struct nvme_dhchap_key *nvme_auth_transform_key(
struct nvme_dhchap_key *key, char *nqn)
{ {
const char *hmac_name; const char *hmac_name;
struct crypto_shash *key_tfm; struct crypto_shash *key_tfm;
struct shash_desc *shash; struct shash_desc *shash;
u8 *transformed_key; struct nvme_dhchap_key *transformed_key;
int ret; int ret, key_len;
if (!key || !key->key) { if (!key) {
pr_warn("No key specified\n"); pr_warn("No key specified\n");
return ERR_PTR(-ENOKEY); return ERR_PTR(-ENOKEY);
} }
if (key->hash == 0) { if (key->hash == 0) {
transformed_key = kmemdup(key->key, key->len, GFP_KERNEL); key_len = nvme_auth_key_struct_size(key->len);
return transformed_key ? transformed_key : ERR_PTR(-ENOMEM); transformed_key = kmemdup(key, key_len, GFP_KERNEL);
if (!transformed_key)
return ERR_PTR(-ENOMEM);
return transformed_key;
} }
hmac_name = nvme_auth_hmac_name(key->hash); hmac_name = nvme_auth_hmac_name(key->hash);
if (!hmac_name) { if (!hmac_name) {
...@@ -253,7 +262,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) ...@@ -253,7 +262,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
key_tfm = crypto_alloc_shash(hmac_name, 0, 0); key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(key_tfm)) if (IS_ERR(key_tfm))
return (u8 *)key_tfm; return ERR_CAST(key_tfm);
shash = kmalloc(sizeof(struct shash_desc) + shash = kmalloc(sizeof(struct shash_desc) +
crypto_shash_descsize(key_tfm), crypto_shash_descsize(key_tfm),
...@@ -263,7 +272,8 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) ...@@ -263,7 +272,8 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
goto out_free_key; goto out_free_key;
} }
transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL); key_len = crypto_shash_digestsize(key_tfm);
transformed_key = nvme_auth_alloc_key(key_len, key->hash);
if (!transformed_key) { if (!transformed_key) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_shash; goto out_free_shash;
...@@ -282,7 +292,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) ...@@ -282,7 +292,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17); ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
if (ret < 0) if (ret < 0)
goto out_free_transformed_key; goto out_free_transformed_key;
ret = crypto_shash_final(shash, transformed_key); ret = crypto_shash_final(shash, transformed_key->key);
if (ret < 0) if (ret < 0)
goto out_free_transformed_key; goto out_free_transformed_key;
...@@ -292,7 +302,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) ...@@ -292,7 +302,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
return transformed_key; return transformed_key;
out_free_transformed_key: out_free_transformed_key:
kfree_sensitive(transformed_key); nvme_auth_free_key(transformed_key);
out_free_shash: out_free_shash:
kfree(shash); kfree(shash);
out_free_key: out_free_key:
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Hannes Reinecke, SUSE Labs
*/
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/nvme.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
static struct key *nvme_keyring;
/*
 * nvme_keyring_id - serial number of the global ".nvme" keyring
 *
 * Only valid after nvme_keyring_init() succeeded; the keyring pointer
 * is intentionally not NULL-checked here.
 */
key_serial_t nvme_keyring_id(void)
{
	return nvme_keyring->serial;
}
EXPORT_SYMBOL_GPL(nvme_keyring_id);
/* ->describe handler: emit "<description>: <datalen>" into @m. */
static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m)
{
	seq_printf(m, "%s: %u", key->description, key->datalen);
}
/*
 * ->cmp handler: match a candidate key against the identity string
 * passed in via match_data->raw_data.  A key matches when its whole
 * description equals the leading bytes of the identity.
 */
static bool nvme_tls_psk_match(const struct key *key,
		const struct key_match_data *match_data)
{
	const char *match_id;
	size_t match_len;

	if (!key->description) {
		pr_debug("%s: no key description\n", __func__);
		return false;
	}
	match_len = strlen(key->description);
	pr_debug("%s: id %s len %zd\n", __func__, key->description, match_len);

	match_id = match_data->raw_data;
	if (!match_id) {
		pr_debug("%s: no match data\n", __func__);
		return false;
	}
	pr_debug("%s: match '%s' '%s' len %zd\n",
		 __func__, match_id, key->description, match_len);
	/*
	 * NOTE(review): memcmp() reads match_len bytes of match_id; if the
	 * raw match data can be shorter than the key description this may
	 * over-read — confirm callers always pass a full identity string.
	 */
	return memcmp(key->description, match_id, match_len) == 0;
}
/*
 * ->match_preparse handler: switch key lookup to iterate mode so every
 * key in the keyring is compared via nvme_tls_psk_match() rather than
 * by direct description hash.
 */
static int nvme_tls_psk_match_preparse(struct key_match_data *match_data)
{
	match_data->cmp = nvme_tls_psk_match;
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
	return 0;
}
/*
 * Key type for NVMe TLS pre-shared keys.  Payload handling reuses the
 * "user" key type helpers; matching uses a custom iterate-based compare
 * so identity strings are checked against the key description.
 */
static struct key_type nvme_tls_psk_key_type = {
	.name = "psk",
	.flags = KEY_TYPE_NET_DOMAIN,
	.preparse = user_preparse,
	.free_preparse = user_free_preparse,
	.match_preparse = nvme_tls_psk_match_preparse,
	.instantiate = generic_key_instantiate,
	.revoke = user_revoke,
	.destroy = user_destroy,
	.describe = nvme_tls_psk_describe,
	.read = user_read,
};
/*
 * nvme_tls_psk_lookup - look up a TLS PSK by its NVMe identity string
 * @keyring: keyring to search, or NULL for the default ".nvme" keyring
 * @hostnqn: host NQN part of the identity
 * @subnqn: subsystem NQN part of the identity
 * @hmac: cipher/hash identifier encoded into the identity
 * @generated: true for a 'generated' PSK, false for a 'retained' one
 *
 * Builds the "NVMe0<R|G><hmac> <hostnqn> <subnqn>" identity and searches
 * the keyring for it.  Returns the key or an ERR_PTR (-ENOMEM on
 * allocation failure, -ENOKEY when no key matches).
 */
static struct key *nvme_tls_psk_lookup(struct key *keyring,
		const char *hostnqn, const char *subnqn,
		int hmac, bool generated)
{
	size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11;
	struct key *key;
	key_ref_t keyref;
	char *identity;

	identity = kzalloc(identity_len, GFP_KERNEL);
	if (!identity)
		return ERR_PTR(-ENOMEM);

	snprintf(identity, identity_len, "NVMe0%c%02d %s %s",
		 generated ? 'G' : 'R', hmac, hostnqn, subnqn);

	/* fall back to the global ".nvme" keyring */
	if (!keyring)
		keyring = nvme_keyring;
	pr_debug("keyring %x lookup tls psk '%s'\n",
		 key_serial(keyring), identity);
	keyref = keyring_search(make_key_ref(keyring, true),
				&nvme_tls_psk_key_type,
				identity, false);
	if (IS_ERR(keyref)) {
		pr_debug("lookup tls psk '%s' failed, error %ld\n",
			 identity, PTR_ERR(keyref));
		key = ERR_PTR(-ENOKEY);
	} else {
		key = key_ref_to_ptr(keyref);
	}
	/* single exit: the identity buffer is freed on every path */
	kfree(identity);
	return key;
}
/*
 * NVMe PSK priority list
 *
 * 'Retained' PSKs (ie 'generated == false') should be preferred to
 * 'generated' PSKs, and SHA-384 should be preferred to SHA-256.
 */
/* Made static: the table is only referenced by nvme_tls_psk_default()
 * in this file; without 'static' it needlessly exports a symbol into
 * the kernel's global namespace (flagged by sparse). */
static struct nvme_tls_psk_priority_list {
	bool generated;
	enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {
	{ .generated = false,
	  .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
	{ .generated = false,
	  .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
	{ .generated = true,
	  .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
	{ .generated = true,
	  .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
};
/*
 * nvme_tls_psk_default - Return the preferred PSK to use for TLS ClientHello
 * @keyring: keyring to search, or NULL for the default ".nvme" keyring
 * @hostnqn: host NQN for the identity
 * @subnqn: subsystem NQN for the identity
 *
 * Walks the priority table (retained before generated, SHA-384 before
 * SHA-256) and returns the serial of the first matching key, or 0 when
 * no PSK is found.
 */
key_serial_t nvme_tls_psk_default(struct key *keyring,
		const char *hostnqn, const char *subnqn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_tls_psk_prio); i++) {
		bool generated = nvme_tls_psk_prio[i].generated;
		enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[i].cipher;
		key_serial_t tls_key_id;
		struct key *tls_key;

		tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn,
					      cipher, generated);
		if (IS_ERR(tls_key))
			continue;

		/* only the serial is returned; drop our reference */
		tls_key_id = tls_key->serial;
		key_put(tls_key);
		return tls_key_id;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_tls_psk_default);
/*
 * nvme_keyring_init - create the ".nvme" keyring and register the "psk"
 * key type.
 *
 * Returns 0 on success or a negative errno; on key-type registration
 * failure the freshly allocated keyring is released again.
 */
int nvme_keyring_init(void)
{
	int err;

	nvme_keyring = keyring_alloc(".nvme",
				     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				     current_cred(),
				     (KEY_POS_ALL & ~KEY_POS_SETATTR) |
				     (KEY_USR_ALL & ~KEY_USR_SETATTR),
				     KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
	if (IS_ERR(nvme_keyring))
		return PTR_ERR(nvme_keyring);

	err = register_key_type(&nvme_tls_psk_key_type);
	if (err)
		goto out_put_keyring;

	return 0;

out_put_keyring:
	key_put(nvme_keyring);
	return err;
}
EXPORT_SYMBOL_GPL(nvme_keyring_init);
/*
 * nvme_keyring_exit - tear down the NVMe keyring infrastructure
 *
 * Unregisters the "psk" key type first, then revokes and drops the
 * reference on the ".nvme" keyring created by nvme_keyring_init().
 */
void nvme_keyring_exit(void)
{
	unregister_key_type(&nvme_tls_psk_key_type);
	key_revoke(nvme_keyring);
	key_put(nvme_keyring);
}
EXPORT_SYMBOL_GPL(nvme_keyring_exit);
...@@ -92,16 +92,26 @@ config NVME_TCP ...@@ -92,16 +92,26 @@ config NVME_TCP
If unsure, say N. If unsure, say N.
config NVME_AUTH config NVME_TCP_TLS
bool "NVMe over Fabrics TCP TLS encryption support"
depends on NVME_TCP
select NVME_COMMON
select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
The TLS handshake daemon is available at
https://github.com/oracle/ktls-utils.
If unsure, say N.
config NVME_HOST_AUTH
bool "NVM Express over Fabrics In-Band Authentication" bool "NVM Express over Fabrics In-Band Authentication"
depends on NVME_CORE depends on NVME_CORE
select NVME_COMMON select NVME_COMMON
select CRYPTO select NVME_AUTH
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
help help
This provides support for NVMe over Fabrics In-Band Authentication. This provides support for NVMe over Fabrics In-Band Authentication.
......
...@@ -17,7 +17,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o ...@@ -17,7 +17,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-core-$(CONFIG_NVME_AUTH) += auth.o nvme-core-$(CONFIG_NVME_HOST_AUTH) += auth.o
nvme-y += pci.o nvme-y += pci.o
......
...@@ -23,6 +23,7 @@ struct nvme_dhchap_queue_context { ...@@ -23,6 +23,7 @@ struct nvme_dhchap_queue_context {
struct nvme_ctrl *ctrl; struct nvme_ctrl *ctrl;
struct crypto_shash *shash_tfm; struct crypto_shash *shash_tfm;
struct crypto_kpp *dh_tfm; struct crypto_kpp *dh_tfm;
struct nvme_dhchap_key *transformed_key;
void *buf; void *buf;
int qid; int qid;
int error; int error;
...@@ -36,7 +37,6 @@ struct nvme_dhchap_queue_context { ...@@ -36,7 +37,6 @@ struct nvme_dhchap_queue_context {
u8 c1[64]; u8 c1[64];
u8 c2[64]; u8 c2[64];
u8 response[64]; u8 response[64];
u8 *host_response;
u8 *ctrl_key; u8 *ctrl_key;
u8 *host_key; u8 *host_key;
u8 *sess_key; u8 *sess_key;
...@@ -428,12 +428,12 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, ...@@ -428,12 +428,12 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
__func__, chap->qid, chap->s1, chap->transaction); __func__, chap->qid, chap->s1, chap->transaction);
if (!chap->host_response) { if (!chap->transformed_key) {
chap->host_response = nvme_auth_transform_key(ctrl->host_key, chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
ctrl->opts->host->nqn); ctrl->opts->host->nqn);
if (IS_ERR(chap->host_response)) { if (IS_ERR(chap->transformed_key)) {
ret = PTR_ERR(chap->host_response); ret = PTR_ERR(chap->transformed_key);
chap->host_response = NULL; chap->transformed_key = NULL;
return ret; return ret;
} }
} else { } else {
...@@ -442,7 +442,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, ...@@ -442,7 +442,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
} }
ret = crypto_shash_setkey(chap->shash_tfm, ret = crypto_shash_setkey(chap->shash_tfm,
chap->host_response, ctrl->host_key->len); chap->transformed_key->key, chap->transformed_key->len);
if (ret) { if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
chap->qid, ret); chap->qid, ret);
...@@ -508,19 +508,19 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, ...@@ -508,19 +508,19 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap) struct nvme_dhchap_queue_context *chap)
{ {
SHASH_DESC_ON_STACK(shash, chap->shash_tfm); SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
u8 *ctrl_response; struct nvme_dhchap_key *transformed_key;
u8 buf[4], *challenge = chap->c2; u8 buf[4], *challenge = chap->c2;
int ret; int ret;
ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->opts->subsysnqn); ctrl->opts->subsysnqn);
if (IS_ERR(ctrl_response)) { if (IS_ERR(transformed_key)) {
ret = PTR_ERR(ctrl_response); ret = PTR_ERR(transformed_key);
return ret; return ret;
} }
ret = crypto_shash_setkey(chap->shash_tfm, ret = crypto_shash_setkey(chap->shash_tfm,
ctrl_response, ctrl->ctrl_key->len); transformed_key->key, transformed_key->len);
if (ret) { if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
chap->qid, ret); chap->qid, ret);
...@@ -586,7 +586,7 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, ...@@ -586,7 +586,7 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
out: out:
if (challenge != chap->c2) if (challenge != chap->c2)
kfree(challenge); kfree(challenge);
kfree(ctrl_response); nvme_auth_free_key(transformed_key);
return ret; return ret;
} }
...@@ -648,8 +648,8 @@ static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, ...@@ -648,8 +648,8 @@ static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{ {
kfree_sensitive(chap->host_response); nvme_auth_free_key(chap->transformed_key);
chap->host_response = NULL; chap->transformed_key = NULL;
kfree_sensitive(chap->host_key); kfree_sensitive(chap->host_key);
chap->host_key = NULL; chap->host_key = NULL;
chap->host_key_len = 0; chap->host_key_len = 0;
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include "nvme.h" #include "nvme.h"
#include "fabrics.h" #include "fabrics.h"
#include <linux/nvme-auth.h> #include <linux/nvme-auth.h>
#include <linux/nvme-keyring.h>
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include "trace.h" #include "trace.h"
...@@ -420,7 +421,7 @@ void nvme_complete_rq(struct request *req) ...@@ -420,7 +421,7 @@ void nvme_complete_rq(struct request *req)
nvme_failover_req(req); nvme_failover_req(req);
return; return;
case AUTHENTICATE: case AUTHENTICATE:
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
queue_work(nvme_wq, &ctrl->dhchap_auth_work); queue_work(nvme_wq, &ctrl->dhchap_auth_work);
nvme_retry_req(req); nvme_retry_req(req);
#else #else
...@@ -4399,7 +4400,7 @@ static void nvme_free_ctrl(struct device *dev) ...@@ -4399,7 +4400,7 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance) if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance); ida_free(&nvme_instance_ida, ctrl->instance);
key_put(ctrl->tls_key);
nvme_free_cels(ctrl); nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl); nvme_mpath_uninit(ctrl);
nvme_auth_stop(ctrl); nvme_auth_stop(ctrl);
...@@ -4723,12 +4724,16 @@ static int __init nvme_core_init(void) ...@@ -4723,12 +4724,16 @@ static int __init nvme_core_init(void)
result = PTR_ERR(nvme_ns_chr_class); result = PTR_ERR(nvme_ns_chr_class);
goto unregister_generic_ns; goto unregister_generic_ns;
} }
result = nvme_keyring_init();
result = nvme_init_auth();
if (result) if (result)
goto destroy_ns_chr; goto destroy_ns_chr;
result = nvme_init_auth();
if (result)
goto keyring_exit;
return 0; return 0;
keyring_exit:
nvme_keyring_exit();
destroy_ns_chr: destroy_ns_chr:
class_destroy(nvme_ns_chr_class); class_destroy(nvme_ns_chr_class);
unregister_generic_ns: unregister_generic_ns:
...@@ -4752,6 +4757,7 @@ static int __init nvme_core_init(void) ...@@ -4752,6 +4757,7 @@ static int __init nvme_core_init(void)
static void __exit nvme_core_exit(void) static void __exit nvme_core_exit(void)
{ {
nvme_exit_auth(); nvme_exit_auth();
nvme_keyring_exit();
class_destroy(nvme_ns_chr_class); class_destroy(nvme_ns_chr_class);
class_destroy(nvme_subsys_class); class_destroy(nvme_subsys_class);
class_destroy(nvme_class); class_destroy(nvme_class);
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include "nvme.h" #include "nvme.h"
#include "fabrics.h" #include "fabrics.h"
#include <linux/nvme-keyring.h>
static LIST_HEAD(nvmf_transports); static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem); static DECLARE_RWSEM(nvmf_transports_rwsem);
...@@ -622,6 +623,23 @@ static struct nvmf_transport_ops *nvmf_lookup_transport( ...@@ -622,6 +623,23 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL; return NULL;
} }
/*
 * nvmf_parse_key - resolve a user-supplied key serial into a struct key
 * @key_id: key serial parsed from the "keyring="/"tls_key=" option
 *
 * Returns the looked-up key (caller owns a reference and must
 * key_put() it) or an ERR_PTR on failure.
 */
static struct key *nvmf_parse_key(int key_id)
{
	struct key *key;

	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
		pr_err("TLS is not supported\n");
		return ERR_PTR(-EINVAL);
	}

	key = key_lookup(key_id);
	/*
	 * Bug fix: the original tested !IS_ERR(key), logging "not found"
	 * on SUCCESS and "Using key" on failure.  The error path is the
	 * IS_ERR() case.
	 */
	if (IS_ERR(key))
		pr_err("key id %08x not found\n", key_id);
	else
		pr_debug("Using key id %08x\n", key_id);
	return key;
}
static const match_table_t opt_tokens = { static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" }, { NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" }, { NVMF_OPT_TRADDR, "traddr=%s" },
...@@ -643,10 +661,17 @@ static const match_table_t opt_tokens = { ...@@ -643,10 +661,17 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" }, { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" }, { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" }, { NVMF_OPT_TOS, "tos=%d" },
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_KEYRING, "keyring=%d" },
{ NVMF_OPT_TLS_KEY, "tls_key=%d" },
#endif
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" }, { NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" },
#endif
{ NVMF_OPT_ERR, NULL } { NVMF_OPT_ERR, NULL }
}; };
...@@ -657,9 +682,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ...@@ -657,9 +682,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
char *options, *o, *p; char *options, *o, *p;
int token, ret = 0; int token, ret = 0;
size_t nqnlen = 0; size_t nqnlen = 0;
int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO, key_id;
uuid_t hostid; uuid_t hostid;
char hostnqn[NVMF_NQN_SIZE]; char hostnqn[NVMF_NQN_SIZE];
struct key *key;
/* Set defaults */ /* Set defaults */
opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->queue_size = NVMF_DEF_QUEUE_SIZE;
...@@ -671,6 +697,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ...@@ -671,6 +697,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->hdr_digest = false; opts->hdr_digest = false;
opts->data_digest = false; opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */ opts->tos = -1; /* < 0 == use transport default */
opts->tls = false;
opts->tls_key = NULL;
opts->keyring = NULL;
options = o = kstrdup(buf, GFP_KERNEL); options = o = kstrdup(buf, GFP_KERNEL);
if (!options) if (!options)
...@@ -924,6 +953,32 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ...@@ -924,6 +953,32 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
} }
opts->tos = token; opts->tos = token;
break; break;
case NVMF_OPT_KEYRING:
if (match_int(args, &key_id) || key_id <= 0) {
ret = -EINVAL;
goto out;
}
key = nvmf_parse_key(key_id);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto out;
}
key_put(opts->keyring);
opts->keyring = key;
break;
case NVMF_OPT_TLS_KEY:
if (match_int(args, &key_id) || key_id <= 0) {
ret = -EINVAL;
goto out;
}
key = nvmf_parse_key(key_id);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto out;
}
key_put(opts->tls_key);
opts->tls_key = key;
break;
case NVMF_OPT_DISCOVERY: case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true; opts->discovery_nqn = true;
break; break;
...@@ -955,6 +1010,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ...@@ -955,6 +1010,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
kfree(opts->dhchap_ctrl_secret); kfree(opts->dhchap_ctrl_secret);
opts->dhchap_ctrl_secret = p; opts->dhchap_ctrl_secret = p;
break; break;
case NVMF_OPT_TLS:
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
pr_err("TLS is not supported\n");
ret = -EINVAL;
goto out;
}
opts->tls = true;
break;
default: default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p); p);
...@@ -1156,6 +1219,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts, ...@@ -1156,6 +1219,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
void nvmf_free_options(struct nvmf_ctrl_options *opts) void nvmf_free_options(struct nvmf_ctrl_options *opts)
{ {
nvmf_host_put(opts->host); nvmf_host_put(opts->host);
key_put(opts->keyring);
key_put(opts->tls_key);
kfree(opts->transport); kfree(opts->transport);
kfree(opts->traddr); kfree(opts->traddr);
kfree(opts->trsvcid); kfree(opts->trsvcid);
......
...@@ -70,6 +70,9 @@ enum { ...@@ -70,6 +70,9 @@ enum {
NVMF_OPT_DISCOVERY = 1 << 22, NVMF_OPT_DISCOVERY = 1 << 22,
NVMF_OPT_DHCHAP_SECRET = 1 << 23, NVMF_OPT_DHCHAP_SECRET = 1 << 23,
NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24, NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
NVMF_OPT_TLS = 1 << 25,
NVMF_OPT_KEYRING = 1 << 26,
NVMF_OPT_TLS_KEY = 1 << 27,
}; };
/** /**
...@@ -102,6 +105,9 @@ enum { ...@@ -102,6 +105,9 @@ enum {
* @dhchap_secret: DH-HMAC-CHAP secret * @dhchap_secret: DH-HMAC-CHAP secret
* @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
* authentication * authentication
* @keyring: Keyring to use for key lookups
* @tls_key: TLS key for encrypted connections (TCP)
* @tls: Start TLS encrypted connections (TCP)
* @disable_sqflow: disable controller sq flow control * @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP) * @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP) * @data_digest: generate/verify data digest (TCP)
...@@ -128,6 +134,9 @@ struct nvmf_ctrl_options { ...@@ -128,6 +134,9 @@ struct nvmf_ctrl_options {
struct nvmf_host *host; struct nvmf_host *host;
char *dhchap_secret; char *dhchap_secret;
char *dhchap_ctrl_secret; char *dhchap_ctrl_secret;
struct key *keyring;
struct key *tls_key;
bool tls;
bool disable_sqflow; bool disable_sqflow;
bool hdr_digest; bool hdr_digest;
bool data_digest; bool data_digest;
......
...@@ -349,7 +349,7 @@ struct nvme_ctrl { ...@@ -349,7 +349,7 @@ struct nvme_ctrl {
struct work_struct ana_work; struct work_struct ana_work;
#endif #endif
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
struct work_struct dhchap_auth_work; struct work_struct dhchap_auth_work;
struct mutex dhchap_auth_mutex; struct mutex dhchap_auth_mutex;
struct nvme_dhchap_queue_context *dhchap_ctxs; struct nvme_dhchap_queue_context *dhchap_ctxs;
...@@ -357,6 +357,7 @@ struct nvme_ctrl { ...@@ -357,6 +357,7 @@ struct nvme_ctrl {
struct nvme_dhchap_key *ctrl_key; struct nvme_dhchap_key *ctrl_key;
u16 transaction; u16 transaction;
#endif #endif
struct key *tls_key;
/* Power saving configuration */ /* Power saving configuration */
u64 ps_max_latency_us; u64 ps_max_latency_us;
...@@ -1048,7 +1049,7 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) ...@@ -1048,7 +1049,7 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1)); return ctrl->sgls & ((1 << 0) | (1 << 1));
} }
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
int __init nvme_init_auth(void); int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void); void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl); int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
......
...@@ -409,7 +409,7 @@ static ssize_t dctype_show(struct device *dev, ...@@ -409,7 +409,7 @@ static ssize_t dctype_show(struct device *dev,
} }
static DEVICE_ATTR_RO(dctype); static DEVICE_ATTR_RO(dctype);
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
...@@ -527,6 +527,19 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, ...@@ -527,6 +527,19 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif #endif
#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (!ctrl->tls_key)
return 0;
return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
}
static DEVICE_ATTR_RO(tls_key);
#endif
static struct attribute *nvme_dev_attrs[] = { static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr, &dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr, &dev_attr_rescan_controller.attr,
...@@ -550,9 +563,12 @@ static struct attribute *nvme_dev_attrs[] = { ...@@ -550,9 +563,12 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr, &dev_attr_kato.attr,
&dev_attr_cntrltype.attr, &dev_attr_cntrltype.attr,
&dev_attr_dctype.attr, &dev_attr_dctype.attr,
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
&dev_attr_dhchap_secret.attr, &dev_attr_dhchap_secret.attr,
&dev_attr_dhchap_ctrl_secret.attr, &dev_attr_dhchap_ctrl_secret.attr,
#endif
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif #endif
NULL NULL
}; };
...@@ -577,12 +593,17 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, ...@@ -577,12 +593,17 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0; return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0; return 0;
#ifdef CONFIG_NVME_AUTH #ifdef CONFIG_NVME_HOST_AUTH
if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
return 0; return 0;
if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
return 0; return 0;
#endif #endif
#ifdef CONFIG_NVME_TCP_TLS
if (a == &dev_attr_tls_key.attr &&
(!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
return 0;
#endif
return a->mode; return a->mode;
} }
......
...@@ -8,9 +8,14 @@ ...@@ -8,9 +8,14 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h> #include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <crypto/hash.h> #include <crypto/hash.h>
#include <net/busy_poll.h> #include <net/busy_poll.h>
...@@ -31,6 +36,16 @@ static int so_priority; ...@@ -31,6 +36,16 @@ static int so_priority;
module_param(so_priority, int, 0644); module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
#ifdef CONFIG_NVME_TCP_TLS
/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form /* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
...@@ -146,7 +161,10 @@ struct nvme_tcp_queue { ...@@ -146,7 +161,10 @@ struct nvme_tcp_queue {
struct ahash_request *snd_hash; struct ahash_request *snd_hash;
__le32 exp_ddgst; __le32 exp_ddgst;
__le32 recv_ddgst; __le32 recv_ddgst;
#ifdef CONFIG_NVME_TCP_TLS
struct completion tls_complete;
int tls_err;
#endif
struct page_frag_cache pf_cache; struct page_frag_cache pf_cache;
void (*state_change)(struct sock *); void (*state_change)(struct sock *);
...@@ -1338,7 +1356,9 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) ...@@ -1338,7 +1356,9 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
} }
noreclaim_flag = memalloc_noreclaim_save(); noreclaim_flag = memalloc_noreclaim_save();
sock_release(queue->sock); /* ->sock will be released by fput() */
fput(queue->sock->file);
queue->sock = NULL;
memalloc_noreclaim_restore(noreclaim_flag); memalloc_noreclaim_restore(noreclaim_flag);
kfree(queue->pdu); kfree(queue->pdu);
...@@ -1350,6 +1370,8 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) ...@@ -1350,6 +1370,8 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{ {
struct nvme_tcp_icreq_pdu *icreq; struct nvme_tcp_icreq_pdu *icreq;
struct nvme_tcp_icresp_pdu *icresp; struct nvme_tcp_icresp_pdu *icresp;
char cbuf[CMSG_LEN(sizeof(char))] = {};
u8 ctype;
struct msghdr msg = {}; struct msghdr msg = {};
struct kvec iov; struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst; bool ctrl_hdgst, ctrl_ddgst;
...@@ -1381,17 +1403,35 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) ...@@ -1381,17 +1403,35 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
iov.iov_base = icreq; iov.iov_base = icreq;
iov.iov_len = sizeof(*icreq); iov.iov_len = sizeof(*icreq);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0) if (ret < 0) {
pr_warn("queue %d: failed to send icreq, error %d\n",
nvme_tcp_queue_id(queue), ret);
goto free_icresp; goto free_icresp;
}
memset(&msg, 0, sizeof(msg)); memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp; iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp); iov.iov_len = sizeof(*icresp);
if (queue->ctrl->ctrl.opts->tls) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags); iov.iov_len, msg.msg_flags);
if (ret < 0) if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
goto free_icresp; goto free_icresp;
}
if (queue->ctrl->ctrl.opts->tls) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
pr_err("queue %d: unhandled TLS record %d\n",
nvme_tcp_queue_id(queue), ctype);
return -ENOTCONN;
}
}
ret = -EINVAL; ret = -EINVAL;
if (icresp->hdr.type != nvme_tcp_icresp) { if (icresp->hdr.type != nvme_tcp_icresp) {
pr_err("queue %d: bad type returned %d\n", pr_err("queue %d: bad type returned %d\n",
...@@ -1507,11 +1547,99 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) ...@@ -1507,11 +1547,99 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
} }
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) #ifdef CONFIG_NVME_TCP_TLS
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{
struct nvme_tcp_queue *queue = data;
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
int qid = nvme_tcp_queue_id(queue);
struct key *tls_key;
dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
qid, pskid, status);
if (status) {
queue->tls_err = -status;
goto out_complete;
}
tls_key = key_lookup(pskid);
if (IS_ERR(tls_key)) {
dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
qid, pskid);
queue->tls_err = -ENOKEY;
} else {
ctrl->ctrl.tls_key = tls_key;
queue->tls_err = 0;
}
out_complete:
complete(&queue->tls_complete);
}
static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue,
key_serial_t pskid)
{
int qid = nvme_tcp_queue_id(queue);
int ret;
struct tls_handshake_args args;
unsigned long tmo = tls_handshake_timeout * HZ;
key_serial_t keyring = nvme_keyring_id();
dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
qid, pskid);
memset(&args, 0, sizeof(args));
args.ta_sock = queue->sock;
args.ta_done = nvme_tcp_tls_done;
args.ta_data = queue;
args.ta_my_peerids[0] = pskid;
args.ta_num_peerids = 1;
if (nctrl->opts->keyring)
keyring = key_serial(nctrl->opts->keyring);
args.ta_keyring = keyring;
args.ta_timeout_ms = tls_handshake_timeout * 1000;
queue->tls_err = -EOPNOTSUPP;
init_completion(&queue->tls_complete);
ret = tls_client_hello_psk(&args, GFP_KERNEL);
if (ret) {
dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
qid, ret);
return ret;
}
ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
if (ret <= 0) {
if (ret == 0)
ret = -ETIMEDOUT;
dev_err(nctrl->device,
"queue %d: TLS handshake failed, error %d\n",
qid, ret);
tls_handshake_cancel(queue->sock->sk);
} else {
dev_dbg(nctrl->device,
"queue %d: TLS handshake complete, error %d\n",
qid, queue->tls_err);
ret = queue->tls_err;
}
return ret;
}
#else
static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue,
key_serial_t pskid)
{
return -EPROTONOSUPPORT;
}
#endif
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
key_serial_t pskid)
{ {
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid]; struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size; int ret, rcv_pdu_size;
struct file *sock_file;
mutex_init(&queue->queue_lock); mutex_init(&queue->queue_lock);
queue->ctrl = ctrl; queue->ctrl = ctrl;
...@@ -1534,6 +1662,11 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) ...@@ -1534,6 +1662,11 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
goto err_destroy_mutex; goto err_destroy_mutex;
} }
sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
if (IS_ERR(sock_file)) {
ret = PTR_ERR(sock_file);
goto err_destroy_mutex;
}
nvme_tcp_reclassify_socket(queue->sock); nvme_tcp_reclassify_socket(queue->sock);
/* Single syn retry */ /* Single syn retry */
...@@ -1624,6 +1757,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) ...@@ -1624,6 +1757,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
goto err_rcv_pdu; goto err_rcv_pdu;
} }
/* If PSKs are configured try to start TLS */
if (pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
}
ret = nvme_tcp_init_connection(queue); ret = nvme_tcp_init_connection(queue);
if (ret) if (ret)
goto err_init_connect; goto err_init_connect;
...@@ -1640,7 +1780,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) ...@@ -1640,7 +1780,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest) if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue); nvme_tcp_free_crypto(queue);
err_sock: err_sock:
sock_release(queue->sock); /* ->sock will be released by fput() */
fput(queue->sock->file);
queue->sock = NULL; queue->sock = NULL;
err_destroy_mutex: err_destroy_mutex:
mutex_destroy(&queue->send_mutex); mutex_destroy(&queue->send_mutex);
...@@ -1772,10 +1913,25 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, ...@@ -1772,10 +1913,25 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{ {
int ret; int ret;
key_serial_t pskid = 0;
ret = nvme_tcp_alloc_queue(ctrl, 0); if (ctrl->opts->tls) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
else
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
ret = -ENOKEY;
goto out_free_queue;
}
}
ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
if (ret) if (ret)
return ret; goto out_free_queue;
ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
if (ret) if (ret)
...@@ -1792,8 +1948,13 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) ...@@ -1792,8 +1948,13 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{ {
int i, ret; int i, ret;
if (ctrl->opts->tls && !ctrl->tls_key) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
for (i = 1; i < ctrl->queue_count; i++) { for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_alloc_queue(ctrl, i); ret = nvme_tcp_alloc_queue(ctrl, i,
key_serial(ctrl->tls_key));
if (ret) if (ret)
goto out_free_queues; goto out_free_queues;
} }
...@@ -2621,7 +2782,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = { ...@@ -2621,7 +2782,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE, NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
.create_ctrl = nvme_tcp_create_ctrl, .create_ctrl = nvme_tcp_create_ctrl,
}; };
......
...@@ -84,16 +84,26 @@ config NVME_TARGET_TCP ...@@ -84,16 +84,26 @@ config NVME_TARGET_TCP
If unsure, say N. If unsure, say N.
config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
select NVME_COMMON
select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
The TLS handshake daemon is available at
https://github.com/oracle/ktls-utils.
If unsure, say N.
config NVME_TARGET_AUTH config NVME_TARGET_AUTH
bool "NVMe over Fabrics In-band Authentication support" bool "NVMe over Fabrics In-band Authentication support"
depends on NVME_TARGET depends on NVME_TARGET
select NVME_COMMON select NVME_COMMON
select CRYPTO select NVME_AUTH
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
help help
This enables support for NVMe over Fabrics In-band Authentication This enables support for NVMe over Fabrics In-band Authentication
......
...@@ -267,7 +267,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, ...@@ -267,7 +267,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
struct shash_desc *shash; struct shash_desc *shash;
struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name; const char *hash_name;
u8 *challenge = req->sq->dhchap_c1, *host_response; u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key;
u8 buf[4]; u8 buf[4];
int ret; int ret;
...@@ -291,14 +292,15 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, ...@@ -291,14 +292,15 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
goto out_free_tfm; goto out_free_tfm;
} }
host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn); transformed_key = nvme_auth_transform_key(ctrl->host_key,
if (IS_ERR(host_response)) { ctrl->hostnqn);
ret = PTR_ERR(host_response); if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key);
goto out_free_tfm; goto out_free_tfm;
} }
ret = crypto_shash_setkey(shash_tfm, host_response, ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
ctrl->host_key->len); transformed_key->len);
if (ret) if (ret)
goto out_free_response; goto out_free_response;
...@@ -365,7 +367,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, ...@@ -365,7 +367,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
kfree(challenge); kfree(challenge);
kfree(shash); kfree(shash);
out_free_response: out_free_response:
kfree_sensitive(host_response); nvme_auth_free_key(transformed_key);
out_free_tfm: out_free_tfm:
crypto_free_shash(shash_tfm); crypto_free_shash(shash_tfm);
return 0; return 0;
...@@ -378,7 +380,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, ...@@ -378,7 +380,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
struct shash_desc *shash; struct shash_desc *shash;
struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name; const char *hash_name;
u8 *challenge = req->sq->dhchap_c2, *ctrl_response; u8 *challenge = req->sq->dhchap_c2;
struct nvme_dhchap_key *transformed_key;
u8 buf[4]; u8 buf[4];
int ret; int ret;
...@@ -402,15 +405,15 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, ...@@ -402,15 +405,15 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out_free_tfm; goto out_free_tfm;
} }
ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->subsysnqn); ctrl->subsysnqn);
if (IS_ERR(ctrl_response)) { if (IS_ERR(transformed_key)) {
ret = PTR_ERR(ctrl_response); ret = PTR_ERR(transformed_key);
goto out_free_tfm; goto out_free_tfm;
} }
ret = crypto_shash_setkey(shash_tfm, ctrl_response, ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
ctrl->ctrl_key->len); transformed_key->len);
if (ret) if (ret)
goto out_free_response; goto out_free_response;
...@@ -474,7 +477,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, ...@@ -474,7 +477,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
kfree(challenge); kfree(challenge);
kfree(shash); kfree(shash);
out_free_response: out_free_response:
kfree_sensitive(ctrl_response); nvme_auth_free_key(transformed_key);
out_free_tfm: out_free_tfm:
crypto_free_shash(shash_tfm); crypto_free_shash(shash_tfm);
return 0; return 0;
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#ifdef CONFIG_NVME_TARGET_AUTH #ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h> #include <linux/nvme-auth.h>
#endif #endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h> #include <crypto/hash.h>
#include <crypto/kpp.h> #include <crypto/kpp.h>
...@@ -159,10 +160,14 @@ static const struct nvmet_type_name_map nvmet_addr_treq[] = { ...@@ -159,10 +160,14 @@ static const struct nvmet_type_name_map nvmet_addr_treq[] = {
{ NVMF_TREQ_NOT_REQUIRED, "not required" }, { NVMF_TREQ_NOT_REQUIRED, "not required" },
}; };
static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
{
return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
}
static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page) static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{ {
u8 treq = to_nvmet_port(item)->disc_addr.treq & u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
NVME_TREQ_SECURE_CHANNEL_MASK;
int i; int i;
for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
...@@ -178,7 +183,7 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item, ...@@ -178,7 +183,7 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
const char *page, size_t count) const char *page, size_t count)
{ {
struct nvmet_port *port = to_nvmet_port(item); struct nvmet_port *port = to_nvmet_port(item);
u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK; u8 treq = nvmet_port_disc_addr_treq_mask(port);
int i; int i;
if (nvmet_is_port_enabled(port, __func__)) if (nvmet_is_port_enabled(port, __func__))
...@@ -193,6 +198,20 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item, ...@@ -193,6 +198,20 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
return -EINVAL; return -EINVAL;
found: found:
if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
switch (nvmet_addr_treq[i].type) {
case NVMF_TREQ_NOT_SPECIFIED:
pr_debug("treq '%s' not allowed for TLS1.3\n",
nvmet_addr_treq[i].name);
return -EINVAL;
case NVMF_TREQ_NOT_REQUIRED:
pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
break;
default:
break;
}
}
treq |= nvmet_addr_treq[i].type; treq |= nvmet_addr_treq[i].type;
port->disc_addr.treq = treq; port->disc_addr.treq = treq;
return count; return count;
...@@ -303,6 +322,11 @@ static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) ...@@ -303,6 +322,11 @@ static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
} }
static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
{
port->disc_addr.tsas.tcp.sectype = sectype;
}
static ssize_t nvmet_addr_trtype_store(struct config_item *item, static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count) const char *page, size_t count)
{ {
...@@ -325,11 +349,99 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item, ...@@ -325,11 +349,99 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
port->disc_addr.trtype = nvmet_transport[i].type; port->disc_addr.trtype = nvmet_transport[i].type;
if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
nvmet_port_init_tsas_rdma(port); nvmet_port_init_tsas_rdma(port);
else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
return count; return count;
} }
CONFIGFS_ATTR(nvmet_, addr_trtype); CONFIGFS_ATTR(nvmet_, addr_trtype);
static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
{ NVMF_TCP_SECTYPE_NONE, "none" },
{ NVMF_TCP_SECTYPE_TLS13, "tls1.3" },
};
static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
{ NVMF_RDMA_QPTYPE_CONNECTED, "connected" },
{ NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" },
};
static ssize_t nvmet_addr_tsas_show(struct config_item *item,
char *page)
{
struct nvmet_port *port = to_nvmet_port(item);
int i;
if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
}
} else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
}
}
return sprintf(page, "reserved\n");
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = nvmet_port_disc_addr_treq_mask(port);
u8 sectype;
int i;
if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
sectype = nvmet_addr_tsas_tcp[i].type;
goto found;
}
}
pr_err("Invalid value '%s' for tsas\n", page);
return -EINVAL;
found:
if (sectype == NVMF_TCP_SECTYPE_TLS13) {
if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
pr_err("TLS is not supported\n");
return -EINVAL;
}
if (!port->keyring) {
pr_err("TLS keyring not configured\n");
return -EINVAL;
}
}
nvmet_port_init_tsas_tcp(port, sectype);
/*
* If TLS is enabled TREQ should be set to 'required' per default
*/
if (sectype == NVMF_TCP_SECTYPE_TLS13) {
u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);
if (sc == NVMF_TREQ_NOT_SPECIFIED)
treq |= NVMF_TREQ_REQUIRED;
else
treq |= sc;
} else {
treq |= NVMF_TREQ_NOT_SPECIFIED;
}
port->disc_addr.treq = treq;
return count;
}
CONFIGFS_ATTR(nvmet_, addr_tsas);
/* /*
* Namespace structures & file operation functions below * Namespace structures & file operation functions below
*/ */
...@@ -1731,6 +1843,7 @@ static void nvmet_port_release(struct config_item *item) ...@@ -1731,6 +1843,7 @@ static void nvmet_port_release(struct config_item *item)
flush_workqueue(nvmet_wq); flush_workqueue(nvmet_wq);
list_del(&port->global_entry); list_del(&port->global_entry);
key_put(port->keyring);
kfree(port->ana_state); kfree(port->ana_state);
kfree(port); kfree(port);
} }
...@@ -1741,6 +1854,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = { ...@@ -1741,6 +1854,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_traddr, &nvmet_attr_addr_traddr,
&nvmet_attr_addr_trsvcid, &nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype, &nvmet_attr_addr_trtype,
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size, &nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY #ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable, &nvmet_attr_param_pi_enable,
...@@ -1779,6 +1893,14 @@ static struct config_group *nvmet_ports_make(struct config_group *group, ...@@ -1779,6 +1893,14 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
if (nvme_keyring_id()) {
port->keyring = key_lookup(nvme_keyring_id());
if (IS_ERR(port->keyring)) {
pr_warn("NVMe keyring not available, disabling TLS\n");
port->keyring = NULL;
}
}
for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
if (i == NVMET_DEFAULT_ANA_GRPID) if (i == NVMET_DEFAULT_ANA_GRPID)
port->ana_state[1] = NVME_ANA_OPTIMIZED; port->ana_state[1] = NVME_ANA_OPTIMIZED;
......
...@@ -146,7 +146,8 @@ struct nvmet_fc_tgt_queue { ...@@ -146,7 +146,8 @@ struct nvmet_fc_tgt_queue {
struct workqueue_struct *work_q; struct workqueue_struct *work_q;
struct kref ref; struct kref ref;
struct rcu_head rcu; struct rcu_head rcu;
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ /* array of fcp_iods */
struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
} __aligned(sizeof(unsigned long long)); } __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport { struct nvmet_fc_hostport {
......
...@@ -158,6 +158,7 @@ struct nvmet_port { ...@@ -158,6 +158,7 @@ struct nvmet_port {
struct config_group ana_groups_group; struct config_group ana_groups_group;
struct nvmet_ana_group ana_default_group; struct nvmet_ana_group ana_default_group;
enum nvme_ana_state *ana_state; enum nvme_ana_state *ana_state;
struct key *keyring;
void *priv; void *priv;
bool enabled; bool enabled;
int inline_data_size; int inline_data_size;
...@@ -178,6 +179,16 @@ static inline struct nvmet_port *ana_groups_to_port( ...@@ -178,6 +179,16 @@ static inline struct nvmet_port *ana_groups_to_port(
ana_groups_group); ana_groups_group);
} }
static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}
static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}
struct nvmet_ctrl { struct nvmet_ctrl {
struct nvmet_subsys *subsys; struct nvmet_subsys *subsys;
struct nvmet_sq **sqs; struct nvmet_sq **sqs;
......
...@@ -8,9 +8,14 @@ ...@@ -8,9 +8,14 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h> #include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h> #include <linux/inet.h>
#include <linux/llist.h> #include <linux/llist.h>
#include <crypto/hash.h> #include <crypto/hash.h>
...@@ -66,6 +71,16 @@ device_param_cb(idle_poll_period_usecs, &set_param_ops, ...@@ -66,6 +71,16 @@ device_param_cb(idle_poll_period_usecs, &set_param_ops,
MODULE_PARM_DESC(idle_poll_period_usecs, MODULE_PARM_DESC(idle_poll_period_usecs,
"nvmet tcp io_work poll till idle time period in usecs: Default 0"); "nvmet tcp io_work poll till idle time period in usecs: Default 0");
#ifdef CONFIG_NVME_TARGET_TCP_TLS
/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
#define NVMET_TCP_RECV_BUDGET 8 #define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8 #define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64 #define NVMET_TCP_IO_WORK_BUDGET 64
...@@ -104,6 +119,7 @@ struct nvmet_tcp_cmd { ...@@ -104,6 +119,7 @@ struct nvmet_tcp_cmd {
u32 pdu_len; u32 pdu_len;
u32 pdu_recv; u32 pdu_recv;
int sg_idx; int sg_idx;
char recv_cbuf[CMSG_LEN(sizeof(char))];
struct msghdr recv_msg; struct msghdr recv_msg;
struct bio_vec *iov; struct bio_vec *iov;
u32 flags; u32 flags;
...@@ -122,8 +138,10 @@ struct nvmet_tcp_cmd { ...@@ -122,8 +138,10 @@ struct nvmet_tcp_cmd {
enum nvmet_tcp_queue_state { enum nvmet_tcp_queue_state {
NVMET_TCP_Q_CONNECTING, NVMET_TCP_Q_CONNECTING,
NVMET_TCP_Q_TLS_HANDSHAKE,
NVMET_TCP_Q_LIVE, NVMET_TCP_Q_LIVE,
NVMET_TCP_Q_DISCONNECTING, NVMET_TCP_Q_DISCONNECTING,
NVMET_TCP_Q_FAILED,
}; };
struct nvmet_tcp_queue { struct nvmet_tcp_queue {
...@@ -132,6 +150,7 @@ struct nvmet_tcp_queue { ...@@ -132,6 +150,7 @@ struct nvmet_tcp_queue {
struct work_struct io_work; struct work_struct io_work;
struct nvmet_cq nvme_cq; struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq; struct nvmet_sq nvme_sq;
struct kref kref;
/* send state */ /* send state */
struct nvmet_tcp_cmd *cmds; struct nvmet_tcp_cmd *cmds;
...@@ -155,6 +174,10 @@ struct nvmet_tcp_queue { ...@@ -155,6 +174,10 @@ struct nvmet_tcp_queue {
struct ahash_request *snd_hash; struct ahash_request *snd_hash;
struct ahash_request *rcv_hash; struct ahash_request *rcv_hash;
/* TLS state */
key_serial_t tls_pskid;
struct delayed_work tls_handshake_tmo_work;
unsigned long poll_end; unsigned long poll_end;
spinlock_t state_lock; spinlock_t state_lock;
...@@ -918,6 +941,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) ...@@ -918,6 +941,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
free_crypto: free_crypto:
if (queue->hdr_digest || queue->data_digest) if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue); nvmet_tcp_free_crypto(queue);
queue->state = NVMET_TCP_Q_FAILED;
return ret; return ret;
} }
...@@ -1099,20 +1123,65 @@ static inline bool nvmet_tcp_pdu_valid(u8 type) ...@@ -1099,20 +1123,65 @@ static inline bool nvmet_tcp_pdu_valid(u8 type)
return false; return false;
} }
static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
struct msghdr *msg, char *cbuf)
{
struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
u8 ctype, level, description;
int ret = 0;
ctype = tls_get_record_type(queue->sock->sk, cmsg);
switch (ctype) {
case 0:
break;
case TLS_RECORD_TYPE_DATA:
break;
case TLS_RECORD_TYPE_ALERT:
tls_alert_recv(queue->sock->sk, msg, &level, &description);
if (level == TLS_ALERT_LEVEL_FATAL) {
pr_err("queue %d: TLS Alert desc %u\n",
queue->idx, description);
ret = -ENOTCONN;
} else {
pr_warn("queue %d: TLS Alert desc %u\n",
queue->idx, description);
ret = -EAGAIN;
}
break;
default:
/* discard this record type */
pr_err("queue %d: TLS record %d unhandled\n",
queue->idx, ctype);
ret = -EAGAIN;
break;
}
return ret;
}
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{ {
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
int len; int len, ret;
struct kvec iov; struct kvec iov;
char cbuf[CMSG_LEN(sizeof(char))] = {};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
recv: recv:
iov.iov_base = (void *)&queue->pdu + queue->offset; iov.iov_base = (void *)&queue->pdu + queue->offset;
iov.iov_len = queue->left; iov.iov_len = queue->left;
if (queue->tls_pskid) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
len = kernel_recvmsg(queue->sock, &msg, &iov, 1, len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags); iov.iov_len, msg.msg_flags);
if (unlikely(len < 0)) if (unlikely(len < 0))
return len; return len;
if (queue->tls_pskid) {
ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
if (ret < 0)
return ret;
}
queue->offset += len; queue->offset += len;
queue->left -= len; queue->left -= len;
...@@ -1165,16 +1234,22 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) ...@@ -1165,16 +1234,22 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{ {
struct nvmet_tcp_cmd *cmd = queue->cmd; struct nvmet_tcp_cmd *cmd = queue->cmd;
int ret; int len, ret;
while (msg_data_left(&cmd->recv_msg)) { while (msg_data_left(&cmd->recv_msg)) {
ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
cmd->recv_msg.msg_flags); cmd->recv_msg.msg_flags);
if (ret <= 0) if (len <= 0)
return ret; return len;
if (queue->tls_pskid) {
ret = nvmet_tcp_tls_record_ok(cmd->queue,
&cmd->recv_msg, cmd->recv_cbuf);
if (ret < 0)
return ret;
}
cmd->pdu_recv += ret; cmd->pdu_recv += len;
cmd->rbytes_done += ret; cmd->rbytes_done += len;
} }
if (queue->data_digest) { if (queue->data_digest) {
...@@ -1192,20 +1267,30 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) ...@@ -1192,20 +1267,30 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{ {
struct nvmet_tcp_cmd *cmd = queue->cmd; struct nvmet_tcp_cmd *cmd = queue->cmd;
int ret; int ret, len;
char cbuf[CMSG_LEN(sizeof(char))] = {};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = { struct kvec iov = {
.iov_base = (void *)&cmd->recv_ddgst + queue->offset, .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
.iov_len = queue->left .iov_len = queue->left
}; };
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, if (queue->tls_pskid) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags); iov.iov_len, msg.msg_flags);
if (unlikely(ret < 0)) if (unlikely(len < 0))
return ret; return len;
if (queue->tls_pskid) {
ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
if (ret < 0)
return ret;
}
queue->offset += ret; queue->offset += len;
queue->left -= ret; queue->left -= len;
if (queue->left) if (queue->left)
return -EAGAIN; return -EAGAIN;
...@@ -1283,14 +1368,27 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, ...@@ -1283,14 +1368,27 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
return ret; return ret;
} }
static void nvmet_tcp_release_queue(struct kref *kref)
{
struct nvmet_tcp_queue *queue =
container_of(kref, struct nvmet_tcp_queue, kref);
WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
queue_work(nvmet_wq, &queue->release_work);
}
static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{ {
spin_lock(&queue->state_lock); spin_lock_bh(&queue->state_lock);
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
/* Socket closed during handshake */
tls_handshake_cancel(queue->sock->sk);
}
if (queue->state != NVMET_TCP_Q_DISCONNECTING) { if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING; queue->state = NVMET_TCP_Q_DISCONNECTING;
queue_work(nvmet_wq, &queue->release_work); kref_put(&queue->kref, nvmet_tcp_release_queue);
} }
spin_unlock(&queue->state_lock); spin_unlock_bh(&queue->state_lock);
} }
static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
...@@ -1372,6 +1470,10 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, ...@@ -1372,6 +1470,10 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
if (!c->r2t_pdu) if (!c->r2t_pdu)
goto out_free_data; goto out_free_data;
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
c->recv_msg.msg_control = c->recv_cbuf;
c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
}
c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
list_add_tail(&c->entry, &queue->free_list); list_add_tail(&c->entry, &queue->free_list);
...@@ -1485,6 +1587,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) ...@@ -1485,6 +1587,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
mutex_unlock(&nvmet_tcp_queue_mutex); mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_tcp_restore_socket_callbacks(queue); nvmet_tcp_restore_socket_callbacks(queue);
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
cancel_work_sync(&queue->io_work); cancel_work_sync(&queue->io_work);
/* stop accepting incoming data */ /* stop accepting incoming data */
queue->rcv_state = NVMET_TCP_RECV_ERR; queue->rcv_state = NVMET_TCP_RECV_ERR;
...@@ -1493,12 +1596,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) ...@@ -1493,12 +1596,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_sq_destroy(&queue->nvme_sq); nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work); cancel_work_sync(&queue->io_work);
nvmet_tcp_free_cmd_data_in_buffers(queue); nvmet_tcp_free_cmd_data_in_buffers(queue);
sock_release(queue->sock); /* ->sock will be released by fput() */
fput(queue->sock->file);
nvmet_tcp_free_cmds(queue); nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest) if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue); nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx); ida_free(&nvmet_tcp_queue_ida, queue->idx);
page = virt_to_head_page(queue->pf_cache.va); page = virt_to_head_page(queue->pf_cache.va);
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
kfree(queue); kfree(queue);
...@@ -1512,8 +1615,13 @@ static void nvmet_tcp_data_ready(struct sock *sk) ...@@ -1512,8 +1615,13 @@ static void nvmet_tcp_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock); read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data; queue = sk->sk_user_data;
if (likely(queue)) if (likely(queue)) {
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); if (queue->data_ready)
queue->data_ready(sk);
if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
&queue->io_work);
}
read_unlock_bh(&sk->sk_callback_lock); read_unlock_bh(&sk->sk_callback_lock);
} }
...@@ -1621,31 +1729,174 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) ...@@ -1621,31 +1729,174 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
return ret; return ret;
} }
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, #ifdef CONFIG_NVME_TARGET_TCP_TLS
static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
int len, ret;
struct kvec iov = {
.iov_base = (u8 *)&queue->pdu + queue->offset,
.iov_len = sizeof(struct nvme_tcp_hdr),
};
char cbuf[CMSG_LEN(sizeof(char))] = {};
struct msghdr msg = {
.msg_control = cbuf,
.msg_controllen = sizeof(cbuf),
.msg_flags = MSG_PEEK,
};
if (nvmet_port_secure_channel_required(queue->port->nport))
return 0;
len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
if (unlikely(len < 0)) {
pr_debug("queue %d: peek error %d\n",
queue->idx, len);
return len;
}
ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
if (ret < 0)
return ret;
if (len < sizeof(struct nvme_tcp_hdr)) {
pr_debug("queue %d: short read, %d bytes missing\n",
queue->idx, (int)iov.iov_len - len);
return -EAGAIN;
}
pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
queue->idx, hdr->type, hdr->hlen, hdr->plen,
(int)sizeof(struct nvme_tcp_icreq_pdu));
if (hdr->type == nvme_tcp_icreq &&
hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
pr_debug("queue %d: icreq detected\n",
queue->idx);
return len;
}
return 0;
}
static void nvmet_tcp_tls_handshake_done(void *data, int status,
key_serial_t peerid)
{
struct nvmet_tcp_queue *queue = data;
pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
queue->idx, peerid, status);
spin_lock_bh(&queue->state_lock);
if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
spin_unlock_bh(&queue->state_lock);
return;
}
if (!status) {
queue->tls_pskid = peerid;
queue->state = NVMET_TCP_Q_CONNECTING;
} else
queue->state = NVMET_TCP_Q_FAILED;
spin_unlock_bh(&queue->state_lock);
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
if (status)
nvmet_tcp_schedule_release_queue(queue);
else
nvmet_tcp_set_queue_sock(queue);
kref_put(&queue->kref, nvmet_tcp_release_queue);
}
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
{
struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
struct nvmet_tcp_queue, tls_handshake_tmo_work);
pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
/*
* If tls_handshake_cancel() fails we've lost the race with
* nvmet_tcp_tls_handshake_done() */
if (!tls_handshake_cancel(queue->sock->sk))
return;
spin_lock_bh(&queue->state_lock);
if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
spin_unlock_bh(&queue->state_lock);
return;
}
queue->state = NVMET_TCP_Q_FAILED;
spin_unlock_bh(&queue->state_lock);
nvmet_tcp_schedule_release_queue(queue);
kref_put(&queue->kref, nvmet_tcp_release_queue);
}
static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
{
int ret = -EOPNOTSUPP;
struct tls_handshake_args args;
if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
pr_warn("cannot start TLS in state %d\n", queue->state);
return -EINVAL;
}
kref_get(&queue->kref);
pr_debug("queue %d: TLS ServerHello\n", queue->idx);
memset(&args, 0, sizeof(args));
args.ta_sock = queue->sock;
args.ta_done = nvmet_tcp_tls_handshake_done;
args.ta_data = queue;
args.ta_keyring = key_serial(queue->port->nport->keyring);
args.ta_timeout_ms = tls_handshake_timeout * 1000;
ret = tls_server_hello_psk(&args, GFP_KERNEL);
if (ret) {
kref_put(&queue->kref, nvmet_tcp_release_queue);
pr_err("failed to start TLS, err=%d\n", ret);
} else {
queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
tls_handshake_timeout * HZ);
}
return ret;
}
#endif
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
struct socket *newsock) struct socket *newsock)
{ {
struct nvmet_tcp_queue *queue; struct nvmet_tcp_queue *queue;
struct file *sock_file = NULL;
int ret; int ret;
queue = kzalloc(sizeof(*queue), GFP_KERNEL); queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) if (!queue) {
return -ENOMEM; ret = -ENOMEM;
goto out_release;
}
INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
INIT_WORK(&queue->io_work, nvmet_tcp_io_work); INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
kref_init(&queue->kref);
queue->sock = newsock; queue->sock = newsock;
queue->port = port; queue->port = port;
queue->nr_cmds = 0; queue->nr_cmds = 0;
spin_lock_init(&queue->state_lock); spin_lock_init(&queue->state_lock);
queue->state = NVMET_TCP_Q_CONNECTING; if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
NVMF_TCP_SECTYPE_TLS13)
queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
else
queue->state = NVMET_TCP_Q_CONNECTING;
INIT_LIST_HEAD(&queue->free_list); INIT_LIST_HEAD(&queue->free_list);
init_llist_head(&queue->resp_list); init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list); INIT_LIST_HEAD(&queue->resp_send_list);
sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
if (IS_ERR(sock_file)) {
ret = PTR_ERR(sock_file);
goto out_free_queue;
}
queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) { if (queue->idx < 0) {
ret = queue->idx; ret = queue->idx;
goto out_free_queue; goto out_sock;
} }
ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
...@@ -1662,11 +1913,33 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, ...@@ -1662,11 +1913,33 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
mutex_unlock(&nvmet_tcp_queue_mutex); mutex_unlock(&nvmet_tcp_queue_mutex);
#ifdef CONFIG_NVME_TARGET_TCP_TLS
INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
nvmet_tcp_tls_handshake_timeout);
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
struct sock *sk = queue->sock->sk;
/* Restore the default callbacks before starting upcall */
read_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
sk->sk_data_ready = port->data_ready;
read_unlock_bh(&sk->sk_callback_lock);
if (!nvmet_tcp_try_peek_pdu(queue)) {
if (!nvmet_tcp_tls_handshake(queue))
return;
/* TLS handshake failed, terminate the connection */
goto out_destroy_sq;
}
/* Not a TLS connection, continue with normal processing */
queue->state = NVMET_TCP_Q_CONNECTING;
}
#endif
ret = nvmet_tcp_set_queue_sock(queue); ret = nvmet_tcp_set_queue_sock(queue);
if (ret) if (ret)
goto out_destroy_sq; goto out_destroy_sq;
return 0; return;
out_destroy_sq: out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex); mutex_lock(&nvmet_tcp_queue_mutex);
list_del_init(&queue->queue_list); list_del_init(&queue->queue_list);
...@@ -1676,9 +1949,14 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, ...@@ -1676,9 +1949,14 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
nvmet_tcp_free_cmd(&queue->connect); nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove: out_ida_remove:
ida_free(&nvmet_tcp_queue_ida, queue->idx); ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_sock:
fput(queue->sock->file);
out_free_queue: out_free_queue:
kfree(queue); kfree(queue);
return ret; out_release:
pr_err("failed to allocate queue, error %d\n", ret);
if (!sock_file)
sock_release(newsock);
} }
static void nvmet_tcp_accept_work(struct work_struct *w) static void nvmet_tcp_accept_work(struct work_struct *w)
...@@ -1695,11 +1973,7 @@ static void nvmet_tcp_accept_work(struct work_struct *w) ...@@ -1695,11 +1973,7 @@ static void nvmet_tcp_accept_work(struct work_struct *w)
pr_warn("failed to accept err=%d\n", ret); pr_warn("failed to accept err=%d\n", ret);
return; return;
} }
ret = nvmet_tcp_alloc_queue(port, newsock); nvmet_tcp_alloc_queue(port, newsock);
if (ret) {
pr_err("failed to allocate queue\n");
sock_release(newsock);
}
} }
} }
......
...@@ -515,6 +515,7 @@ extern void key_init(void); ...@@ -515,6 +515,7 @@ extern void key_init(void);
#define key_init() do { } while(0) #define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0) #define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0) #define key_remove_domain(d) do { } while(0)
#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */ #endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -9,9 +9,9 @@ ...@@ -9,9 +9,9 @@
#include <crypto/kpp.h> #include <crypto/kpp.h>
struct nvme_dhchap_key { struct nvme_dhchap_key {
u8 *key;
size_t len; size_t len;
u8 hash; u8 hash;
u8 key[];
}; };
u32 nvme_auth_get_seqnum(void); u32 nvme_auth_get_seqnum(void);
...@@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id); ...@@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id); size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name); u8 nvme_auth_hmac_id(const char *hmac_name);
u32 nvme_auth_key_struct_size(u32 key_len);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash); u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key); void nvme_auth_free_key(struct nvme_dhchap_key *key);
u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
struct nvme_dhchap_key *nvme_auth_transform_key(
struct nvme_dhchap_key *key, char *nqn);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen); u8 *challenge, u8 *aug, size_t hlen);
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Hannes Reinecke, SUSE Labs
*/
#ifndef _NVME_KEYRING_H
#define _NVME_KEYRING_H
#ifdef CONFIG_NVME_KEYRING
key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
int nvme_keyring_init(void);
void nvme_keyring_exit(void);
#else
static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn)
{
return 0;
}
static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
static inline int nvme_keyring_init(void)
{
return 0;
}
static inline void nvme_keyring_exit(void) {}
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
...@@ -18,6 +18,12 @@ enum nvme_tcp_pfv { ...@@ -18,6 +18,12 @@ enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0, NVME_TCP_PFV_1_0 = 0x0,
}; };
enum nvme_tcp_tls_cipher {
NVME_TCP_TLS_CIPHER_INVALID = 0,
NVME_TCP_TLS_CIPHER_SHA256 = 1,
NVME_TCP_TLS_CIPHER_SHA384 = 2,
};
enum nvme_tcp_fatal_error_status { enum nvme_tcp_fatal_error_status {
NVME_TCP_FES_INVALID_PDU_HDR = 0x01, NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
NVME_TCP_FES_PDU_SEQ_ERR = 0x02, NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
......
...@@ -108,6 +108,13 @@ enum { ...@@ -108,6 +108,13 @@ enum {
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
}; };
/* TSAS SECTYPE for TCP transport */
enum {
NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
};
#define NVME_AQ_DEPTH 32 #define NVME_AQ_DEPTH 32
#define NVME_NR_AEN_COMMANDS 1 #define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
...@@ -1493,6 +1500,9 @@ struct nvmf_disc_rsp_page_entry { ...@@ -1493,6 +1500,9 @@ struct nvmf_disc_rsp_page_entry {
__u16 pkey; __u16 pkey;
__u8 resv10[246]; __u8 resv10[246];
} rdma; } rdma;
struct tcp {
__u8 sectype;
} tcp;
} tsas; } tsas;
}; };
......
...@@ -693,6 +693,7 @@ struct key *key_lookup(key_serial_t id) ...@@ -693,6 +693,7 @@ struct key *key_lookup(key_serial_t id)
spin_unlock(&key_serial_lock); spin_unlock(&key_serial_lock);
return key; return key;
} }
EXPORT_SYMBOL(key_lookup);
/* /*
* Find and lock the specified key type against removal. * Find and lock the specified key type against removal.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment