Commit 4b803784 authored by Linus Torvalds

Merge tag 'block-6.7-2023-11-10' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - nvme keyring config compile fixes (Hannes and Arnd)
      - fabrics keep alive fixes (Hannes)
      - tcp authentication fixes (Mark)
      - io_uring_cmd error handling fix (Anuj)
      - stale firmware attribute fix (Daniel)
      - tcp memory leak (Christophe)
      - crypto library usage simplification (Eric)

 - nbd use-after-free fix. May need a followup, but at least it's better
   than what it was before (Li)

 - Rate limit write on read-only device warnings (Yu)

* tag 'block-6.7-2023-11-10' of git://git.kernel.dk/linux:
  nvme: keyring: fix conditional compilation
  nvme: common: make keyring and auth separate modules
  blk-core: use pr_warn_ratelimited() in bio_check_ro()
  nbd: fix uaf in nbd_open
  nvme: start keep-alive after admin queue setup
  nvme-loop: always quiesce and cancel commands before destroying admin q
  nvme-tcp: avoid open-coding nvme_tcp_teardown_admin_queue()
  nvme-auth: always set valid seq_num in dhchap reply
  nvme-auth: add flag for bi-directional auth
  nvme-auth: auth success1 msg always includes resp
  nvme: fix error-handling for io_uring nvme-passthrough
  nvme: update firmware version after commit
  nvme-tcp: Fix a memory leak
  nvme-auth: use crypto_shash_tfm_digest()
parents d035e4eb 37d94868
@@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio)
 	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
 			return;
-		pr_warn("Trying to write to read-only block-device %pg\n",
-			bio->bi_bdev);
+		pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
+				    bio->bi_bdev);
 		/* Older lvm-tools actually trigger this */
 	}
 }
...
@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 	struct gendisk *disk = nbd->disk;
 
 	del_gendisk(disk);
-	put_disk(disk);
 	blk_mq_free_tag_set(&nbd->tag_set);
 
 	/*
@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 	idr_remove(&nbd_index_idr, nbd->index);
 	mutex_unlock(&nbd_index_mutex);
 	destroy_workqueue(nbd->recv_workq);
-	kfree(nbd);
+	put_disk(disk);
 }
 
 static void nbd_dev_remove_work(struct work_struct *work)
@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk)
 	nbd_put(nbd);
 }
 
+static void nbd_free_disk(struct gendisk *disk)
+{
+	struct nbd_device *nbd = disk->private_data;
+
+	kfree(nbd);
+}
+
 static const struct block_device_operations nbd_fops =
 {
 	.owner = THIS_MODULE,
@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
 	.release = nbd_release,
 	.ioctl = nbd_ioctl,
 	.compat_ioctl = nbd_ioctl,
+	.free_disk = nbd_free_disk,
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
...
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NVME_COMMON) += common/
+obj-y += common/
 obj-y += host/
 obj-y += target/
 # SPDX-License-Identifier: GPL-2.0-only
-config NVME_COMMON
-	tristate
-
 config NVME_KEYRING
-	bool
+	tristate
 	select KEYS
 
 config NVME_AUTH
-	bool
+	tristate
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_SHA256
...
@@ -2,7 +2,8 @@
 ccflags-y += -I$(src)
 
-obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+obj-$(CONFIG_NVME_AUTH) += nvme-auth.o
+obj-$(CONFIG_NVME_KEYRING) += nvme-keyring.o
 
-nvme-common-$(CONFIG_NVME_AUTH) += auth.o
-nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o
+nvme-auth-y += auth.o
+nvme-keyring-y += keyring.o
@@ -341,7 +341,6 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
 		u8 *challenge, u8 *aug, size_t hlen)
 {
 	struct crypto_shash *tfm;
-	struct shash_desc *desc;
 	u8 *hashed_key;
 	const char *hmac_name;
 	int ret;
@@ -369,29 +368,11 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
 		goto out_free_key;
 	}
 
-	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
-		       GFP_KERNEL);
-	if (!desc) {
-		ret = -ENOMEM;
-		goto out_free_hash;
-	}
-	desc->tfm = tfm;
-
 	ret = crypto_shash_setkey(tfm, hashed_key, hlen);
 	if (ret)
-		goto out_free_desc;
+		goto out_free_hash;
 
-	ret = crypto_shash_init(desc);
-	if (ret)
-		goto out_free_desc;
-
-	ret = crypto_shash_update(desc, challenge, hlen);
-	if (ret)
-		goto out_free_desc;
-
-	ret = crypto_shash_final(desc, aug);
+	ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
 
-out_free_desc:
-	kfree_sensitive(desc);
 out_free_hash:
 	crypto_free_shash(tfm);
 out_free_key:
...
@@ -151,7 +151,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
 }
 EXPORT_SYMBOL_GPL(nvme_tls_psk_default);
 
-int nvme_keyring_init(void)
+static int __init nvme_keyring_init(void)
 {
 	int err;
 
@@ -171,12 +171,15 @@ int nvme_keyring_init(void)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(nvme_keyring_init);
 
-void nvme_keyring_exit(void)
+static void __exit nvme_keyring_exit(void)
 {
 	unregister_key_type(&nvme_tls_psk_key_type);
 	key_revoke(nvme_keyring);
 	key_put(nvme_keyring);
 }
-EXPORT_SYMBOL_GPL(nvme_keyring_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+module_init(nvme_keyring_init);
+module_exit(nvme_keyring_exit);
@@ -95,7 +95,6 @@ config NVME_TCP
 config NVME_TCP_TLS
 	bool "NVMe over Fabrics TCP TLS encryption support"
 	depends on NVME_TCP
-	select NVME_COMMON
 	select NVME_KEYRING
 	select NET_HANDSHAKE
 	select KEYS
@@ -110,7 +109,6 @@ config NVME_TCP_TLS
 config NVME_HOST_AUTH
 	bool "NVM Express over Fabrics In-Band Authentication"
 	depends on NVME_CORE
-	select NVME_COMMON
 	select NVME_AUTH
 	help
 	  This provides support for NVMe over Fabrics In-Band Authentication.
...
@@ -29,6 +29,7 @@ struct nvme_dhchap_queue_context {
 	int error;
 	u32 s1;
 	u32 s2;
+	bool bi_directional;
 	u16 transaction;
 	u8 status;
 	u8 dhgroup_id;
@@ -312,17 +313,17 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
 	data->dhvlen = cpu_to_le16(chap->host_key_len);
 	memcpy(data->rval, chap->response, chap->hash_len);
 	if (ctrl->ctrl_key) {
+		chap->bi_directional = true;
 		get_random_bytes(chap->c2, chap->hash_len);
 		data->cvalid = 1;
-		chap->s2 = nvme_auth_get_seqnum();
 		memcpy(data->rval + chap->hash_len, chap->c2,
 		       chap->hash_len);
 		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
 			__func__, chap->qid, (int)chap->hash_len, chap->c2);
 	} else {
 		memset(chap->c2, 0, chap->hash_len);
-		chap->s2 = 0;
 	}
+	chap->s2 = nvme_auth_get_seqnum();
 	data->seqnum = cpu_to_le32(chap->s2);
 	if (chap->host_key_len) {
 		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -339,10 +340,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
 		struct nvme_dhchap_queue_context *chap)
 {
 	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
-	size_t size = sizeof(*data);
-
-	if (chap->s2)
-		size += chap->hash_len;
+	size_t size = sizeof(*data) + chap->hash_len;
 
 	if (size > CHAP_BUF_SIZE) {
 		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
@@ -663,6 +661,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
 	chap->error = 0;
 	chap->s1 = 0;
 	chap->s2 = 0;
+	chap->bi_directional = false;
 	chap->transaction = 0;
 	memset(chap->c1, 0, sizeof(chap->c1));
 	memset(chap->c2, 0, sizeof(chap->c2));
@@ -825,7 +824,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 		goto fail2;
 	}
 
-	if (chap->s2) {
+	if (chap->bi_directional) {
 		/* DH-HMAC-CHAP Step 5: send success2 */
 		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
 			__func__, chap->qid);
...
@@ -25,7 +25,6 @@
 #include "nvme.h"
 #include "fabrics.h"
 #include <linux/nvme-auth.h>
-#include <linux/nvme-keyring.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -483,6 +482,7 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
 {
+	nvme_stop_keep_alive(ctrl);
 	if (ctrl->admin_tagset) {
 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
 				nvme_cancel_request, ctrl);
@@ -3200,6 +3200,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
 	clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
 	ctrl->identified = true;
 
+	nvme_start_keep_alive(ctrl);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
@@ -4074,8 +4076,21 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
 		return;
 
 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
-			log, sizeof(*log), 0))
+			log, sizeof(*log), 0)) {
 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+		goto out_free_log;
+	}
+
+	if (log->afi & 0x70 || !(log->afi & 0x7)) {
+		dev_info(ctrl->device,
+			 "Firmware is activated after next Controller Level Reset\n");
+		goto out_free_log;
+	}
+
+	memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+	       sizeof(ctrl->subsys->firmware_rev));
+
+out_free_log:
 	kfree(log);
 }
@@ -4333,7 +4348,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_mpath_stop(ctrl);
 	nvme_auth_stop(ctrl);
-	nvme_stop_keep_alive(ctrl);
 	nvme_stop_failfast_work(ctrl);
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
@@ -4344,8 +4358,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 {
-	nvme_start_keep_alive(ctrl);
-
 	nvme_enable_aen(ctrl);
 
 	/*
@@ -4724,16 +4736,11 @@ static int __init nvme_core_init(void)
 		result = PTR_ERR(nvme_ns_chr_class);
 		goto unregister_generic_ns;
 	}
-	result = nvme_keyring_init();
-	if (result)
-		goto destroy_ns_chr;
 	result = nvme_init_auth();
 	if (result)
-		goto keyring_exit;
+		goto destroy_ns_chr;
 	return 0;
 
-keyring_exit:
-	nvme_keyring_exit();
 destroy_ns_chr:
 	class_destroy(nvme_ns_chr_class);
 unregister_generic_ns:
@@ -4757,7 +4764,6 @@ static int __init nvme_core_init(void)
 static void __exit nvme_core_exit(void)
 {
 	nvme_exit_auth();
-	nvme_keyring_exit();
 	class_destroy(nvme_ns_chr_class);
 	class_destroy(nvme_subsys_class);
 	class_destroy(nvme_class);
...
@@ -2530,6 +2530,12 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 	 * clean up the admin queue. Same thing as above.
 	 */
 	nvme_quiesce_admin_queue(&ctrl->ctrl);
+
+	/*
+	 * Open-coding nvme_cancel_admin_tagset() as fc
+	 * is not using nvme_cancel_request().
+	 */
+	nvme_stop_keep_alive(&ctrl->ctrl);
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
...
@@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
 	req->bio = pdu->bio;
-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
 		pdu->nvme_status = -EINTR;
-	else
+	} else {
 		pdu->nvme_status = nvme_req(req)->status;
+		if (!pdu->nvme_status)
+			pdu->nvme_status = blk_status_to_errno(err);
+	}
 	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
 
 	/*
...
@@ -1423,13 +1423,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
 			nvme_tcp_queue_id(queue), ret);
 		goto free_icresp;
 	}
+	ret = -ENOTCONN;
 	if (queue->ctrl->ctrl.opts->tls) {
 		ctype = tls_get_record_type(queue->sock->sk,
 					    (struct cmsghdr *)cbuf);
 		if (ctype != TLS_RECORD_TYPE_DATA) {
 			pr_err("queue %d: unhandled TLS record %d\n",
 			       nvme_tcp_queue_id(queue), ctype);
-			return -ENOTCONN;
+			goto free_icresp;
 		}
 	}
 	ret = -EINVAL;
@@ -2236,11 +2237,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 		nvme_tcp_destroy_io_queues(ctrl, new);
 	}
 destroy_admin:
-	nvme_quiesce_admin_queue(ctrl);
-	blk_sync_queue(ctrl->admin_q);
-	nvme_tcp_stop_queue(ctrl, 0);
-	nvme_cancel_admin_tagset(ctrl);
-	nvme_tcp_destroy_admin_queue(ctrl, new);
+	nvme_tcp_teardown_admin_queue(ctrl, false);
 	return ret;
 }
...
@@ -87,7 +87,6 @@ config NVME_TARGET_TCP
 config NVME_TARGET_TCP_TLS
 	bool "NVMe over Fabrics TCP target TLS encryption support"
 	depends on NVME_TARGET_TCP
-	select NVME_COMMON
 	select NVME_KEYRING
 	select NET_HANDSHAKE
 	select KEYS
@@ -102,7 +101,6 @@ config NVME_TARGET_TCP_TLS
 config NVME_TARGET_AUTH
 	bool "NVMe over Fabrics In-band Authentication support"
 	depends on NVME_TARGET
-	select NVME_COMMON
 	select NVME_AUTH
 	help
 	  This enables support for NVMe over Fabrics In-band Authentication
...
@@ -163,11 +163,11 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
 		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
 			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
 			 req->sq->dhchap_c2);
-		req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
 	} else {
 		req->sq->authenticated = true;
 		req->sq->dhchap_c2 = NULL;
 	}
+	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
 	return 0;
 }
...
@@ -466,6 +466,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 out_destroy_io:
 	nvme_loop_destroy_io_queues(ctrl);
 out_destroy_admin:
+	nvme_quiesce_admin_queue(&ctrl->ctrl);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -600,6 +602,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	return &ctrl->ctrl;
 
 out_remove_admin_queue:
+	nvme_quiesce_admin_queue(&ctrl->ctrl);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 	nvme_loop_destroy_admin_queue(ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
...
@@ -6,14 +6,12 @@
 #ifndef _NVME_KEYRING_H
 #define _NVME_KEYRING_H
 
-#ifdef CONFIG_NVME_KEYRING
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
 
 key_serial_t nvme_tls_psk_default(struct key *keyring,
 		const char *hostnqn, const char *subnqn);
 
 key_serial_t nvme_keyring_id(void);
-int nvme_keyring_init(void);
-void nvme_keyring_exit(void);
 
 #else
@@ -26,11 +24,5 @@ static inline key_serial_t nvme_keyring_id(void)
 {
 	return 0;
 }
-static inline int nvme_keyring_init(void)
-{
-	return 0;
-}
-static inline void nvme_keyring_exit(void) {}
-
 #endif /* !CONFIG_NVME_KEYRING */
 #endif /* _NVME_KEYRING_H */
@@ -1732,7 +1732,7 @@ struct nvmf_auth_dhchap_success1_data {
 	__u8 rsvd2;
 	__u8 rvalid;
 	__u8 rsvd3[7];
-	/* 'hl' bytes of response value if 'rvalid' is set */
+	/* 'hl' bytes of response value */
 	__u8 rval[];
 };
...