Commit 489beb91 authored by Roland Dreier, committed by Christoph Hellwig

nvme-fabrics: Convert nvmf_transports_mutex to an rwsem

The mutex protects against the list of transports changing while a
controller is being created, but using a plain old mutex means that it
also serializes controller creation.  This unnecessarily slows down
creating multiple controllers: for the RDMA transport, for example,
creating a controller involves establishing one connection for every I/O
queue, which means even more network and software round trips, so the
delay can become significant.

The simplest way to fix this is to change the mutex to an rwsem and only
hold it for writing when the list is being mutated.  Since we can take
the rwsem for reading while creating a controller, we can create multiple
controllers in parallel.
Signed-off-by: Roland Dreier <roland@purestorage.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2b76da95
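The pattern behind the change, shown as a minimal self-contained sketch (hypothetical example_* names and a stand-alone list, not the actual fabrics code): registration and unregistration take the semaphore for writing because they mutate the list, while lookups take it for reading and can therefore run concurrently. In the patch below the read side is additionally held across the whole controller creation, so a transport cannot be unregistered while it is still in use.

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/string.h>

struct example_transport {
        struct list_head entry;
        const char *name;
};

static LIST_HEAD(example_transports);
static DECLARE_RWSEM(example_transports_rwsem);

/* Writer side: exclusive access while the list is mutated. */
static void example_register(struct example_transport *t)
{
        down_write(&example_transports_rwsem);
        list_add_tail(&t->entry, &example_transports);
        up_write(&example_transports_rwsem);
}

/* Reader side: any number of lookups may hold the rwsem concurrently. */
static struct example_transport *example_lookup(const char *name)
{
        struct example_transport *t;

        down_read(&example_transports_rwsem);
        list_for_each_entry(t, &example_transports, entry) {
                if (strcmp(t->name, name) == 0) {
                        up_read(&example_transports_rwsem);
                        return t;
                }
        }
        up_read(&example_transports_rwsem);
        return NULL;
}

Only writers exclude each other and readers; readers never block other readers, which is what removes the serialization between parallel nvmf_create_ctrl() calls.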
@@ -22,7 +22,7 @@
 #include "fabrics.h"
 
 static LIST_HEAD(nvmf_transports);
-static DEFINE_MUTEX(nvmf_transports_mutex);
+static DECLARE_RWSEM(nvmf_transports_rwsem);
 
 static LIST_HEAD(nvmf_hosts);
 static DEFINE_MUTEX(nvmf_hosts_mutex);
@@ -495,9 +495,9 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops)
 	if (!ops->create_ctrl)
 		return -EINVAL;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_add_tail(&ops->entry, &nvmf_transports);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 
 	return 0;
 }
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(nvmf_register_transport);
  */
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
 {
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_del(&ops->entry);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
@@ -525,7 +525,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 {
 	struct nvmf_transport_ops *ops;
 
-	lockdep_assert_held(&nvmf_transports_mutex);
+	lockdep_assert_held(&nvmf_transports_rwsem);
 
 	list_for_each_entry(ops, &nvmf_transports, entry) {
 		if (strcmp(ops->name, opts->transport) == 0)
@@ -851,7 +851,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_free_opts;
 	opts->mask &= ~NVMF_REQUIRED_OPTS;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_read(&nvmf_transports_rwsem);
 	ops = nvmf_lookup_transport(opts);
 	if (!ops) {
 		pr_info("no handler found for transport %s.\n",
@@ -878,16 +878,16 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		dev_warn(ctrl->device,
 			"controller returned incorrect NQN: \"%s\".\n",
 			ctrl->subnqn);
-		mutex_unlock(&nvmf_transports_mutex);
+		up_read(&nvmf_transports_rwsem);
 		ctrl->ops->delete_ctrl(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
 out_unlock:
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 out_free_opts:
 	nvmf_free_options(opts);
 	return ERR_PTR(ret);