Commit 1c8f1da5 authored by Bharat Potnuri, committed by Doug Ledford

iw_cxgb4: Fix possible circular dependency locking warning

The locking sequences of iw_cxgb4 and the RoCE drivers on the way to
ib_register_device() differ slightly, which leads to a possible
circular-dependency locking warning when both devices are brought up.

Here is the locking sequence up to ib_register_device():
iw_cxgb4: rtnl_mutex(net stack) --> uld_mutex --> device_mutex
RoCE drivers: device_mutex --> rtnl_mutex

Here is the possibility of cross locking:

	CPU #0 (iw_cxgb4) 		     CPU #1 (RoCE drivers)

-> on interface up cxgb4_up()
executed with rtnl_mutex held
-> hold uld_mutex and try
registering ib device
					-> In ib_register_device() hold
					   device_mutex
-> try to acquire device_mutex in
ib_register_device()
					-> try acquiring rtnl_mutex in
					   ib_enum_roce_netdev()

This patch moves the ib_register_device() step of iw_cxgb4 onto a
workqueue, so it runs outside the rtnl_mutex/uld_mutex context and the
possible cross-locking cannot occur.
It also renames the error-path labels in c4iw_register_device().
Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 84511455
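For context, below is a minimal sketch of the deferral pattern the patch adopts, before the diff itself. All names in it (my_ctx, my_reg_workq, my_on_up, my_do_register, "my_reg_wq") are hypothetical placeholders, not the driver's identifiers; the real code uses struct uld_ctx, reg_workq and c4iw_register_device() as shown in the hunks that follow. The idea: the "interface up" callback runs with rtnl_mutex and uld_mutex held, so it only queues a work item, and the IB device registration (which takes device_mutex and, in RoCE drivers, rtnl_mutex again) runs later on a dedicated workqueue thread with none of those locks held.

/*
 * Sketch only, under the assumptions above: my_ctx, my_reg_workq,
 * my_on_up, my_do_register and "my_reg_wq" are hypothetical names.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct reg_work;
	/* ... state needed to register the IB device ... */
};

static struct workqueue_struct *my_reg_workq;

/* Work handler: runs on the workqueue thread with no rtnl_mutex or
 * uld_mutex held, so taking device_mutex (and rtnl_mutex) here cannot
 * invert the lock order seen in the "interface up" path. */
static void my_do_register(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, reg_work);

	/* ib_register_device()-style registration would go here;
	 * on failure, tear down ctx (cf. c4iw_dealloc() in the diff). */
	(void)ctx;
}

/* Called from the "interface up" callback while rtnl_mutex and the
 * driver's uld_mutex are held: defer instead of registering inline. */
static void my_on_up(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->reg_work, my_do_register);
	queue_work(my_reg_workq, &ctx->reg_work);
}

/* Module init/exit: create the queue up front; on unload, flush it so
 * no deferred registration is still running, then destroy it. */
static int __init my_init(void)
{
	my_reg_workq = create_singlethread_workqueue("my_reg_wq");
	if (!my_reg_workq)
		return -ENOMEM;
	return 0;
}
module_init(my_init);

static void __exit my_exit(void)
{
	flush_workqueue(my_reg_workq);
	destroy_workqueue(my_reg_workq);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");

A single-threaded, dedicated queue keeps the pattern simple: module exit can flush and destroy it, guaranteeing that no deferred registration is still in flight when the driver unloads, which is exactly what the c4iw_exit_module() hunk below does with reg_workq.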
@@ -64,14 +64,9 @@ module_param(c4iw_wr_log_size_order, int, 0444);
 MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");
 
-struct uld_ctx {
-	struct list_head entry;
-	struct cxgb4_lld_info lldi;
-	struct c4iw_dev *dev;
-};
-
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
+struct workqueue_struct *reg_workq;
 
 #define DB_FC_RESUME_SIZE 64
 #define DB_FC_RESUME_DELAY 1
@@ -912,7 +907,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }
 
-static void c4iw_dealloc(struct uld_ctx *ctx)
+void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
 	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
@@ -1208,8 +1203,6 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_UP:
 		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
 		if (!ctx->dev) {
-			int ret;
-
 			ctx->dev = c4iw_alloc(&ctx->lldi);
 			if (IS_ERR(ctx->dev)) {
 				pr_err("%s: initialization failed: %ld\n",
@@ -1218,12 +1211,9 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 				ctx->dev = NULL;
 				break;
 			}
-			ret = c4iw_register_device(ctx->dev);
-			if (ret) {
-				pr_err("%s: RDMA registration failed: %d\n",
-				       pci_name(ctx->lldi.pdev), ret);
-				c4iw_dealloc(ctx);
-			}
+
+			INIT_WORK(&ctx->reg_work, c4iw_register_device);
+			queue_work(reg_workq, &ctx->reg_work);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
@@ -1551,6 +1541,12 @@ static int __init c4iw_init_module(void)
 	if (!c4iw_debugfs_root)
 		pr_warn("could not create debugfs entry, continuing\n");
 
+	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
+	if (!reg_workq) {
+		pr_err("Failed creating workqueue to register iwarp device\n");
+		return -ENOMEM;
+	}
+
 	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
 	return 0;
@@ -1567,6 +1563,8 @@ static void __exit c4iw_exit_module(void)
 		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
+	flush_workqueue(reg_workq);
+	destroy_workqueue(reg_workq);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 	c4iw_cm_term();
 	debugfs_remove_recursive(c4iw_debugfs_root);
......
@@ -322,6 +322,13 @@ struct c4iw_dev {
 	wait_queue_head_t wait;
 };
 
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+	struct work_struct reg_work;
+};
+
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
 {
 	return container_of(ibdev, struct c4iw_dev, ibdev);
@@ -988,7 +995,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
 void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
 void c4iw_destroy_resource(struct c4iw_resource *rscp);
 int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_register_device(struct work_struct *work);
 void c4iw_unregister_device(struct c4iw_dev *dev);
 int __init c4iw_cm_init(void);
 void c4iw_cm_term(void);
@@ -1014,6 +1021,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		   unsigned int *sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
+void c4iw_dealloc(struct uld_ctx *ctx);
 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			    struct ib_udata *udata);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
......
@@ -531,10 +531,12 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
 		 FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
 }
 
-int c4iw_register_device(struct c4iw_dev *dev)
+void c4iw_register_device(struct work_struct *work)
 {
 	int ret;
 	int i;
+	struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
+	struct c4iw_dev *dev = ctx->dev;
 
 	pr_debug("c4iw_dev %p\n", dev);
 	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
@@ -609,8 +611,10 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.get_dev_fw_str = get_dev_fw_str;
 
 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
-	if (!dev->ibdev.iwcm)
-		return -ENOMEM;
+	if (!dev->ibdev.iwcm) {
+		ret = -ENOMEM;
+		goto err_dealloc_ctx;
+	}
 
 	dev->ibdev.iwcm->connect = c4iw_connect;
 	dev->ibdev.iwcm->accept = c4iw_accept_cr;
@@ -625,20 +629,24 @@ int c4iw_register_device(struct c4iw_dev *dev)
 
 	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
-		goto bail1;
+		goto err_kfree_iwcm;
 
 	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
 		ret = device_create_file(&dev->ibdev.dev,
 					 c4iw_class_attributes[i]);
 		if (ret)
-			goto bail2;
+			goto err_unregister_device;
 	}
-	return 0;
-bail2:
+	return;
+err_unregister_device:
 	ib_unregister_device(&dev->ibdev);
-bail1:
+err_kfree_iwcm:
 	kfree(dev->ibdev.iwcm);
-	return ret;
+err_dealloc_ctx:
+	pr_err("%s - Failed registering iwarp device: %d\n",
+	       pci_name(ctx->lldi.pdev), ret);
+	c4iw_dealloc(ctx);
+	return;
 }
 
 void c4iw_unregister_device(struct c4iw_dev *dev)
......