Commit 7f569e91 authored by Li Ming's avatar Li Ming Committed by Dave Jiang

cxl/port: Use scoped_guard()/guard() to drop device_lock() for cxl_port

A device_lock() and device_unlock() pair can be replaced by the cleanup
helpers scoped_guard() or guard(), which enhance code readability. The
CXL subsystem still uses device_lock() and device_unlock() pairs for cxl
port resource protection; most of them can simply be replaced by a
scoped_guard() or a guard().
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Li Ming <ming4.li@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://patch.msgid.link/20240830013138.2256244-2-ming4.li@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
parent dd2617eb
...@@ -1214,7 +1214,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) ...@@ -1214,7 +1214,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
int rc; int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */ /* synchronize with cxl_mem_probe() and decoder write operations */
device_lock(&cxlmd->dev); guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint; endpoint = cxlmd->endpoint;
down_read(&cxl_region_rwsem); down_read(&cxl_region_rwsem);
/* /*
...@@ -1226,7 +1226,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) ...@@ -1226,7 +1226,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
else else
rc = -EBUSY; rc = -EBUSY;
up_read(&cxl_region_rwsem); up_read(&cxl_region_rwsem);
device_unlock(&cxlmd->dev);
return rc; return rc;
} }
......
...@@ -1258,18 +1258,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); ...@@ -1258,18 +1258,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
static int add_ep(struct cxl_ep *new) static int add_ep(struct cxl_ep *new)
{ {
struct cxl_port *port = new->dport->port; struct cxl_port *port = new->dport->port;
int rc;
device_lock(&port->dev); guard(device)(&port->dev);
if (port->dead) { if (port->dead)
device_unlock(&port->dev);
return -ENXIO; return -ENXIO;
}
rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
GFP_KERNEL);
device_unlock(&port->dev);
return rc; return xa_insert(&port->endpoints, (unsigned long)new->ep,
new, GFP_KERNEL);
} }
/** /**
...@@ -1393,14 +1388,14 @@ static void delete_endpoint(void *data) ...@@ -1393,14 +1388,14 @@ static void delete_endpoint(void *data)
struct cxl_port *endpoint = cxlmd->endpoint; struct cxl_port *endpoint = cxlmd->endpoint;
struct device *host = endpoint_host(endpoint); struct device *host = endpoint_host(endpoint);
device_lock(host); scoped_guard(device, host) {
if (host->driver && !endpoint->dead) { if (host->driver && !endpoint->dead) {
devm_release_action(host, cxl_unlink_parent_dport, endpoint); devm_release_action(host, cxl_unlink_parent_dport, endpoint);
devm_release_action(host, cxl_unlink_uport, endpoint); devm_release_action(host, cxl_unlink_uport, endpoint);
devm_release_action(host, unregister_port, endpoint); devm_release_action(host, unregister_port, endpoint);
}
cxlmd->endpoint = NULL;
} }
cxlmd->endpoint = NULL;
device_unlock(host);
put_device(&endpoint->dev); put_device(&endpoint->dev);
put_device(host); put_device(host);
} }
...@@ -1565,40 +1560,38 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, ...@@ -1565,40 +1560,38 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
* dereferencing the device of the port before the parent_port releasing. * dereferencing the device of the port before the parent_port releasing.
*/ */
struct cxl_port *port __free(put_cxl_port) = NULL; struct cxl_port *port __free(put_cxl_port) = NULL;
device_lock(&parent_port->dev); scoped_guard(device, &parent_port->dev) {
if (!parent_port->dev.driver) { if (!parent_port->dev.driver) {
dev_warn(&cxlmd->dev, dev_warn(&cxlmd->dev,
"port %s:%s disabled, failed to enumerate CXL.mem\n", "port %s:%s disabled, failed to enumerate CXL.mem\n",
dev_name(&parent_port->dev), dev_name(uport_dev)); dev_name(&parent_port->dev), dev_name(uport_dev));
port = ERR_PTR(-ENXIO); return -ENXIO;
goto out; }
}
port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
component_reg_phys, parent_dport);
if (IS_ERR(port))
return PTR_ERR(port);
port = find_cxl_port_at(parent_port, dport_dev, &dport); /* retry find to pick up the new dport information */
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
component_reg_phys, parent_dport);
/* retry find to pick up the new dport information */
if (!IS_ERR(port))
port = find_cxl_port_at(parent_port, dport_dev, &dport); port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port)
return -ENXIO;
}
} }
out:
device_unlock(&parent_port->dev);
if (IS_ERR(port)) dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
rc = PTR_ERR(port); dev_name(&port->dev), dev_name(port->uport_dev));
else { rc = cxl_add_ep(dport, &cxlmd->dev);
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", if (rc == -EBUSY) {
dev_name(&port->dev), dev_name(port->uport_dev)); /*
rc = cxl_add_ep(dport, &cxlmd->dev); * "can't" happen, but this error code means
if (rc == -EBUSY) { * something to the caller, so translate it.
/* */
* "can't" happen, but this error code means rc = -ENXIO;
* something to the caller, so translate it.
*/
rc = -ENXIO;
}
} }
return rc; return rc;
...@@ -1979,7 +1972,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); ...@@ -1979,7 +1972,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{ {
struct cxl_port *port; struct cxl_port *port;
int rc;
if (WARN_ON_ONCE(!cxld)) if (WARN_ON_ONCE(!cxld))
return -EINVAL; return -EINVAL;
...@@ -1989,11 +1981,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) ...@@ -1989,11 +1981,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent); port = to_cxl_port(cxld->dev.parent);
device_lock(&port->dev); guard(device)(&port->dev);
rc = cxl_decoder_add_locked(cxld, target_map); return cxl_decoder_add_locked(cxld, target_map);
device_unlock(&port->dev);
return rc;
} }
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
......
...@@ -3094,11 +3094,11 @@ static void cxlr_release_nvdimm(void *_cxlr) ...@@ -3094,11 +3094,11 @@ static void cxlr_release_nvdimm(void *_cxlr)
struct cxl_region *cxlr = _cxlr; struct cxl_region *cxlr = _cxlr;
struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
device_lock(&cxl_nvb->dev); scoped_guard(device, &cxl_nvb->dev) {
if (cxlr->cxlr_pmem) if (cxlr->cxlr_pmem)
devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
cxlr->cxlr_pmem); cxlr->cxlr_pmem);
device_unlock(&cxl_nvb->dev); }
cxlr->cxl_nvb = NULL; cxlr->cxl_nvb = NULL;
put_device(&cxl_nvb->dev); put_device(&cxl_nvb->dev);
} }
...@@ -3134,13 +3134,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) ...@@ -3134,13 +3134,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev)); dev_name(dev));
device_lock(&cxl_nvb->dev); scoped_guard(device, &cxl_nvb->dev) {
if (cxl_nvb->dev.driver) if (cxl_nvb->dev.driver)
rc = devm_add_action_or_reset(&cxl_nvb->dev, rc = devm_add_action_or_reset(&cxl_nvb->dev,
cxlr_pmem_unregister, cxlr_pmem); cxlr_pmem_unregister,
else cxlr_pmem);
rc = -ENXIO; else
device_unlock(&cxl_nvb->dev); rc = -ENXIO;
}
if (rc) if (rc)
goto err_bridge; goto err_bridge;
......
...@@ -168,19 +168,17 @@ static int cxl_mem_probe(struct device *dev) ...@@ -168,19 +168,17 @@ static int cxl_mem_probe(struct device *dev)
cxl_setup_parent_dport(dev, dport); cxl_setup_parent_dport(dev, dport);
device_lock(endpoint_parent); scoped_guard(device, endpoint_parent) {
if (!endpoint_parent->driver) { if (!endpoint_parent->driver) {
dev_err(dev, "CXL port topology %s not enabled\n", dev_err(dev, "CXL port topology %s not enabled\n",
dev_name(endpoint_parent)); dev_name(endpoint_parent));
rc = -ENXIO; return -ENXIO;
goto unlock; }
}
rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport); rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
unlock: if (rc)
device_unlock(endpoint_parent); return rc;
if (rc) }
return rc;
/* /*
* The kernel may be operating out of CXL memory on this device, * The kernel may be operating out of CXL memory on this device,
......
...@@ -233,15 +233,13 @@ static int detach_nvdimm(struct device *dev, void *data) ...@@ -233,15 +233,13 @@ static int detach_nvdimm(struct device *dev, void *data)
if (!is_cxl_nvdimm(dev)) if (!is_cxl_nvdimm(dev))
return 0; return 0;
device_lock(dev); scoped_guard(device, dev) {
if (!dev->driver) if (dev->driver) {
goto out; cxl_nvd = to_cxl_nvdimm(dev);
if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
cxl_nvd = to_cxl_nvdimm(dev); release = true;
if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) }
release = true; }
out:
device_unlock(dev);
if (release) if (release)
device_release_driver(dev); device_release_driver(dev);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment