Commit 93a40a6d authored by Dave Jiang, committed by Vinod Koul

dmaengine: idxd: add percpu_ref to descriptor submission path

The current submission path has no way to stop submitters from issuing new
descriptors on the shutdown or wq disable paths. This provides a way to
quiesce the submission path.

This is modeled after the 'struct request_queue' usage of percpu_ref. One of
the abilities of percpu reference counting is that new references can be
stopped from being taken while awaiting outstanding references to be
dropped. On wq shutdown, we want to block any new submissions to the kernel
workqueue and quiesce it before disabling. The percpu_ref allows us to block
any new submissions and wait for any in-flight submission calls to finish
submitting to the workqueue.
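
As a rough, self-contained sketch of that quiesce pattern (the struct and
function names here are illustrative, not taken from the patch): the release
callback fires once the last reference is dropped after percpu_ref_kill(),
signalling a completion that the shutdown path waits on.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct quiesce_ctx {
	struct percpu_ref active;	/* gates new submissions */
	struct completion dead;		/* signalled when the last ref drops */
};

/* Release callback: runs once the final reference is dropped after kill. */
static void quiesce_ref_release(struct percpu_ref *ref)
{
	struct quiesce_ctx *q = container_of(ref, struct quiesce_ctx, active);

	complete(&q->dead);
}

static int quiesce_init(struct quiesce_ctx *q)
{
	init_completion(&q->dead);
	return percpu_ref_init(&q->active, quiesce_ref_release, 0, GFP_KERNEL);
}

static void quiesce(struct quiesce_ctx *q)
{
	percpu_ref_kill(&q->active);	/* new percpu_ref_tryget_live() calls now fail */
	wait_for_completion(&q->dead);	/* wait for in-flight submitters to drop their refs */
	percpu_ref_exit(&q->active);
}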

A percpu_ref is embedded in each idxd_wq context to allow control of each
individual wq. The wq->wq_active counter is elevated before calling
movdir64b() or enqcmds() to submit a descriptor to the wq and dropped once
the submission call completes. The function is gated by
percpu_ref_tryget_live(). On shutdown, once percpu_ref_kill() is called, any
new submission is blocked from acquiring a ref and fails. Once all
references for the wq have been dropped, shutdown can continue.
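
The submission side then reduces to a tryget/put pair around the portal
write. A minimal sketch, where idxd_portal_write() is a hypothetical
stand-in for the movdir64b()/enqcmds() call in the real patch:

static int submit_desc_gated(struct idxd_wq *wq, struct idxd_desc *desc)
{
	int rc;

	/* Refuse new work once percpu_ref_kill() has run on wq->wq_active. */
	if (!percpu_ref_tryget_live(&wq->wq_active))
		return -ENXIO;

	rc = idxd_portal_write(wq, desc);	/* hypothetical movdir64b()/enqcmds() wrapper */

	/* Drop the ref once the submission call has completed. */
	percpu_ref_put(&wq->wq_active);
	return rc;
}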
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/161894438293.3202472.14894701611500822232.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 435b512d
@@ -384,6 +384,32 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
memset(wq->name, 0, WQ_NAME_SIZE);
}
static void idxd_wq_ref_release(struct percpu_ref *ref)
{
struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
complete(&wq->wq_dead);
}
int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
int rc;
memset(&wq->wq_active, 0, sizeof(wq->wq_active));
rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
if (rc < 0)
return rc;
reinit_completion(&wq->wq_dead);
return 0;
}
void idxd_wq_quiesce(struct idxd_wq *wq)
{
percpu_ref_kill(&wq->wq_active);
wait_for_completion(&wq->wq_dead);
percpu_ref_exit(&wq->wq_active);
}
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
@@ -108,6 +108,8 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
struct percpu_ref wq_active;
struct completion wq_dead;
struct device conf_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
@@ -395,6 +397,8 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
@@ -178,6 +178,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
init_completion(&wq->wq_dead);
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
@@ -86,6 +86,9 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
if (!percpu_ref_tryget_live(&wq->wq_active))
return -ENXIO;
portal = wq->portal;
/*
@@ -108,6 +111,8 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
return rc;
}
percpu_ref_put(&wq->wq_active);
/*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
@@ -47,54 +47,12 @@ static int idxd_config_bus_match(struct device *dev,
return matched;
}
static int idxd_config_bus_probe(struct device *dev)
static int enable_wq(struct idxd_wq *wq)
{
int rc;
unsigned long flags;
dev_dbg(dev, "%s called\n", __func__);
if (is_idxd_dev(dev)) {
struct idxd_device *idxd = confdev_to_idxd(dev);
if (idxd->state != IDXD_DEV_CONF_READY) {
dev_warn(dev, "Device not ready for config\n");
return -EBUSY;
}
if (!try_module_get(THIS_MODULE))
return -ENXIO;
/* Perform IDXD configuration and enabling */
spin_lock_irqsave(&idxd->dev_lock, flags);
rc = idxd_device_config(idxd);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
if (rc < 0) {
module_put(THIS_MODULE);
dev_warn(dev, "Device config failed: %d\n", rc);
return rc;
}
/* start device */
rc = idxd_device_enable(idxd);
if (rc < 0) {
module_put(THIS_MODULE);
dev_warn(dev, "Device enable failed: %d\n", rc);
return rc;
}
dev_info(dev, "Device %s enabled\n", dev_name(dev));
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
module_put(THIS_MODULE);
dev_dbg(dev, "Failed to register dmaengine device\n");
return rc;
}
return 0;
} else if (is_idxd_wq_dev(dev)) {
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
unsigned long flags;
int rc;
mutex_lock(&wq->wq_lock);
@@ -125,8 +83,7 @@ static int idxd_config_bus_probe(struct device *dev)
/* Shared WQ checks */
if (wq_shared(wq)) {
if (!device_swq_supported(idxd)) {
dev_warn(dev,
"PASID not enabled and shared WQ.\n");
dev_warn(dev, "PASID not enabled and shared WQ.\n");
mutex_unlock(&wq->wq_lock);
return -ENXIO;
}
@@ -139,8 +96,7 @@ static int idxd_config_bus_probe(struct device *dev)
* threshold via sysfs.
*/
if (wq->threshold == 0) {
dev_warn(dev,
"Shared WQ and threshold 0.\n");
dev_warn(dev, "Shared WQ and threshold 0.\n");
mutex_unlock(&wq->wq_lock);
return -EINVAL;
}
@@ -158,16 +114,14 @@ static int idxd_config_bus_probe(struct device *dev)
spin_unlock_irqrestore(&idxd->dev_lock, flags);
if (rc < 0) {
mutex_unlock(&wq->wq_lock);
dev_warn(dev, "Writing WQ %d config failed: %d\n",
wq->id, rc);
dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
return rc;
}
rc = idxd_wq_enable(wq);
if (rc < 0) {
mutex_unlock(&wq->wq_lock);
dev_warn(dev, "WQ %d enabling failed: %d\n",
wq->id, rc);
dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
return rc;
}
@@ -183,7 +137,14 @@ static int idxd_config_bus_probe(struct device *dev)
wq->client_count = 0;
dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
if (wq->type == IDXD_WQT_KERNEL) {
rc = idxd_wq_init_percpu_ref(wq);
if (rc < 0) {
dev_dbg(dev, "percpu_ref setup failed\n");
mutex_unlock(&wq->wq_lock);
return rc;
}
}
if (is_idxd_wq_dmaengine(wq)) {
rc = idxd_register_dma_channel(wq);
@@ -202,7 +163,60 @@ static int idxd_config_bus_probe(struct device *dev)
}
mutex_unlock(&wq->wq_lock);
dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
return 0;
}
static int idxd_config_bus_probe(struct device *dev)
{
int rc;
unsigned long flags;
dev_dbg(dev, "%s called\n", __func__);
if (is_idxd_dev(dev)) {
struct idxd_device *idxd = confdev_to_idxd(dev);
if (idxd->state != IDXD_DEV_CONF_READY) {
dev_warn(dev, "Device not ready for config\n");
return -EBUSY;
}
if (!try_module_get(THIS_MODULE))
return -ENXIO;
/* Perform IDXD configuration and enabling */
spin_lock_irqsave(&idxd->dev_lock, flags);
rc = idxd_device_config(idxd);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
if (rc < 0) {
module_put(THIS_MODULE);
dev_warn(dev, "Device config failed: %d\n", rc);
return rc;
}
/* start device */
rc = idxd_device_enable(idxd);
if (rc < 0) {
module_put(THIS_MODULE);
dev_warn(dev, "Device enable failed: %d\n", rc);
return rc;
}
dev_info(dev, "Device %s enabled\n", dev_name(dev));
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
module_put(THIS_MODULE);
dev_dbg(dev, "Failed to register dmaengine device\n");
return rc;
}
return 0;
} else if (is_idxd_wq_dev(dev)) {
struct idxd_wq *wq = confdev_to_wq(dev);
return enable_wq(wq);
}
return -ENODEV;
@@ -220,6 +234,9 @@ static void disable_wq(struct idxd_wq *wq)
return;
}
if (wq->type == IDXD_WQT_KERNEL)
idxd_wq_quiesce(wq);
if (is_idxd_wq_dmaengine(wq))
idxd_unregister_dma_channel(wq);
else if (is_idxd_wq_cdev(wq))