Commit 970e8303 authored by Brijesh Singh, committed by Herbert Xu

crypto: ccp - Use devres interface to allocate PCI/iomap and cleanup

Update the PCI and platform files to use the devres interface to allocate the
PCI and iomap resources. Also add helper functions to consolidate the module
init, exit and power management code duplication.
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a0a613ab
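For readers unfamiliar with devres, here is a minimal sketch of the managed-resource probe pattern this commit moves to. It is illustrative only and not part of the commit; the "toy" driver name and probe function are placeholders. Resources acquired through the pcim_*() calls are tracked by the driver core and released automatically when probe fails or the device is unbound, which is what lets the commit delete the explicit e_iomap/e_device/e_regions unwind labels further below:

#include <linux/pci.h>

/* Hypothetical probe; "toy" is a placeholder name, not part of this commit. */
static int toy_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem * const *iomap_table;
        int bar_mask;
        int ret;

        /* Managed enable: the core disables the device again on detach */
        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        /* Request and ioremap every memory BAR in one managed call */
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        ret = pcim_iomap_regions(pdev, bar_mask, "toy");
        if (ret)
                return ret;

        /* Mappings created above, indexed by BAR number */
        iomap_table = pcim_iomap_table(pdev);
        if (!iomap_table)
                return -ENOMEM;

        pci_set_master(pdev);

        /* On any failure we simply return: devres unwinds everything */
        return 0;
}

The platform side gets the same behaviour from devm_ioremap_resource(), which is why its remove path in this commit also shrinks to a single ccp_dev_destroy() call.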
drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,13 @@ static const struct ccp_actions ccp3_actions = {
 	.irqhandler = ccp_irq_handler,
 };
 
+const struct ccp_vdata ccpv3_platform = {
+	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
+	.perform = &ccp3_actions,
+	.offset = 0,
+};
+
 const struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
 	.setup = NULL,
...
drivers/crypto/ccp/ccp-dev.c
@@ -539,8 +539,69 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 	return ccp->cmd_q_count == suspended;
 }
 
+int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 1;
+
+	/* Wake all the queue kthreads to prepare for suspend */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		wake_up_process(ccp->cmd_q[i].kthread);
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* Wait for all queue kthreads to say they're done */
+	while (!ccp_queues_suspended(ccp))
+		wait_event_interruptible(ccp->suspend_queue,
+					 ccp_queues_suspended(ccp));
+
+	return 0;
+}
+
+int ccp_dev_resume(struct ccp_device *ccp)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 0;
+
+	/* Wake up all the kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].suspended = 0;
+		wake_up_process(ccp->cmd_q[i].kthread);
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	return 0;
+}
+
 #endif
 
+int ccp_dev_init(struct ccp_device *ccp)
+{
+	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
+
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
+
+	return ccp->vdata->perform->init(ccp);
+}
+
+void ccp_dev_destroy(struct ccp_device *ccp)
+{
+	if (!ccp)
+		return;
+
+	ccp->vdata->perform->destroy(ccp);
+}
+
 static int __init ccp_mod_init(void)
 {
 #ifdef CONFIG_X86
...
drivers/crypto/ccp/ccp-dev.h
@@ -652,6 +652,11 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
 void ccp5_debugfs_setup(struct ccp_device *ccp);
 void ccp5_debugfs_destroy(void);
 
+int ccp_dev_init(struct ccp_device *ccp);
+void ccp_dev_destroy(struct ccp_device *ccp);
+int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state);
+int ccp_dev_resume(struct ccp_device *ccp);
+
 /* Structure for computation functions that are device-specific */
 struct ccp_actions {
 	int (*aes)(struct ccp_op *);
@@ -679,6 +684,7 @@ struct ccp_vdata {
 	const unsigned int offset;
 };
 
+extern const struct ccp_vdata ccpv3_platform;
 extern const struct ccp_vdata ccpv3;
 extern const struct ccp_vdata ccpv5a;
 extern const struct ccp_vdata ccpv5b;
...
drivers/crypto/ccp/ccp-pci.c
@@ -150,28 +150,13 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 	ccp->irq = 0;
 }
 
-static int ccp_find_mmio_area(struct ccp_device *ccp)
-{
-	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	resource_size_t io_len;
-	unsigned long io_flags;
-
-	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
-	io_len = pci_resource_len(pdev, ccp->vdata->bar);
-	if ((io_flags & IORESOURCE_MEM) &&
-	    (io_len >= (ccp->vdata->offset + 0x800)))
-		return ccp->vdata->bar;
-
-	return -EIO;
-}
-
 static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ccp_device *ccp;
 	struct ccp_pci *ccp_pci;
 	struct device *dev = &pdev->dev;
-	unsigned int bar;
+	void __iomem * const *iomap_table;
+	int bar_mask;
 	int ret;
 
 	ret = -ENOMEM;
@@ -193,32 +178,34 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ccp->get_irq = ccp_get_irqs;
 	ccp->free_irq = ccp_free_irqs;
 
-	ret = pci_request_regions(pdev, "ccp");
+	ret = pcim_enable_device(pdev);
 	if (ret) {
-		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
+		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
 		goto e_err;
 	}
 
-	ret = pci_enable_device(pdev);
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
 	if (ret) {
-		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
-		goto e_regions;
+		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+		goto e_err;
 	}
 
-	pci_set_master(pdev);
-
-	ret = ccp_find_mmio_area(ccp);
-	if (ret < 0)
-		goto e_device;
-	bar = ret;
+	iomap_table = pcim_iomap_table(pdev);
+	if (!iomap_table) {
+		dev_err(dev, "pcim_iomap_table failed\n");
+		ret = -ENOMEM;
+		goto e_err;
+	}
 
-	ret = -EIO;
-	ccp->io_map = pci_iomap(pdev, bar, 0);
+	ccp->io_map = iomap_table[ccp->vdata->bar];
 	if (!ccp->io_map) {
-		dev_err(dev, "pci_iomap failed\n");
-		goto e_device;
+		dev_err(dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto e_err;
 	}
-	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
+
+	pci_set_master(pdev);
 
 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (ret) {
@@ -226,32 +213,20 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		if (ret) {
 			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
 				ret);
-			goto e_iomap;
+			goto e_err;
 		}
 	}
 
 	dev_set_drvdata(dev, ccp);
 
-	if (ccp->vdata->setup)
-		ccp->vdata->setup(ccp);
-
-	ret = ccp->vdata->perform->init(ccp);
+	ret = ccp_dev_init(ccp);
 	if (ret)
-		goto e_iomap;
+		goto e_err;
 
 	dev_notice(dev, "enabled\n");
 
 	return 0;
 
-e_iomap:
-	pci_iounmap(pdev, ccp->io_map);
-
-e_device:
-	pci_disable_device(pdev);
-
-e_regions:
-	pci_release_regions(pdev);
-
 e_err:
 	dev_notice(dev, "initialization failed\n");
 	return ret;
@@ -265,13 +240,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
 	if (!ccp)
 		return;
 
-	ccp->vdata->perform->destroy(ccp);
-
-	pci_iounmap(pdev, ccp->io_map);
-
-	pci_disable_device(pdev);
-
-	pci_release_regions(pdev);
+	ccp_dev_destroy(ccp);
 
 	dev_notice(dev, "disabled\n");
 }
@@ -281,47 +250,16 @@ static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
-
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 1;
-
-	/* Wake all the queue kthreads to prepare for suspend */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		wake_up_process(ccp->cmd_q[i].kthread);
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	/* Wait for all queue kthreads to say they're done */
-	while (!ccp_queues_suspended(ccp))
-		wait_event_interruptible(ccp->suspend_queue,
-					 ccp_queues_suspended(ccp));
 
-	return 0;
+	return ccp_dev_suspend(ccp, state);
 }
 
 static int ccp_pci_resume(struct pci_dev *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
-
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 0;
-
-	/* Wake up all the kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		ccp->cmd_q[i].suspended = 0;
-		wake_up_process(ccp->cmd_q[i].kthread);
-	}
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
 
-	return 0;
+	return ccp_dev_resume(ccp);
 }
 #endif
...
drivers/crypto/ccp/ccp-platform.c
@@ -104,19 +104,6 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 	free_irq(ccp->irq, dev);
 }
 
-static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
-{
-	struct device *dev = ccp->dev;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct resource *ior;
-
-	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (ior && (resource_size(ior) >= 0x800))
-		return ior;
-
-	return NULL;
-}
-
 static int ccp_platform_probe(struct platform_device *pdev)
 {
 	struct ccp_device *ccp;
@@ -146,7 +133,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
 	ccp->get_irq = ccp_get_irqs;
 	ccp->free_irq = ccp_free_irqs;
 
-	ior = ccp_find_mmio_area(ccp);
+	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	ccp->io_map = devm_ioremap_resource(dev, ior);
 	if (IS_ERR(ccp->io_map)) {
 		ret = PTR_ERR(ccp->io_map);
@@ -174,7 +161,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(dev, ccp);
 
-	ret = ccp->vdata->perform->init(ccp);
+	ret = ccp_dev_init(ccp);
 	if (ret)
 		goto e_err;
 
@@ -192,7 +179,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
 
-	ccp->vdata->perform->destroy(ccp);
+	ccp_dev_destroy(ccp);
 
 	dev_notice(dev, "disabled\n");
 
@@ -205,47 +192,16 @@ static int ccp_platform_suspend(struct platform_device *pdev,
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
-
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 1;
-
-	/* Wake all the queue kthreads to prepare for suspend */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		wake_up_process(ccp->cmd_q[i].kthread);
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	/* Wait for all queue kthreads to say they're done */
-	while (!ccp_queues_suspended(ccp))
-		wait_event_interruptible(ccp->suspend_queue,
-					 ccp_queues_suspended(ccp));
 
-	return 0;
+	return ccp_dev_suspend(ccp, state);
 }
 
 static int ccp_platform_resume(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
-
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 0;
-
-	/* Wake up all the kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		ccp->cmd_q[i].suspended = 0;
-		wake_up_process(ccp->cmd_q[i].kthread);
-	}
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
 
-	return 0;
+	return ccp_dev_resume(ccp);
 }
 #endif
@@ -260,7 +216,7 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #ifdef CONFIG_OF
 static const struct of_device_id ccp_of_match[] = {
 	{ .compatible = "amd,ccp-seattle-v1a",
-	  .data = (const void *)&ccpv3 },
+	  .data = (const void *)&ccpv3_platform },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ccp_of_match);
...