Commit 575d34b6 authored by Vinod Koul

Merge branch 'topic/bcom' into for-linus

parents 049d0d38 7076a1e4
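This merge brings the 'topic/bcom' changes into for-linus: the bcm-sba-raid driver is reworked to use a single mailbox channel per SBA device node instead of round-robining requests across several channels, dma_cookie_complete() is serialized under the driver's reqs_lock, and the Kconfig default on iProc SoCs is relaxed from built-in to module.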
drivers/dma/Kconfig
@@ -115,7 +115,7 @@ config BCM_SBA_RAID
         select DMA_ENGINE_RAID
         select ASYNC_TX_DISABLE_XOR_VAL_DMA
         select ASYNC_TX_DISABLE_PQ_VAL_DMA
-        default ARCH_BCM_IPROC
+        default m if ARCH_BCM_IPROC
         help
           Enable support for Broadcom SBA RAID Engine. The SBA RAID
           engine is available on most of the Broadcom iProc SoCs. It
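A note on the Kconfig change above: the old "default ARCH_BCM_IPROC" made the tristate default to y whenever ARCH_BCM_IPROC is enabled, while "default m if ARCH_BCM_IPROC" now defaults the driver to building as a module on those SoCs.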
drivers/dma/bcm-sba-raid.c
 /*
  * Copyright (C) 2017 Broadcom
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */
 
 /*
@@ -25,11 +30,8 @@
  *
  * The Broadcom SBA RAID driver does not require any register programming
  * except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
  * For having more SBA DMA channels, we can create more SBA device nodes
  * in Broadcom SoC specific DTS based on number of hardware rings supported
  * by Broadcom SoC ring manager.
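As context for the hunks that follow, here is a minimal sketch of the single-channel mailbox-client pattern the driver now relies on. It uses only the linux/mailbox_client.h API; the helper name demo_request_single_mchan() is hypothetical and not part of this commit.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Hypothetical helper: bind a mailbox client to exactly one channel
 * (index 0 of the "mboxes" DT property), as sba_probe() below does.
 */
static struct mbox_chan *demo_request_single_mchan(struct device *dev,
                                                   struct mbox_client *cl)
{
        cl->dev = dev;
        cl->knows_txdone = true;   /* client signals tx completion itself */
        cl->tx_tout = 0;

        /* Returns a channel pointer, or ERR_PTR() on failure */
        return mbox_request_channel(cl, 0);
}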
@@ -85,6 +87,7 @@
 #define SBA_CMD_GALOIS                          0xe
 
 #define SBA_MAX_REQ_PER_MBOX_CHANNEL            8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL       8
 
 /* Driver helper macros */
 #define to_sba_request(tx)              \
@@ -142,9 +145,7 @@ struct sba_device {
         u32 max_cmds_pool_size;
         /* Maibox client and Mailbox channels */
         struct mbox_client client;
-        int mchans_count;
-        atomic_t mchans_current;
-        struct mbox_chan **mchans;
+        struct mbox_chan *mchan;
         struct device *mbox_dev;
         /* DMA device and DMA channel */
         struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
 
 /* ====== General helper routines ===== */
 
-static void sba_peek_mchans(struct sba_device *sba)
-{
-        int mchan_idx;
-
-        for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-                mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
         bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
                  * would have completed which will create more
                  * room for new requests.
                  */
-                sba_peek_mchans(sba);
+                mbox_client_peek_data(sba->mchan);
                 return NULL;
         }
 
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
 static int sba_send_mbox_request(struct sba_device *sba,
                                  struct sba_request *req)
 {
-        int mchans_idx, ret = 0;
+        int ret = 0;
 
-        /* Select mailbox channel in round-robin fashion */
-        mchans_idx = atomic_inc_return(&sba->mchans_current);
-        mchans_idx = mchans_idx % sba->mchans_count;
-
         /* Send message for the request */
         req->msg.error = 0;
-        ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+        ret = mbox_send_message(sba->mchan, &req->msg);
         if (ret < 0) {
                 dev_err(sba->dev, "send message failed with error %d", ret);
                 return ret;
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
         }
 
         /* Signal txdone for mailbox channel */
-        mbox_client_txdone(sba->mchans[mchans_idx], ret);
+        mbox_client_txdone(sba->mchan, ret);
 
         return ret;
 }
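The send path above keeps the mailbox framework's client-driven completion model: because client.knows_txdone is set, the driver must acknowledge transmit completion itself. A rough sketch of that send-then-ack sequence, with demo_send() as a made-up wrapper rather than driver code:

#include <linux/mailbox_client.h>

/* Hypothetical wrapper showing the knows_txdone contract: submit a
 * message, then explicitly tell the framework the transmit phase is
 * done so it can advance its internal queue.
 */
static int demo_send(struct mbox_chan *chan, void *msg)
{
        int ret;

        ret = mbox_send_message(chan, msg);   /* negative errno on failure */
        if (ret < 0)
                return ret;

        mbox_client_txdone(chan, 0);          /* ack: tx phase complete */
        return 0;
}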
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
         u32 count;
         struct sba_request *req;
 
-        /*
-         * Process few pending requests
-         *
-         * For now, we process (<number_of_mailbox_channels> * 8)
-         * number of requests at a time.
-         */
-        count = sba->mchans_count * 8;
+        /* Process few pending requests */
+        count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
         while (!list_empty(&sba->reqs_pending_list) && count) {
                 /* Get the first pending request */
                 req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
 
                 WARN_ON(tx->cookie < 0);
                 if (tx->cookie > 0) {
+                        spin_lock_irqsave(&sba->reqs_lock, flags);
                         dma_cookie_complete(tx);
+                        spin_unlock_irqrestore(&sba->reqs_lock, flags);
                         dmaengine_desc_get_callback_invoke(tx, NULL);
                         dma_descriptor_unmap(tx);
                         tx->callback = NULL;
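The two added lines above follow the rule documented next to dma_cookie_complete() in drivers/dma/dmaengine.h: the caller is expected to hold a lock, because the helper updates the channel's completed_cookie and must not race with concurrent cookie updates on the same channel; the driver's reqs_lock now provides that serialization.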
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
         if (ret == DMA_COMPLETE)
                 return ret;
 
-        sba_peek_mchans(sba);
+        mbox_client_peek_data(sba->mchan);
 
         return dma_cookie_status(dchan, cookie, txstate);
 }
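sba_tx_status() above peeks the mailbox channel so that pollers make progress even when no completion interrupt fires. From a DMA client's point of view, a busy-wait on a cookie could look like the sketch below; demo_wait_cookie() is illustrative only and assumes polling is acceptable in the calling context.

#include <linux/dmaengine.h>

/* Hypothetical polling loop: dmaengine_tx_status() lands in
 * sba_tx_status(), which peeks the mailbox channel on every call,
 * so pending completions are noticed even with interrupts quiet.
 */
static int demo_wait_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;

        do {
                status = dmaengine_tx_status(chan, cookie, NULL);
                cpu_relax();
        } while (status == DMA_IN_PROGRESS);

        return (status == DMA_COMPLETE) ? 0 : -EIO;
}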
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)
 
 static int sba_probe(struct platform_device *pdev)
 {
-        int i, ret = 0, mchans_count;
+        int ret = 0;
         struct sba_device *sba;
         struct platform_device *mbox_pdev;
         struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
         sba->dev = &pdev->dev;
         platform_set_drvdata(pdev, sba);
 
-        /* Number of channels equals number of mailbox channels */
+        /* Number of mailbox channels should be atleast 1 */
         ret = of_count_phandle_with_args(pdev->dev.of_node,
                                          "mboxes", "#mbox-cells");
         if (ret <= 0)
                 return -ENODEV;
-        mchans_count = ret;
 
         /* Determine SBA version from DT compatible string */
         if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
         default:
                 return -EINVAL;
         }
-        sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+        sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
         sba->max_cmd_per_req = sba->max_pq_srcs + 3;
         sba->max_xor_srcs = sba->max_cmd_per_req - 1;
         sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
         sba->client.knows_txdone        = true;
         sba->client.tx_tout             = 0;
 
-        /* Allocate mailbox channel array */
-        sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
-                                   sizeof(*sba->mchans), GFP_KERNEL);
-        if (!sba->mchans)
-                return -ENOMEM;
-
-        /* Request mailbox channels */
-        sba->mchans_count = 0;
-        for (i = 0; i < mchans_count; i++) {
-                sba->mchans[i] = mbox_request_channel(&sba->client, i);
-                if (IS_ERR(sba->mchans[i])) {
-                        ret = PTR_ERR(sba->mchans[i]);
-                        goto fail_free_mchans;
-                }
-                sba->mchans_count++;
-        }
-        atomic_set(&sba->mchans_current, 0);
+        /* Request mailbox channel */
+        sba->mchan = mbox_request_channel(&sba->client, 0);
+        if (IS_ERR(sba->mchan)) {
+                ret = PTR_ERR(sba->mchan);
+                goto fail_free_mchan;
+        }
 
         /* Find-out underlying mailbox device */
         ret = of_parse_phandle_with_args(pdev->dev.of_node,
                                          "mboxes", "#mbox-cells", 0, &args);
         if (ret)
-                goto fail_free_mchans;
+                goto fail_free_mchan;
         mbox_pdev = of_find_device_by_node(args.np);
         of_node_put(args.np);
         if (!mbox_pdev) {
                 ret = -ENODEV;
-                goto fail_free_mchans;
+                goto fail_free_mchan;
         }
         sba->mbox_dev = &mbox_pdev->dev;
 
-        /* All mailbox channels should be of same ring manager device */
-        for (i = 1; i < mchans_count; i++) {
-                ret = of_parse_phandle_with_args(pdev->dev.of_node,
-                                                 "mboxes", "#mbox-cells", i, &args);
-                if (ret)
-                        goto fail_free_mchans;
-                mbox_pdev = of_find_device_by_node(args.np);
-                of_node_put(args.np);
-                if (sba->mbox_dev != &mbox_pdev->dev) {
-                        ret = -EINVAL;
-                        goto fail_free_mchans;
-                }
-        }
-
         /* Prealloc channel resource */
         ret = sba_prealloc_channel_resources(sba);
         if (ret)
-                goto fail_free_mchans;
+                goto fail_free_mchan;
 
         /* Check availability of debugfs */
         if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ static int sba_probe(struct platform_device *pdev)
                 goto fail_free_resources;
 
         /* Print device info */
-        dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
-                 dma_chan_name(&sba->dma_chan), sba->ver+1,
-                 sba->mchans_count);
+        dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
+                 dma_chan_name(&sba->dma_chan), sba->ver+1,
+                 dev_name(sba->mbox_dev));
 
         return 0;
 
 fail_free_resources:
         debugfs_remove_recursive(sba->root);
         sba_freeup_channel_resources(sba);
-fail_free_mchans:
-        for (i = 0; i < sba->mchans_count; i++)
-                mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+        mbox_free_channel(sba->mchan);
         return ret;
 }
 
 static int sba_remove(struct platform_device *pdev)
 {
-        int i;
         struct sba_device *sba = platform_get_drvdata(pdev);
 
         dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)
 
         sba_freeup_channel_resources(sba);
 
-        for (i = 0; i < sba->mchans_count; i++)
-                mbox_free_channel(sba->mchans[i]);
+        mbox_free_channel(sba->mchan);
 
         return 0;
 }