Commit 1199aa61 authored by Allen Hubbe, committed by Jon Mason

NTB: Use NUMA memory and DMA chan in transport

Allocate memory and request the DMA channel for the same NUMA node as
the NTB device.
Signed-off-by: Allen Hubbe <Allen.Hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
parent 28762289
...@@ -346,6 +346,7 @@ int ntb_transport_register_client_dev(char *device_name) ...@@ -346,6 +346,7 @@ int ntb_transport_register_client_dev(char *device_name)
{ {
struct ntb_transport_client_dev *client_dev; struct ntb_transport_client_dev *client_dev;
struct ntb_transport_ctx *nt; struct ntb_transport_ctx *nt;
int node;
int rc, i = 0; int rc, i = 0;
if (list_empty(&ntb_transport_list)) if (list_empty(&ntb_transport_list))
...@@ -354,8 +355,10 @@ int ntb_transport_register_client_dev(char *device_name) ...@@ -354,8 +355,10 @@ int ntb_transport_register_client_dev(char *device_name)
list_for_each_entry(nt, &ntb_transport_list, entry) { list_for_each_entry(nt, &ntb_transport_list, entry) {
struct device *dev; struct device *dev;
client_dev = kzalloc(sizeof(*client_dev), node = dev_to_node(&nt->ndev->dev);
GFP_KERNEL);
client_dev = kzalloc_node(sizeof(*client_dev),
GFP_KERNEL, node);
if (!client_dev) { if (!client_dev) {
rc = -ENOMEM; rc = -ENOMEM;
goto err; goto err;
...@@ -953,6 +956,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -953,6 +956,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
struct ntb_transport_mw *mw; struct ntb_transport_mw *mw;
unsigned int mw_count, qp_count; unsigned int mw_count, qp_count;
u64 qp_bitmap; u64 qp_bitmap;
int node;
int rc, i; int rc, i;
if (ntb_db_is_unsafe(ndev)) if (ntb_db_is_unsafe(ndev))
...@@ -962,7 +966,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -962,7 +966,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
dev_dbg(&ndev->dev, dev_dbg(&ndev->dev,
"scratchpad is unsafe, proceed anyway...\n"); "scratchpad is unsafe, proceed anyway...\n");
nt = kzalloc(sizeof(*nt), GFP_KERNEL); node = dev_to_node(&ndev->dev);
nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
if (!nt) if (!nt)
return -ENOMEM; return -ENOMEM;
...@@ -972,7 +978,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -972,7 +978,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
nt->mw_count = mw_count; nt->mw_count = mw_count;
nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL); nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
GFP_KERNEL, node);
if (!nt->mw_vec) { if (!nt->mw_vec) {
rc = -ENOMEM; rc = -ENOMEM;
goto err; goto err;
...@@ -1012,7 +1019,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1012,7 +1019,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
nt->qp_bitmap = qp_bitmap; nt->qp_bitmap = qp_bitmap;
nt->qp_bitmap_free = qp_bitmap; nt->qp_bitmap_free = qp_bitmap;
nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL); nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
GFP_KERNEL, node);
if (!nt->qp_vec) { if (!nt->qp_vec) {
rc = -ENOMEM; rc = -ENOMEM;
goto err2; goto err2;
...@@ -1512,6 +1520,11 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp) ...@@ -1512,6 +1520,11 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
ntb_qp_link_down_reset(qp); ntb_qp_link_down_reset(qp);
} }
/*
 * dmaengine channel filter: accept only channels whose device sits on the
 * NUMA node requested by the caller (passed through the opaque cookie).
 */
static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	int requested_node = (int)(unsigned long)node;

	return requested_node == dev_to_node(&chan->dev->device);
}
/** /**
* ntb_transport_create_queue - Create a new NTB transport layer queue * ntb_transport_create_queue - Create a new NTB transport layer queue
* @rx_handler: receive callback function * @rx_handler: receive callback function
...@@ -1537,12 +1550,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev, ...@@ -1537,12 +1550,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
struct ntb_transport_qp *qp; struct ntb_transport_qp *qp;
u64 qp_bit; u64 qp_bit;
unsigned int free_queue; unsigned int free_queue;
dma_cap_mask_t dma_mask;
int node;
int i; int i;
ndev = dev_ntb(client_dev->parent); ndev = dev_ntb(client_dev->parent);
pdev = ndev->pdev; pdev = ndev->pdev;
nt = ndev->ctx; nt = ndev->ctx;
node = dev_to_node(&ndev->dev);
free_queue = ffs(nt->qp_bitmap); free_queue = ffs(nt->qp_bitmap);
if (!free_queue) if (!free_queue)
goto err; goto err;
...@@ -1560,15 +1577,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev, ...@@ -1560,15 +1577,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
qp->tx_handler = handlers->tx_handler; qp->tx_handler = handlers->tx_handler;
qp->event_handler = handlers->event_handler; qp->event_handler = handlers->event_handler;
dmaengine_get(); dma_cap_zero(dma_mask);
qp->dma_chan = dma_find_channel(DMA_MEMCPY); dma_cap_set(DMA_MEMCPY, dma_mask);
if (!qp->dma_chan) {
dmaengine_put(); qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
(void *)(unsigned long)node);
if (!qp->dma_chan)
dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n"); dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
}
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(*entry), GFP_ATOMIC); entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry) if (!entry)
goto err1; goto err1;
...@@ -1578,7 +1596,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, ...@@ -1578,7 +1596,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
} }
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(*entry), GFP_ATOMIC); entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry) if (!entry)
goto err2; goto err2;
...@@ -1601,7 +1619,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, ...@@ -1601,7 +1619,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry); kfree(entry);
if (qp->dma_chan) if (qp->dma_chan)
dmaengine_put(); dma_release_channel(qp->dma_chan);
nt->qp_bitmap_free |= qp_bit; nt->qp_bitmap_free |= qp_bit;
err: err:
return NULL; return NULL;
...@@ -1638,7 +1656,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) ...@@ -1638,7 +1656,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
*/ */
dma_sync_wait(chan, qp->last_cookie); dma_sync_wait(chan, qp->last_cookie);
dmaengine_terminate_all(chan); dmaengine_terminate_all(chan);
dmaengine_put(); dma_release_channel(chan);
} }
qp_bit = BIT_ULL(qp->qp_num); qp_bit = BIT_ULL(qp->qp_num);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment