Commit 016a9f50 authored by Linus Torvalds

Merge tag 'ntb-4.2-rc7' of git://github.com/jonmason/ntb

Pull NTB bugfixes from Jon Mason:
 "NTB bug fixes to address transport receive issues, stats, link
  negotiation issues, and string formatting"

* tag 'ntb-4.2-rc7' of git://github.com/jonmason/ntb:
  ntb: avoid format string in dev_set_name
  NTB: Fix dereference before check
  NTB: Fix zero size or integer overflow in ntb_set_mw
  NTB: Schedule to receive on QP link up
  NTB: Fix oops in debugfs when transport is half-up
  NTB: ntb_netdev not covering all receive errors
  NTB: Fix transport stats for multiple devices
  NTB: Fix ntb_transport out-of-order RX update
parents a3ca013d e15f9409
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 
 	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
 
+	if (len < 0) {
+		ndev->stats.rx_errors++;
+		ndev->stats.rx_length_errors++;
+		goto enqueue_again;
+	}
+
 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, ndev);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 		return;
 	}
 
+enqueue_again:
 	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
 	if (rc) {
 		dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
 
 		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
 					      ndev->mtu + ETH_HLEN);
-		if (rc == -EINVAL) {
+		if (rc) {
 			dev_kfree_skb(skb);
 			goto err;
 		}
...
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
 	ntb->dev.bus = &ntb_bus;
 	ntb->dev.parent = &ntb->pdev->dev;
 	ntb->dev.release = ntb_dev_release;
-	dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+	dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
 	ntb->ctx = NULL;
 	ntb->ctx_ops = NULL;
...
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
 			   void *data, int len);
+	struct list_head rx_post_q;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
-	spinlock_t ntb_rx_pend_q_lock;
-	spinlock_t ntb_rx_free_q_lock;
+	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+	spinlock_t ntb_rx_q_lock;
 	void *rx_buff;
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
 	bool link_is_up;
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	char *buf;
 	ssize_t ret, out_offset, out_count;
 
+	qp = filp->private_data;
+
+	if (!qp || !qp->link_is_up)
+		return 0;
+
 	out_count = 1000;
 
 	buf = kmalloc(out_count, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	qp = filp->private_data;
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "NTB QP stats\n");
@@ -534,6 +541,27 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
 	return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+					   struct list_head *list,
+					   struct list_head *to_list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(list)) {
+		entry = NULL;
+	} else {
+		entry = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_move_tail(&entry->entry, to_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 				     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-		      unsigned int size)
+		      resource_size_t size)
 {
 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
 	struct pci_dev *pdev = nt->ndev->pdev;
-	unsigned int xlat_size, buff_size;
+	size_t xlat_size, buff_size;
 	int rc;
 
+	if (!size)
+		return -EINVAL;
+
 	xlat_size = round_up(size, mw->xlat_align_size);
 	buff_size = round_up(size, mw->xlat_align);
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 	if (!mw->virt_addr) {
 		mw->xlat_size = 0;
 		mw->buff_size = 0;
-		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
 			buff_size);
 		return -ENOMEM;
 	}
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
 		if (qp->event_handler)
 			qp->event_handler(qp->cb_data, qp->link_is_up);
+
+		tasklet_schedule(&qp->rxc_db_work);
 	} else if (nt->link_is_up)
 		schedule_delayed_work(&qp->link_work,
 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-	if (nt_debugfs_dir) {
+	if (nt->debugfs_node_dir) {
 		char debugfs_name[4];
 
 		snprintf(debugfs_name, 4, "qp%d", qp_num);
 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-						     nt_debugfs_dir);
+						     nt->debugfs_node_dir);
 
 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
 							qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-	spin_lock_init(&qp->ntb_rx_pend_q_lock);
-	spin_lock_init(&qp->ntb_rx_free_q_lock);
+	spin_lock_init(&qp->ntb_rx_q_lock);
 	spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+	INIT_LIST_HEAD(&qp->rx_post_q);
 	INIT_LIST_HEAD(&qp->rx_pend_q);
 	INIT_LIST_HEAD(&qp->rx_free_q);
 	INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		goto err2;
 	}
 
+	if (nt_debugfs_dir) {
+		nt->debugfs_node_dir =
+			debugfs_create_dir(pci_name(ndev->pdev),
+					   nt_debugfs_dir);
+	}
+
 	for (i = 0; i < qp_count; i++) {
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
 	kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-	struct ntb_queue_entry *entry = data;
-	struct ntb_transport_qp *qp = entry->qp;
-	void *cb_data = entry->cb_data;
-	unsigned int len = entry->len;
-	struct ntb_payload_header *hdr = entry->rx_hdr;
+	struct ntb_queue_entry *entry;
+	void *cb_data;
+	unsigned int len;
+	unsigned long irqflags;
 
-	hdr->flags = 0;
+	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
 
-	iowrite32(entry->index, &qp->rx_info->entry);
+	while (!list_empty(&qp->rx_post_q)) {
+		entry = list_first_entry(&qp->rx_post_q,
+					 struct ntb_queue_entry, entry);
+		if (!(entry->flags & DESC_DONE_FLAG))
+			break;
+
+		entry->rx_hdr->flags = 0;
+		iowrite32(entry->index, &qp->rx_info->entry);
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+		cb_data = entry->cb_data;
+		len = entry->len;
 
-	if (qp->rx_handler && qp->client_ready)
-		qp->rx_handler(qp, qp->cb_data, cb_data, len);
+		list_move_tail(&entry->entry, &qp->rx_free_q);
+
+		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+		if (qp->rx_handler && qp->client_ready)
+			qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+	}
+
+	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+	struct ntb_queue_entry *entry = data;
+
+	entry->flags |= DESC_DONE_FLAG;
+
+	ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-			 size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
-	size_t pay_off, buff_off;
+	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 
-	entry->len = len;
+	len = entry->len;
 
 	if (!chan)
 		goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	struct ntb_payload_header *hdr;
 	struct ntb_queue_entry *entry;
 	void *offset;
-	int rc;
 
 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,25 +1317,27 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 		return -EIO;
 	}
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
 	if (!entry) {
 		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
 		qp->rx_err_no_buf++;
-
-		rc = -ENOMEM;
-		goto err;
+		return -EAGAIN;
 	}
 
+	entry->rx_hdr = hdr;
+	entry->index = qp->rx_index;
+
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
 			"receive buffer overflow! Wanted %d got %d\n",
 			hdr->len, entry->len);
 		qp->rx_err_oflow++;
-		rc = -EIO;
-		goto err;
-	}
 
-	dev_dbg(&qp->ndev->pdev->dev,
-		"RX OK index %u ver %u size %d into buf size %d\n",
-		qp->rx_index, hdr->ver, hdr->len, entry->len);
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
+
+		ntb_complete_rxc(qp);
+	} else {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
@@ -1281,39 +1345,15 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
 
-	entry->index = qp->rx_index;
+		entry->len = hdr->len;
 
-	entry->rx_hdr = hdr;
-
-	ntb_async_rx(entry, offset, hdr->len);
+		ntb_async_rx(entry, offset);
+	}
 
 	qp->rx_index++;
 	qp->rx_index %= qp->rx_max_entry;
 
 	return 0;
-
-err:
-	/* FIXME: if this syncrhonous update of the rx_index gets ahead of
-	 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-	 * scenarios:
-	 *
-	 * 1) The peer might miss this update, but observe the update
-	 * from the memcpy completion callback. In this case, the buffer will
-	 * not be freed on the peer to be reused for a different packet. The
-	 * successful rx of a later packet would clear the condition, but the
-	 * condition could persist if several rx fail in a row.
-	 *
-	 * 2) The peer may observe this update before the asyncrhonous copy of
-	 * prior packets is completed. The peer may overwrite the buffers of
-	 * the prior packets before they are copied.
-	 *
-	 * 3) Both: the peer may observe the update, and then observe the index
-	 * decrement by the asynchronous completion callback. Who knows what
-	 * badness that will cause.
-	 */
-	hdr->flags = 0;
-	iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-	return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
 			break;
 	}
 
-	if (qp->dma_chan)
+	if (i && qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
 
 	if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 			goto err1;
 
 		entry->qp = qp;
-		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
@@ -1634,7 +1674,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->dma_chan)
 		dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_ctx *nt = qp->transport;
 	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 	u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	qp->tx_handler = NULL;
 	qp->event_handler = NULL;
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
 		kfree(entry);
 	}
 
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 
-	nt->qp_bitmap_free |= qp_bit;
+	qp->transport->qp_bitmap_free |= qp_bit;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
 	if (!qp || qp->client_ready)
 		return NULL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
 	if (!entry)
 		return NULL;
 
 	buf = entry->cb_data;
 	*len = entry->len;
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
 	return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	if (!qp)
 		return -EINVAL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
+	entry->flags = 0;
 
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
+
+	tasklet_schedule(&qp->rxc_db_work);
 
 	return 0;
 }
...