Commit e26a5843 authored by Allen Hubbe, committed by Jon Mason

NTB: Split ntb_hw_intel and ntb_transport drivers

Change ntb_hw_intel to use the new NTB hardware abstraction layer.

Split ntb_transport into its own driver.  Change it to use the new NTB
hardware abstraction layer.
Signed-off-by: Allen Hubbe <Allen.Hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
parent a1bd3bae
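At a high level, the new abstraction moves clients from binding to a raw struct pci_dev to binding to a struct ntb_dev on an ntb bus. A minimal sketch of a hardware-agnostic client under the new API (registration names follow this commit's include/linux/ntb.h; the my_* identifiers are hypothetical):

```c
#include <linux/module.h>
#include <linux/ntb.h>

/* Called for each NTB device on the bus; struct ntb_dev replaces
 * the raw pci_dev that old clients were handed. */
static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	dev_info(&ntb->dev, "ntb device with %d memory windows\n",
		 ntb_mw_count(ntb));
	return 0;
}

static void my_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
}

static struct ntb_client my_client = {
	.ops = {
		.probe = my_probe,
		.remove = my_remove,
	},
};

static int __init my_init(void)
{
	return ntb_register_client(&my_client);
}
module_init(my_init);

static void __exit my_exit(void)
{
	ntb_unregister_client(&my_client);
}
module_exit(my_exit);

MODULE_LICENSE("Dual BSD/GPL");
```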
@@ -26,7 +26,33 @@ as ntb hardware, or hardware drivers, are inserted and removed. The
registration uses the Linux Device framework, so it should feel familiar to
anyone who has written a pci driver.
### NTB Transport Client (ntb\_transport) and NTB Netdev (ntb\_netdev)
The primary client for NTB is the Transport client, used in tandem with NTB
Netdev. These drivers function together to create a logical link to the peer,
across the ntb, to exchange packets of network data. The Transport client
establishes a logical link to the peer, and creates queue pairs to exchange
messages and data. The NTB Netdev then creates an ethernet device using a
Transport queue pair. Network data is copied between socket buffers and the
Transport queue pair buffer. The Transport client may be used for other things
besides Netdev; however, no other applications have yet been written.
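As a rough sketch of the queue pair API described above (the my\_\* names are hypothetical; the entry points are the ones ntb\_netdev itself uses):

```c
static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* consume len bytes at data, then repost the receive buffer */
	ntb_transport_rx_enqueue(qp, data, data, len);
}

static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* transmit of data to the peer completed; reclaim the buffer */
}

static void my_event_handler(void *data, int link_is_up)
{
	/* the queue pair's logical link went up or down */
}

static const struct ntb_queue_handlers my_handlers = {
	.rx_handler = my_rx_handler,
	.tx_handler = my_tx_handler,
	.event_handler = my_event_handler,
};

/* from a client's probe(): create a queue pair and bring its link up */
static struct ntb_transport_qp *my_open(void *priv, struct device *client_dev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
	if (qp)
		ntb_transport_link_up(qp);
	return qp;
}
```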
## NTB Hardware Drivers
NTB hardware drivers should register devices with the NTB core driver. After
registering, the clients' probe and remove functions will be called.
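A hedged sketch of the hardware-driver side (assuming the ntb\_register\_device()/ntb\_unregister\_device() calls this commit adds to include/linux/ntb.h; the my\_\* names are hypothetical):

```c
#include <linux/ntb.h>
#include <linux/pci.h>

struct my_hw {
	struct ntb_dev ntb;	/* embedded device handed to the core */
	struct pci_dev *pdev;
};

/* doorbell, scratchpad, and memory window callbacks live here */
extern const struct ntb_dev_ops my_ntb_ops;

static int my_hw_register(struct my_hw *hw)
{
	hw->ntb.pdev = hw->pdev;
	hw->ntb.ops = &my_ntb_ops;

	/* adds the device to the ntb bus; matching clients get probed */
	return ntb_register_device(&hw->ntb);
}
```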
### NTB Intel Hardware Driver (ntb\_hw\_intel)
The Intel hardware driver supports NTB on Xeon and Atom CPUs.
Module Parameters:
* b2b\_mw\_idx - If the peer ntb is to be accessed via a memory window, then use
this memory window to access the peer ntb. A zero or positive value counts
from the first mw idx, and a negative value counts back from the last mw
idx. Both sides MUST set the same value here! The default value is `-1`.
* b2b\_mw\_share - If the peer ntb is to be accessed via a memory window, and if
the memory window is large enough, still allow the client to use the
second half of the memory window for address translation to the peer.
@@ -7007,6 +7007,14 @@ F: drivers/net/ntb_netdev.c
F: include/linux/ntb.h
F: include/linux/ntb_transport.h
NTB INTEL DRIVER
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
S: Supported
W: https://github.com/jonmason/ntb/wiki
T: git git://github.com/jonmason/ntb.git
F: drivers/ntb/hw/intel/
NTFS FILESYSTEM
M: Anton Altaparmakov <anton@tuxera.com>
L: linux-ntfs-dev@lists.sourceforge.net
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Network Linux driver
* PCIe NTB Network Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
@@ -49,6 +51,7 @@
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>
#define NTB_NETDEV_VER "0.7"
@@ -70,26 +73,19 @@ struct ntb_netdev {
static LIST_HEAD(dev_list);
static void ntb_netdev_event_handler(void *data, int status)
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
struct net_device *ndev = data;
struct ntb_netdev *dev = netdev_priv(ndev);
netdev_dbg(ndev, "Event %x, Link %x\n", status,
netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
ntb_transport_link_query(dev->qp));
switch (status) {
case NTB_LINK_DOWN:
netif_carrier_off(ndev);
break;
case NTB_LINK_UP:
if (!ntb_transport_link_query(dev->qp))
return;
if (link_is_up) {
if (ntb_transport_link_query(dev->qp))
netif_carrier_on(ndev);
break;
default:
netdev_warn(ndev, "Unsupported event type %d\n", status);
} else {
netif_carrier_off(ndev);
}
}
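Since the interleaved old and new lines above are hard to read, here is the event handler as it stands after this change (assembled from the added lines; no new logic):

```c
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	/* carrier is up only when both the event and the transport agree */
	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}
```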
@@ -160,8 +156,6 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
struct ntb_netdev *dev = netdev_priv(ndev);
int rc;
netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
if (rc)
goto err;
@@ -322,20 +316,26 @@ static const struct ntb_queue_handlers ntb_netdev_handlers = {
.event_handler = ntb_netdev_event_handler,
};
static int ntb_netdev_probe(struct pci_dev *pdev)
static int ntb_netdev_probe(struct device *client_dev)
{
struct ntb_dev *ntb;
struct net_device *ndev;
struct pci_dev *pdev;
struct ntb_netdev *dev;
int rc;
ndev = alloc_etherdev(sizeof(struct ntb_netdev));
ntb = dev_ntb(client_dev->parent);
pdev = ntb->pdev;
if (!pdev)
return -ENODEV;
ndev = alloc_etherdev(sizeof(*dev));
if (!ndev)
return -ENOMEM;
dev = netdev_priv(ndev);
dev->ndev = ndev;
dev->pdev = pdev;
BUG_ON(!dev->pdev);
ndev->features = NETIF_F_HIGHDMA;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -349,7 +349,8 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
dev->qp = ntb_transport_create_queue(ndev, client_dev,
&ntb_netdev_handlers);
if (!dev->qp) {
rc = -EIO;
goto err;
@@ -372,12 +373,17 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
return rc;
}
static void ntb_netdev_remove(struct pci_dev *pdev)
static void ntb_netdev_remove(struct device *client_dev)
{
struct ntb_dev *ntb;
struct net_device *ndev;
struct pci_dev *pdev;
struct ntb_netdev *dev;
bool found = false;
ntb = dev_ntb(client_dev->parent);
pdev = ntb->pdev;
list_for_each_entry(dev, &dev_list, list) {
if (dev->pdev == pdev) {
found = true;
@@ -396,7 +402,7 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
free_netdev(ndev);
}
static struct ntb_client ntb_netdev_client = {
static struct ntb_transport_client ntb_netdev_client = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.probe = ntb_netdev_probe,
@@ -407,7 +413,7 @@ static int __init ntb_netdev_init_module(void)
{
int rc;
rc = ntb_register_client_dev(KBUILD_MODNAME);
rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
if (rc)
return rc;
return ntb_transport_register_client(&ntb_netdev_client);
@@ -417,6 +423,6 @@ module_init(ntb_netdev_init_module);
static void __exit ntb_netdev_exit_module(void)
{
ntb_transport_unregister_client(&ntb_netdev_client);
ntb_unregister_client_dev(KBUILD_MODNAME);
ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);
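Putting the renames together, the shape of a transport client module after this change is the following (a sketch with hypothetical my_* names; the registration calls are exactly the ones used above):

```c
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

static int my_probe(struct device *client_dev)
{
	/* client devices sit on the ntb_transport bus; their parent is
	 * the ntb device, from which the PCI function can be recovered */
	struct ntb_dev *ntb = dev_ntb(client_dev->parent);
	struct pci_dev *pdev = ntb->pdev;

	return pdev ? 0 : -ENODEV;
}

static void my_remove(struct device *client_dev)
{
}

static struct ntb_transport_client my_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = my_probe,
	.remove = my_remove,
};

static int __init my_init(void)
{
	int rc;

	/* create the "<modname>0" device, then bind this client to it */
	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&my_client);
	if (rc)
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);

	return rc;
}
module_init(my_init);

static void __exit my_exit(void)
{
	ntb_transport_unregister_client(&my_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(my_exit);

MODULE_LICENSE("Dual BSD/GPL");
```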
config NTB
tristate "Intel Non-Transparent Bridge support"
menuconfig NTB
tristate "Non-Transparent Bridge support"
depends on PCI
depends on X86
help
The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
connecting 2 systems. When configured, writes to the device's PCI
@@ -11,3 +10,17 @@ config NTB
If unsure, say N.
if NTB
source "drivers/ntb/hw/Kconfig"
config NTB_TRANSPORT
tristate "NTB Transport Client"
help
This is a transport driver that enables connected systems to exchange
messages over the ntb hardware. The transport exposes a queue pair api
to client drivers.
If unsure, say N.
endif # NTB
obj-$(CONFIG_NTB) += ntb.o
obj-$(CONFIG_NTB) += ntb_hw_intel.o
ntb_hw_intel-objs := hw/intel/ntb_hw_intel.o ntb_transport.o
obj-$(CONFIG_NTB) += ntb.o hw/
obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o
source "drivers/ntb/hw/intel/Kconfig"
obj-$(CONFIG_NTB_INTEL) += intel/
config NTB_INTEL
tristate "Intel Non-Transparent Bridge support"
depends on X86_64
help
This driver supports Intel NTB on capable Xeon and Atom hardware.
If unsure, say N.
obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
[source diff omitted: too large to display]
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,341 +47,296 @@
* Contact Information:
* Jon Mason <jon.mason@intel.com>
*/
#include <linux/ntb_transport.h>
#define NTB_LINK_STATUS_ACTIVE 0x2000
#define NTB_LINK_SPEED_MASK 0x000f
#define NTB_LINK_WIDTH_MASK 0x03f0
#define SNB_MSIX_CNT 4
#define SNB_MAX_B2B_SPADS 16
#define SNB_MAX_COMPAT_SPADS 16
/* Reserve the uppermost bit for link interrupt */
#define SNB_MAX_DB_BITS 15
#define SNB_LINK_DB 15
#define SNB_DB_BITS_PER_VEC 5
#define HSX_SPLITBAR_MAX_MW 3
#define SNB_MAX_MW 2
#define SNB_ERRATA_MAX_MW 1
#define SNB_DB_HW_LINK 0x8000
#define SNB_UNCERRSTS_OFFSET 0x014C
#define SNB_CORERRSTS_OFFSET 0x0158
#define SNB_LINK_STATUS_OFFSET 0x01A2
#define SNB_PCICMD_OFFSET 0x0504
#define SNB_DEVCTRL_OFFSET 0x0598
#define SNB_DEVSTS_OFFSET 0x059A
#define SNB_SLINK_STATUS_OFFSET 0x05A2
#define SNB_PBAR2LMT_OFFSET 0x0000
#ifndef NTB_HW_INTEL_H
#define NTB_HW_INTEL_H
#include <linux/ntb.h>
#include <linux/pci.h>
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
/* SNB hardware (and JSF, IVT, HSX) */
#define SNB_PBAR23LMT_OFFSET 0x0000
#define SNB_PBAR45LMT_OFFSET 0x0008
#define SNB_PBAR4LMT_OFFSET 0x0008
#define SNB_PBAR5LMT_OFFSET 0x000C
#define SNB_PBAR2XLAT_OFFSET 0x0010
#define SNB_PBAR5LMT_OFFSET 0x000c
#define SNB_PBAR23XLAT_OFFSET 0x0010
#define SNB_PBAR45XLAT_OFFSET 0x0018
#define SNB_PBAR4XLAT_OFFSET 0x0018
#define SNB_PBAR5XLAT_OFFSET 0x001C
#define SNB_SBAR2LMT_OFFSET 0x0020
#define SNB_PBAR5XLAT_OFFSET 0x001c
#define SNB_SBAR23LMT_OFFSET 0x0020
#define SNB_SBAR45LMT_OFFSET 0x0028
#define SNB_SBAR4LMT_OFFSET 0x0028
#define SNB_SBAR5LMT_OFFSET 0x002C
#define SNB_SBAR2XLAT_OFFSET 0x0030
#define SNB_SBAR5LMT_OFFSET 0x002c
#define SNB_SBAR23XLAT_OFFSET 0x0030
#define SNB_SBAR45XLAT_OFFSET 0x0038
#define SNB_SBAR4XLAT_OFFSET 0x0038
#define SNB_SBAR5XLAT_OFFSET 0x003C
#define SNB_SBAR5XLAT_OFFSET 0x003c
#define SNB_SBAR0BASE_OFFSET 0x0040
#define SNB_SBAR2BASE_OFFSET 0x0048
#define SNB_SBAR23BASE_OFFSET 0x0048
#define SNB_SBAR45BASE_OFFSET 0x0050
#define SNB_SBAR4BASE_OFFSET 0x0050
#define SNB_SBAR5BASE_OFFSET 0x0054
#define SNB_SBDF_OFFSET 0x005c
#define SNB_NTBCNTL_OFFSET 0x0058
#define SNB_SBDF_OFFSET 0x005C
#define SNB_PDOORBELL_OFFSET 0x0060
#define SNB_PDBMSK_OFFSET 0x0062
#define SNB_SDOORBELL_OFFSET 0x0064
#define SNB_SDBMSK_OFFSET 0x0066
#define SNB_USMEMMISS_OFFSET 0x0070
#define SNB_SPAD_OFFSET 0x0080
#define SNB_SPADSEMA4_OFFSET 0x00c0
#define SNB_PBAR23SZ_OFFSET 0x00d0
#define SNB_PBAR45SZ_OFFSET 0x00d1
#define SNB_PBAR4SZ_OFFSET 0x00d1
#define SNB_SBAR23SZ_OFFSET 0x00d2
#define SNB_SBAR45SZ_OFFSET 0x00d3
#define SNB_SBAR4SZ_OFFSET 0x00d3
#define SNB_PPD_OFFSET 0x00d4
#define SNB_PBAR5SZ_OFFSET 0x00d5
#define SNB_SBAR5SZ_OFFSET 0x00d6
#define SNB_WCCNTRL_OFFSET 0x00e0
#define SNB_UNCERRSTS_OFFSET 0x014c
#define SNB_CORERRSTS_OFFSET 0x0158
#define SNB_LINK_STATUS_OFFSET 0x01a2
#define SNB_SPCICMD_OFFSET 0x0504
#define SNB_DEVCTRL_OFFSET 0x0598
#define SNB_DEVSTS_OFFSET 0x059a
#define SNB_SLINK_STATUS_OFFSET 0x05a2
#define SNB_B2B_SPAD_OFFSET 0x0100
#define SNB_B2B_DOORBELL_OFFSET 0x0140
#define SNB_B2B_XLAT_OFFSETL 0x0144
#define SNB_B2B_XLAT_OFFSETU 0x0148
/*
* The addresses are setup so the 32bit BARs can function. Thus
* the addresses are all in 32bit space
*/
#define SNB_MBAR01_USD_ADDR 0x000000002100000CULL
#define SNB_MBAR23_USD_ADDR 0x000000004100000CULL
#define SNB_MBAR4_USD_ADDR 0x000000008100000CULL
#define SNB_MBAR5_USD_ADDR 0x00000000A100000CULL
#define SNB_MBAR01_DSD_ADDR 0x000000002000000CULL
#define SNB_MBAR23_DSD_ADDR 0x000000004000000CULL
#define SNB_MBAR4_DSD_ADDR 0x000000008000000CULL
#define SNB_MBAR5_DSD_ADDR 0x00000000A000000CULL
#define BWD_MSIX_CNT 34
#define BWD_MAX_SPADS 16
#define BWD_MAX_DB_BITS 34
#define BWD_DB_BITS_PER_VEC 1
#define BWD_MAX_MW 2
#define BWD_PCICMD_OFFSET 0xb004
#define BWD_MBAR23_OFFSET 0xb018
#define BWD_MBAR45_OFFSET 0xb020
#define BWD_DEVCTRL_OFFSET 0xb048
#define BWD_LINK_STATUS_OFFSET 0xb052
#define BWD_ERRCORSTS_OFFSET 0xb110
#define SNB_PPD_CONN_MASK 0x03
#define SNB_PPD_CONN_TRANSPARENT 0x00
#define SNB_PPD_CONN_B2B 0x01
#define SNB_PPD_CONN_RP 0x02
#define SNB_PPD_DEV_MASK 0x10
#define SNB_PPD_DEV_USD 0x00
#define SNB_PPD_DEV_DSD 0x10
#define SNB_PPD_SPLIT_BAR_MASK 0x40
#define SNB_PPD_TOPO_MASK (SNB_PPD_CONN_MASK | SNB_PPD_DEV_MASK)
#define SNB_PPD_TOPO_PRI_USD (SNB_PPD_CONN_RP | SNB_PPD_DEV_USD)
#define SNB_PPD_TOPO_PRI_DSD (SNB_PPD_CONN_RP | SNB_PPD_DEV_DSD)
#define SNB_PPD_TOPO_SEC_USD (SNB_PPD_CONN_TRANSPARENT | SNB_PPD_DEV_USD)
#define SNB_PPD_TOPO_SEC_DSD (SNB_PPD_CONN_TRANSPARENT | SNB_PPD_DEV_DSD)
#define SNB_PPD_TOPO_B2B_USD (SNB_PPD_CONN_B2B | SNB_PPD_DEV_USD)
#define SNB_PPD_TOPO_B2B_DSD (SNB_PPD_CONN_B2B | SNB_PPD_DEV_DSD)
#define SNB_MW_COUNT 2
#define HSX_SPLIT_BAR_MW_COUNT 3
#define SNB_DB_COUNT 15
#define SNB_DB_LINK 15
#define SNB_DB_LINK_BIT BIT_ULL(SNB_DB_LINK)
#define SNB_DB_MSIX_VECTOR_COUNT 4
#define SNB_DB_MSIX_VECTOR_SHIFT 5
#define SNB_DB_TOTAL_SHIFT 16
#define SNB_SPAD_COUNT 16
/* BWD hardware */
#define BWD_SBAR2XLAT_OFFSET 0x0008
#define BWD_SBAR4XLAT_OFFSET 0x0010
#define BWD_PDOORBELL_OFFSET 0x0020
#define BWD_PDBMSK_OFFSET 0x0028
#define BWD_NTBCNTL_OFFSET 0x0060
#define BWD_EBDF_OFFSET 0x0064
#define BWD_SPAD_OFFSET 0x0080
#define BWD_SPADSEMA_OFFSET 0x00c0
#define BWD_STKYSPAD_OFFSET 0x00c4
#define BWD_PPD_OFFSET 0x00d4
#define BWD_PBAR2XLAT_OFFSET 0x8008
#define BWD_PBAR4XLAT_OFFSET 0x8010
#define BWD_B2B_DOORBELL_OFFSET 0x8020
#define BWD_B2B_SPAD_OFFSET 0x8080
#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
#define BWD_MODPHY_PCSREG4 0x1c004
#define BWD_MODPHY_PCSREG6 0x1c006
#define BWD_IP_BASE 0xC000
#define BWD_SPCICMD_OFFSET 0xb004
#define BWD_LINK_STATUS_OFFSET 0xb052
#define BWD_ERRCORSTS_OFFSET 0xb110
#define BWD_IP_BASE 0xc000
#define BWD_DESKEWSTS_OFFSET (BWD_IP_BASE + 0x3024)
#define BWD_LTSSMERRSTS0_OFFSET (BWD_IP_BASE + 0x3180)
#define BWD_LTSSMSTATEJMP_OFFSET (BWD_IP_BASE + 0x3040)
#define BWD_IBSTERRRCRVSTS0_OFFSET (BWD_IP_BASE + 0x3324)
#define BWD_MODPHY_PCSREG4 0x1c004
#define BWD_MODPHY_PCSREG6 0x1c006
#define BWD_DESKEWSTS_DBERR (1 << 15)
#define BWD_LTSSMERRSTS0_UNEXPECTEDEI (1 << 20)
#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
#define NTB_CNTL_CFG_LOCK (1 << 0)
#define NTB_CNTL_LINK_DISABLE (1 << 1)
#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2)
#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4)
#define NTB_CNTL_S2P_BAR4_SNOOP (1 << 6)
#define NTB_CNTL_P2S_BAR4_SNOOP (1 << 8)
#define NTB_CNTL_S2P_BAR5_SNOOP (1 << 12)
#define NTB_CNTL_P2S_BAR5_SNOOP (1 << 14)
#define BWD_CNTL_LINK_DOWN (1 << 16)
#define NTB_PPD_OFFSET 0x00D4
#define SNB_PPD_CONN_TYPE 0x0003
#define SNB_PPD_DEV_TYPE 0x0010
#define SNB_PPD_SPLIT_BAR (1 << 6)
#define BWD_PPD_INIT_LINK 0x0008
#define BWD_PPD_CONN_TYPE 0x0300
#define BWD_PPD_DEV_TYPE 0x1000
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
#define BWD_PPD_CONN_MASK 0x0300
#define BWD_PPD_CONN_TRANSPARENT 0x0000
#define BWD_PPD_CONN_B2B 0x0100
#define BWD_PPD_CONN_RP 0x0200
#define BWD_PPD_DEV_MASK 0x1000
#define BWD_PPD_DEV_USD 0x0000
#define BWD_PPD_DEV_DSD 0x1000
#define BWD_PPD_TOPO_MASK (BWD_PPD_CONN_MASK | BWD_PPD_DEV_MASK)
#define BWD_PPD_TOPO_PRI_USD (BWD_PPD_CONN_TRANSPARENT | BWD_PPD_DEV_USD)
#define BWD_PPD_TOPO_PRI_DSD (BWD_PPD_CONN_TRANSPARENT | BWD_PPD_DEV_DSD)
#define BWD_PPD_TOPO_SEC_USD (BWD_PPD_CONN_RP | BWD_PPD_DEV_USD)
#define BWD_PPD_TOPO_SEC_DSD (BWD_PPD_CONN_RP | BWD_PPD_DEV_DSD)
#define BWD_PPD_TOPO_B2B_USD (BWD_PPD_CONN_B2B | BWD_PPD_DEV_USD)
#define BWD_PPD_TOPO_B2B_DSD (BWD_PPD_CONN_B2B | BWD_PPD_DEV_DSD)
#define BWD_MW_COUNT 2
#define BWD_DB_COUNT 34
#define BWD_DB_VALID_MASK (BIT_ULL(BWD_DB_COUNT) - 1)
#define BWD_DB_MSIX_VECTOR_COUNT 34
#define BWD_DB_MSIX_VECTOR_SHIFT 1
#define BWD_DB_TOTAL_SHIFT 34
#define BWD_SPAD_COUNT 16
#define BWD_NTB_CTL_DOWN_BIT BIT(16)
#define BWD_NTB_CTL_ACTIVE(x) !(x & BWD_NTB_CTL_DOWN_BIT)
#define BWD_DESKEWSTS_DBERR BIT(15)
#define BWD_LTSSMERRSTS0_UNEXPECTEDEI BIT(20)
#define BWD_LTSSMSTATEJMP_FORCEDETECT BIT(2)
#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}
#endif
#define BWD_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
#define BWD_LINK_RECOVERY_TIME msecs_to_jiffies(500)
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
#define NTB_CTL_DISABLE BIT(1)
#define NTB_CTL_S2P_BAR2_SNOOP BIT(2)
#define NTB_CTL_P2S_BAR2_SNOOP BIT(4)
#define NTB_CTL_S2P_BAR4_SNOOP BIT(6)
#define NTB_CTL_P2S_BAR4_SNOOP BIT(8)
#define NTB_CTL_S2P_BAR5_SNOOP BIT(12)
#define NTB_CTL_P2S_BAR5_SNOOP BIT(14)
#define NTB_LNK_STA_ACTIVE_BIT 0x2000
#define NTB_LNK_STA_SPEED_MASK 0x000f
#define NTB_LNK_STA_WIDTH_MASK 0x03f0
#define NTB_LNK_STA_ACTIVE(x) (!!((x) & NTB_LNK_STA_ACTIVE_BIT))
#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK)
#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
/* Use the following addresses for translation between b2b ntb devices in case
* the hardware default values are not reliable. */
#define SNB_B2B_BAR0_USD_ADDR 0x1000000000000000ull
#define SNB_B2B_BAR2_USD_ADDR64 0x2000000000000000ull
#define SNB_B2B_BAR4_USD_ADDR64 0x4000000000000000ull
#define SNB_B2B_BAR4_USD_ADDR32 0x20000000u
#define SNB_B2B_BAR5_USD_ADDR32 0x40000000u
#define SNB_B2B_BAR0_DSD_ADDR 0x9000000000000000ull
#define SNB_B2B_BAR2_DSD_ADDR64 0xa000000000000000ull
#define SNB_B2B_BAR4_DSD_ADDR64 0xc000000000000000ull
#define SNB_B2B_BAR4_DSD_ADDR32 0xa0000000u
#define SNB_B2B_BAR5_DSD_ADDR32 0xc0000000u
/* The peer ntb secondary config space is 32KB fixed size */
#define SNB_B2B_MIN_SIZE 0x8000
/* flags to indicate hardware errata */
#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
/* flags to indicate unsafe api */
#define NTB_UNSAFE_DB BIT_ULL(0)
#define NTB_UNSAFE_SPAD BIT_ULL(1)
struct intel_ntb_dev;
struct intel_ntb_reg {
int (*poll_link)(struct intel_ntb_dev *ndev);
int (*link_is_up)(struct intel_ntb_dev *ndev);
u64 (*db_ioread)(void __iomem *mmio);
void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
unsigned long ntb_ctl;
resource_size_t db_size;
int mw_bar[];
};
#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
writel(val & 0xffffffff, addr);
writel(val >> 32, addr + 4);
}
#endif
struct intel_ntb_alt_reg {
unsigned long db_bell;
unsigned long db_mask;
unsigned long spad;
};
#define NTB_BAR_MMIO 0
#define NTB_BAR_23 2
#define NTB_BAR_4 4
#define NTB_BAR_5 5
#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
(1 << NTB_BAR_4))
#define NTB_SPLITBAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
(1 << NTB_BAR_4) | (1 << NTB_BAR_5))
#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
enum ntb_hw_event {
NTB_EVENT_SW_EVENT0 = 0,
NTB_EVENT_SW_EVENT1,
NTB_EVENT_SW_EVENT2,
NTB_EVENT_HW_ERROR,
NTB_EVENT_HW_LINK_UP,
NTB_EVENT_HW_LINK_DOWN,
struct intel_ntb_xlat_reg {
unsigned long bar0_base;
unsigned long bar2_xlat;
unsigned long bar2_limit;
};
struct ntb_mw {
dma_addr_t phys_addr;
void __iomem *vbase;
resource_size_t bar_sz;
struct intel_b2b_addr {
phys_addr_t bar0_addr;
phys_addr_t bar2_addr64;
phys_addr_t bar4_addr64;
phys_addr_t bar4_addr32;
phys_addr_t bar5_addr32;
};
struct ntb_db_cb {
int (*callback)(void *data, int db_num);
unsigned int db_num;
void *data;
struct ntb_device *ndev;
struct tasklet_struct irq_work;
struct intel_ntb_vec {
struct intel_ntb_dev *ndev;
int num;
};
#define WA_SNB_ERR 0x00000001
struct ntb_device {
struct pci_dev *pdev;
struct msix_entry *msix_entries;
void __iomem *reg_base;
struct ntb_mw *mw;
struct {
unsigned char max_mw;
unsigned char max_spads;
unsigned char max_db_bits;
unsigned char msix_cnt;
} limits;
struct {
void __iomem *ldb;
void __iomem *ldb_mask;
void __iomem *rdb;
void __iomem *bar2_xlat;
void __iomem *bar4_xlat;
void __iomem *bar5_xlat;
void __iomem *spad_write;
void __iomem *spad_read;
void __iomem *lnk_cntl;
void __iomem *lnk_stat;
void __iomem *spci_cmd;
} reg_ofs;
struct ntb_transport *ntb_transport;
void (*event_cb)(void *handle, enum ntb_hw_event event);
struct ntb_db_cb *db_cb;
unsigned char hw_type;
unsigned char conn_type;
unsigned char dev_type;
unsigned char num_msix;
unsigned char bits_per_vector;
unsigned char max_cbs;
unsigned char link_width;
unsigned char link_speed;
unsigned char link_status;
unsigned char split_bar;
struct intel_ntb_dev {
struct ntb_dev ntb;
/* offset of peer bar0 in b2b bar */
unsigned long b2b_off;
/* mw idx used to access peer bar0 */
unsigned int b2b_idx;
/* BAR45 is split into BAR4 and BAR5 */
bool bar4_split;
u32 ntb_ctl;
u32 lnk_sta;
unsigned char mw_count;
unsigned char spad_count;
unsigned char db_count;
unsigned char db_vec_count;
unsigned char db_vec_shift;
u64 db_valid_mask;
u64 db_link_mask;
u64 db_mask;
/* synchronize rmw access of db_mask and hw reg */
spinlock_t db_mask_lock;
struct msix_entry *msix;
struct intel_ntb_vec *vec;
const struct intel_ntb_reg *reg;
const struct intel_ntb_alt_reg *self_reg;
const struct intel_ntb_alt_reg *peer_reg;
const struct intel_ntb_xlat_reg *xlat_reg;
void __iomem *self_mmio;
void __iomem *peer_mmio;
phys_addr_t peer_addr;
struct delayed_work hb_timer;
unsigned long last_ts;
struct delayed_work hb_timer;
struct delayed_work lr_timer;
unsigned long hwerr_flags;
unsigned long unsafe_flags;
unsigned long unsafe_flags_ignore;
struct dentry *debugfs_dir;
struct dentry *debugfs_info;
unsigned int wa_flags;
};
/**
* ntb_max_cbs() - return the max callbacks
* @ndev: pointer to ntb_device instance
*
* Given the ntb pointer, return the maximum number of callbacks
*
* RETURNS: the maximum number of callbacks
*/
static inline unsigned char ntb_max_cbs(struct ntb_device *ndev)
{
return ndev->max_cbs;
}
/**
* ntb_max_mw() - return the max number of memory windows
* @ndev: pointer to ntb_device instance
*
* Given the ntb pointer, return the maximum number of memory windows
*
* RETURNS: the maximum number of memory windows
*/
static inline unsigned char ntb_max_mw(struct ntb_device *ndev)
{
return ndev->limits.max_mw;
}
/**
* ntb_hw_link_status() - return the hardware link status
* @ndev: pointer to ntb_device instance
*
* Returns true if the hardware is connected to the remote system
*
* RETURNS: true or false based on the hardware link state
*/
static inline bool ntb_hw_link_status(struct ntb_device *ndev)
{
return ndev->link_status == NTB_LINK_UP;
}
/**
* ntb_query_pdev() - return the pci_dev pointer
* @ndev: pointer to ntb_device instance
*
* Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
*
* RETURNS: a pointer to the ntb pci_dev
*/
static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
{
return ndev->pdev;
}
/**
* ntb_query_debugfs() - return the debugfs pointer
* @ndev: pointer to ntb_device instance
*
* Given the ntb pointer, return the debugfs directory pointer for the NTB
* hardware device
*
* RETURNS: a pointer to the debugfs directory
*/
static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
{
return ndev->debugfs_dir;
}
struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
void *transport);
void ntb_unregister_transport(struct ntb_device *ndev);
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
void *data, int (*db_cb_func)(void *data,
int db_num));
void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
int ntb_register_event_callback(struct ntb_device *ndev,
void (*event_cb_func)(void *handle,
enum ntb_hw_event event));
void ntb_unregister_event_callback(struct ntb_device *ndev);
int ntb_get_max_spads(struct ntb_device *ndev);
int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw);
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int idx);
void *ntb_find_transport(struct pci_dev *pdev);
int ntb_transport_init(struct pci_dev *pdev);
void ntb_transport_free(void *transport);
#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
#define ntb_ndev(ntb) container_of(ntb, struct intel_ntb_dev, ntb)
#define hb_ndev(work) container_of(work, struct intel_ntb_dev, hb_timer.work)
#endif
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
* PCIe NTB Transport Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
@@ -56,9 +58,22 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "hw/intel/ntb_hw_intel.h"
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
#define NTB_TRANSPORT_VERSION 3
#define NTB_TRANSPORT_VERSION 4
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
@@ -72,10 +87,12 @@ static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
static struct dentry *nt_debugfs_dir;
struct ntb_queue_entry {
/* ntb_queue list reference */
struct list_head entry;
/* pointers to data to be transfered */
/* pointers to data to be transferred */
void *cb_data;
void *buf;
unsigned int len;
@@ -94,14 +111,16 @@ struct ntb_rx_info {
};
struct ntb_transport_qp {
struct ntb_transport *transport;
struct ntb_device *ndev;
struct ntb_transport_ctx *transport;
struct ntb_dev *ndev;
void *cb_data;
struct dma_chan *dma_chan;
bool client_ready;
bool qp_link;
bool link_is_up;
u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
u64 qp_bit;
struct ntb_rx_info __iomem *rx_info;
struct ntb_rx_info *remote_rx_info;
@@ -127,6 +146,7 @@ struct ntb_transport_qp {
unsigned int rx_max_entry;
unsigned int rx_max_frame;
dma_cookie_t last_cookie;
struct tasklet_struct rxc_db_work;
void (*event_handler)(void *data, int status);
struct delayed_work link_work;
@@ -153,33 +173,44 @@ struct ntb_transport_qp {
};
struct ntb_transport_mw {
size_t size;
phys_addr_t phys_addr;
resource_size_t phys_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buff_size;
void *virt_addr;
dma_addr_t dma_addr;
};
struct ntb_transport_client_dev {
struct list_head entry;
struct ntb_transport_ctx *nt;
struct device dev;
};
struct ntb_transport {
struct ntb_transport_ctx {
struct list_head entry;
struct list_head client_devs;
struct ntb_device *ndev;
struct ntb_transport_mw *mw;
struct ntb_transport_qp *qps;
unsigned int max_qps;
unsigned long qp_bitmap;
bool transport_link;
struct ntb_dev *ndev;
struct ntb_transport_mw *mw_vec;
struct ntb_transport_qp *qp_vec;
unsigned int mw_count;
unsigned int qp_count;
u64 qp_bitmap;
u64 qp_bitmap_free;
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
};
enum {
DESC_DONE_FLAG = 1 << 0,
LINK_DOWN_FLAG = 1 << 1,
DESC_DONE_FLAG = BIT(0),
LINK_DOWN_FLAG = BIT(1),
};
struct ntb_payload_header {
@@ -200,68 +231,69 @@ enum {
MAX_SPAD,
};
#define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev))
#define dev_client_dev(__dev) \
container_of((__dev), struct ntb_transport_client_dev, dev)
#define drv_client(__drv) \
container_of((__drv), struct ntb_transport_client, driver)
#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
static int ntb_match_bus(struct device *dev, struct device_driver *drv)
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv)
{
return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
static int ntb_client_probe(struct device *dev)
static int ntb_transport_bus_probe(struct device *dev)
{
const struct ntb_client *drv = container_of(dev->driver,
struct ntb_client, driver);
struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
const struct ntb_transport_client *client;
int rc = -EINVAL;
get_device(dev);
if (drv && drv->probe)
rc = drv->probe(pdev);
client = drv_client(dev->driver);
rc = client->probe(dev);
if (rc)
put_device(dev);
return rc;
}
static int ntb_client_remove(struct device *dev)
static int ntb_transport_bus_remove(struct device *dev)
{
const struct ntb_client *drv = container_of(dev->driver,
struct ntb_client, driver);
struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
const struct ntb_transport_client *client;
if (drv && drv->remove)
drv->remove(pdev);
client = drv_client(dev->driver);
client->remove(dev);
put_device(dev);
return 0;
}
static struct bus_type ntb_bus_type = {
.name = "ntb_bus",
.match = ntb_match_bus,
.probe = ntb_client_probe,
.remove = ntb_client_remove,
static struct bus_type ntb_transport_bus = {
.name = "ntb_transport",
.match = ntb_transport_bus_match,
.probe = ntb_transport_bus_probe,
.remove = ntb_transport_bus_remove,
};
static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport *nt)
static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
if (list_empty(&ntb_transport_list)) {
int rc = bus_register(&ntb_bus_type);
if (rc)
return rc;
}
list_add(&nt->entry, &ntb_transport_list);
return 0;
}
static void ntb_bus_remove(struct ntb_transport *nt)
static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
struct ntb_transport_client_dev *client_dev, *cd;
@@ -273,29 +305,26 @@ static void ntb_bus_remove(struct ntb_transport *nt)
}
list_del(&nt->entry);
if (list_empty(&ntb_transport_list))
bus_unregister(&ntb_bus_type);
}
static void ntb_client_release(struct device *dev)
static void ntb_transport_client_release(struct device *dev)
{
struct ntb_transport_client_dev *client_dev;
client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
client_dev = dev_client_dev(dev);
kfree(client_dev);
}
/**
* ntb_unregister_client_dev - Unregister NTB client device
* ntb_transport_unregister_client_dev - Unregister NTB client device
* @device_name: Name of NTB client device
*
* Unregister an NTB client device with the NTB transport layer
*/
void ntb_unregister_client_dev(char *device_name)
void ntb_transport_unregister_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client, *cd;
struct ntb_transport *nt;
struct ntb_transport_ctx *nt;
list_for_each_entry(nt, &ntb_transport_list, entry)
list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
@@ -305,18 +334,18 @@ void ntb_unregister_client_dev(char *device_name)
device_unregister(&client->dev);
}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
* ntb_register_client_dev - Register NTB client device
* ntb_transport_register_client_dev - Register NTB client device
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
*/
int ntb_register_client_dev(char *device_name)
int ntb_transport_register_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client_dev;
struct ntb_transport *nt;
struct ntb_transport_ctx *nt;
int rc, i = 0;
if (list_empty(&ntb_transport_list))
@@ -325,7 +354,7 @@ int ntb_register_client_dev(char *device_name)
list_for_each_entry(nt, &ntb_transport_list, entry) {
struct device *dev;
client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
client_dev = kzalloc(sizeof(*client_dev),
GFP_KERNEL);
if (!client_dev) {
rc = -ENOMEM;
......@@ -336,9 +365,9 @@ int ntb_register_client_dev(char *device_name)
/* setup and register client devices */
dev_set_name(dev, "%s%d", device_name, i);
dev->bus = &ntb_bus_type;
dev->release = ntb_client_release;
dev->parent = &ntb_query_pdev(nt->ndev)->dev;
dev->bus = &ntb_transport_bus;
dev->release = ntb_transport_client_release;
dev->parent = &nt->ndev->dev;
rc = device_register(dev);
if (rc) {
......@@ -353,11 +382,11 @@ int ntb_register_client_dev(char *device_name)
return 0;
err:
ntb_unregister_client_dev(device_name);
ntb_transport_unregister_client_dev(device_name);
return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
/**
* ntb_transport_register_client - Register NTB client driver
......@@ -367,9 +396,9 @@ EXPORT_SYMBOL_GPL(ntb_register_client_dev);
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_transport_register_client(struct ntb_client *drv)
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
drv->driver.bus = &ntb_bus_type;
drv->driver.bus = &ntb_transport_bus;
if (list_empty(&ntb_transport_list))
return -ENODEV;
......@@ -386,7 +415,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_register_client);
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
void ntb_transport_unregister_client(struct ntb_client *drv)
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
driver_unregister(&drv->driver);
}
......@@ -452,8 +481,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
"tx_max_entry - \t%u\n", qp->tx_max_entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
"Up" : "Down");
"\nQP Link %s\n",
qp->link_is_up ? "Up" : "Down");
if (out_offset > out_count)
out_offset = out_count;
......@@ -497,26 +526,31 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
return entry;
}
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp = &nt->qps[qp_num];
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
struct ntb_transport_mw *mw;
unsigned int rx_size, num_qps_mw;
u8 mw_num, mw_max;
unsigned int mw_num, mw_count, qp_count;
unsigned int i;
mw_max = ntb_max_mw(nt->ndev);
mw_num = QP_TO_MW(nt->ndev, qp_num);
mw_count = nt->mw_count;
qp_count = nt->qp_count;
WARN_ON(nt->mw[mw_num].virt_addr == NULL);
mw_num = QP_TO_MW(nt, qp_num);
mw = &nt->mw_vec[mw_num];
if (!mw->virt_addr)
return -ENOMEM;
if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
num_qps_mw = nt->max_qps / mw_max + 1;
if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = nt->max_qps / mw_max;
num_qps_mw = qp_count / mw_count;
rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
rx_size -= sizeof(struct ntb_rx_info);
qp->remote_rx_info = qp->rx_buff + rx_size;
......@@ -530,49 +564,63 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
sizeof(struct ntb_payload_header);
void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
sizeof(struct ntb_payload_header));
memset(offset, 0, sizeof(struct ntb_payload_header));
}
qp->rx_pkts = 0;
qp->tx_pkts = 0;
qp->tx_index = 0;
return 0;
}
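The queue-to-window assignment above is round-robin (QP_TO_MW is qp % mw_count), with each window's buffer sliced evenly among the queues it carries. A worked example of the arithmetic, assuming 5 queue pairs over 2 memory windows:

```c
/* Sketch of the split logic above, outside kernel context:
 * with qp_count = 5, mw_count = 2:
 *   QP_TO_MW:   qps 0, 2, 4 -> mw 0;  qps 1, 3 -> mw 1
 *   num_qps_mw: mw 0 holds 5/2 + 1 = 3 queues, mw 1 holds 5/2 = 2,
 *   so mw 0's buffer is divided into 3 rx rings, mw 1's into 2. */
static unsigned int num_qps_for_mw(unsigned int qp_count,
				   unsigned int mw_count,
				   unsigned int mw_num)
{
	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		return qp_count / mw_count + 1;

	return qp_count / mw_count;
}
```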
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
struct ntb_transport_mw *mw = &nt->mw[num_mw];
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
if (!mw->virt_addr)
return;
dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
ntb_mw_clear_trans(nt->ndev, num_mw);
dma_free_coherent(&pdev->dev, mw->buff_size,
mw->virt_addr, mw->dma_addr);
mw->xlat_size = 0;
mw->buff_size = 0;
mw->virt_addr = NULL;
}
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
unsigned int size)
{
struct ntb_transport_mw *mw = &nt->mw[num_mw];
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
unsigned int xlat_size, buff_size;
int rc;
xlat_size = round_up(size, mw->xlat_align_size);
buff_size = round_up(size, mw->xlat_align);
/* No need to re-setup */
if (mw->size == ALIGN(size, 4096))
if (mw->xlat_size == xlat_size)
return 0;
if (mw->size != 0)
if (mw->buff_size)
ntb_free_mw(nt, num_mw);
/* Alloc memory for receiving data. Must be 4k aligned */
mw->size = ALIGN(size, 4096);
/* Alloc memory for receiving data. Must be aligned */
mw->xlat_size = xlat_size;
mw->buff_size = buff_size;
mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
GFP_KERNEL);
mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
&mw->dma_addr, GFP_KERNEL);
if (!mw->virt_addr) {
mw->size = 0;
dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
(int) mw->size);
mw->xlat_size = 0;
mw->buff_size = 0;
dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
buff_size);
return -ENOMEM;
}
@@ -582,34 +630,39 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
* is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB.
*/
if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr);
ntb_free_mw(nt, num_mw);
return -ENOMEM;
}
/* Notify HW the memory location of the receive buffer */
ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
if (rc) {
dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
ntb_free_mw(nt, num_mw);
return -EIO;
}
return 0;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
struct ntb_transport *nt = qp->transport;
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev = nt->ndev->pdev;
if (qp->qp_link == NTB_LINK_DOWN) {
if (qp->link_is_up) {
cancel_delayed_work_sync(&qp->link_work);
return;
}
if (qp->event_handler)
qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
qp->qp_link = NTB_LINK_DOWN;
qp->link_is_up = false;
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
}
static void ntb_qp_link_cleanup_work(struct work_struct *work)
@@ -617,11 +670,11 @@ static void ntb_qp_link_cleanup_work(struct work_struct *work)
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_cleanup);
struct ntb_transport *nt = qp->transport;
struct ntb_transport_ctx *nt = qp->transport;
ntb_qp_link_cleanup(qp);
if (nt->transport_link == NTB_LINK_UP)
if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
@@ -631,180 +684,132 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct ntb_transport *nt)
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
int i;
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
/* Pass along the info to any clients */
for (i = 0; i < nt->max_qps; i++)
if (!test_bit(i, &nt->qp_bitmap))
ntb_qp_link_cleanup(&nt->qps[i]);
for (i = 0; i < nt->qp_count; i++)
if (qp_bitmap_alloc & BIT_ULL(i)) {
qp = &nt->qp_vec[i];
ntb_qp_link_cleanup(qp);
cancel_work_sync(&qp->link_cleanup);
cancel_delayed_work_sync(&qp->link_work);
}
if (nt->transport_link == NTB_LINK_DOWN)
if (!nt->link_is_up)
cancel_delayed_work_sync(&nt->link_work);
else
nt->transport_link = NTB_LINK_DOWN;
/* The scratchpad registers keep the values if the remote side
* goes down, blast them now to give them a sane value the next
* time they are accessed
*/
for (i = 0; i < MAX_SPAD; i++)
ntb_write_local_spad(nt->ndev, i, 0);
ntb_spad_write(nt->ndev, i, 0);
}
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
struct ntb_transport *nt = container_of(work, struct ntb_transport,
link_cleanup);
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_cleanup);
ntb_transport_link_cleanup(nt);
}
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
static void ntb_transport_event_callback(void *data)
{
struct ntb_transport *nt = data;
struct ntb_transport_ctx *nt = data;
switch (event) {
case NTB_EVENT_HW_LINK_UP:
if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work, 0);
break;
case NTB_EVENT_HW_LINK_DOWN:
else
schedule_work(&nt->link_cleanup);
break;
default:
BUG();
}
}
static void ntb_transport_link_work(struct work_struct *work)
{
struct ntb_transport *nt = container_of(work, struct ntb_transport,
link_work.work);
struct ntb_device *ndev = nt->ndev;
struct pci_dev *pdev = ntb_query_pdev(ndev);
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_work.work);
struct ntb_dev *ndev = nt->ndev;
struct pci_dev *pdev = ndev->pdev;
resource_size_t size;
u32 val;
int rc, i;
int rc, i, spad;
/* send the local info, in the opposite order of the way we read it */
for (i = 0; i < ntb_max_mw(ndev); i++) {
rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
ntb_get_mw_size(ndev, i) >> 32);
if (rc) {
dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
(u32)(ntb_get_mw_size(ndev, i) >> 32),
MW0_SZ_HIGH + (i * 2));
goto out;
}
for (i = 0; i < nt->mw_count; i++) {
size = nt->mw_vec[i].phys_size;
rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
(u32) ntb_get_mw_size(ndev, i));
if (rc) {
dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
(u32) ntb_get_mw_size(ndev, i),
MW0_SZ_LOW + (i * 2));
goto out;
}
}
if (max_mw_size && size > max_mw_size)
size = max_mw_size;
rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
ntb_max_mw(ndev), NUM_MWS);
goto out;
}
spad = MW0_SZ_HIGH + (i * 2);
ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));
rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
nt->max_qps, NUM_QPS);
goto out;
spad = MW0_SZ_LOW + (i * 2);
ntb_peer_spad_write(ndev, spad, (u32)size);
}
rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
NTB_TRANSPORT_VERSION, VERSION);
goto out;
}
ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
/* Query the remote side for its info */
rc = ntb_read_remote_spad(ndev, VERSION, &val);
if (rc) {
dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
goto out;
}
ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);
if (val != NTB_TRANSPORT_VERSION)
goto out;
dev_dbg(&pdev->dev, "Remote version = %d\n", val);
ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
if (rc) {
dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
/* Query the remote side for its info */
val = ntb_peer_spad_read(ndev, VERSION);
dev_dbg(&pdev->dev, "Remote version = %d\n", val);
if (val != NTB_TRANSPORT_VERSION)
goto out;
}
if (val != nt->max_qps)
goto out;
val = ntb_peer_spad_read(ndev, NUM_QPS);
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
if (rc) {
dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
if (val != nt->qp_count)
goto out;
}
if (val != ntb_max_mw(ndev))
goto out;
val = ntb_peer_spad_read(ndev, NUM_MWS);
dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
if (val != nt->mw_count)
goto out;
for (i = 0; i < ntb_max_mw(ndev); i++) {
for (i = 0; i < nt->mw_count; i++) {
u64 val64;
rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
if (rc) {
dev_err(&pdev->dev, "Error reading remote spad %d\n",
MW0_SZ_HIGH + (i * 2));
goto out1;
}
val64 = (u64) val << 32;
rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
if (rc) {
dev_err(&pdev->dev, "Error reading remote spad %d\n",
MW0_SZ_LOW + (i * 2));
goto out1;
}
val = ntb_peer_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
val64 = (u64)val << 32;
val = ntb_peer_spad_read(ndev, MW0_SZ_LOW + (i * 2));
val64 |= val;
dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
rc = ntb_set_mw(nt, i, val64);
if (rc)
goto out1;
}
nt->transport_link = NTB_LINK_UP;
nt->link_is_up = true;
for (i = 0; i < nt->max_qps; i++) {
struct ntb_transport_qp *qp = &nt->qps[i];
for (i = 0; i < nt->qp_count; i++) {
struct ntb_transport_qp *qp = &nt->qp_vec[i];
ntb_transport_setup_qp_mw(nt, i);
if (qp->client_ready == NTB_LINK_UP)
if (qp->client_ready)
schedule_delayed_work(&qp->link_work, 0);
}
return;
out1:
for (i = 0; i < ntb_max_mw(ndev); i++)
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
out:
if (ntb_hw_link_status(ndev))
if (ntb_link_is_up(ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
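For reference, the scratchpad handshake implemented by this function can be summarized as follows (both sides run the same sequence; per the comment in the code, local info is sent in the opposite order of how it is read):

```c
/*
 * Handshake over peer scratchpads (per the code above):
 *
 *   write peer:  MW0_SZ_HIGH, MW0_SZ_LOW, ... for each mw,
 *                then NUM_MWS, NUM_QPS, VERSION
 *   read  peer:  VERSION, NUM_QPS, NUM_MWS, then each MW size
 *
 * VERSION is written last, so once the peer's VERSION matches, the
 * values written before it can be assumed valid.
 */
```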
@@ -814,73 +819,73 @@ static void ntb_qp_link_work(struct work_struct *work)
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_work.work);
struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
struct ntb_transport *nt = qp->transport;
int rc, val;
struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_transport_ctx *nt = qp->transport;
int val;
WARN_ON(nt->transport_link != NTB_LINK_UP);
WARN_ON(!nt->link_is_up);
rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
if (rc) {
dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
return;
}
val = ntb_spad_read(nt->ndev, QP_LINKS);
rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
if (rc)
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
val | 1 << qp->qp_num, QP_LINKS);
ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
if (rc)
dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
ntb_peer_spad_read(nt->ndev, QP_LINKS);
dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
/* See if the remote side is up */
if (1 << qp->qp_num & val) {
qp->qp_link = NTB_LINK_UP;
if (val & BIT(qp->qp_num)) {
dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
qp->link_is_up = true;
if (qp->event_handler)
qp->event_handler(qp->cb_data, NTB_LINK_UP);
} else if (nt->transport_link == NTB_LINK_UP)
qp->event_handler(qp->cb_data, qp->link_is_up);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static int ntb_transport_init_queue(struct ntb_transport *nt,
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp;
struct ntb_transport_mw *mw;
phys_addr_t mw_base;
resource_size_t mw_size;
unsigned int num_qps_mw, tx_size;
u8 mw_num, mw_max;
unsigned int mw_num, mw_count, qp_count;
u64 qp_offset;
mw_max = ntb_max_mw(nt->ndev);
mw_num = QP_TO_MW(nt->ndev, qp_num);
mw_count = nt->mw_count;
qp_count = nt->qp_count;
mw_num = QP_TO_MW(nt, qp_num);
mw = &nt->mw_vec[mw_num];
qp = &nt->qps[qp_num];
qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num;
qp->transport = nt;
qp->ndev = nt->ndev;
qp->qp_link = NTB_LINK_DOWN;
qp->client_ready = NTB_LINK_DOWN;
qp->link_is_up = false;
qp->client_ready = false;
qp->event_handler = NULL;
if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
num_qps_mw = nt->max_qps / mw_max + 1;
if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = nt->max_qps / mw_max;
num_qps_mw = qp_count / mw_count;
tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
qp_offset = qp_num / mw_max * tx_size;
qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
mw_base = nt->mw_vec[mw_num].phys_addr;
mw_size = nt->mw_vec[mw_num].phys_size;
tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * qp_num / mw_count;
qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
if (!qp->tx_mw)
return -EINVAL;
qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
qp->tx_mw_phys = mw_base + qp_offset;
if (!qp->tx_mw_phys)
return -EINVAL;
@@ -891,16 +896,19 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
if (ntb_query_debugfs(nt->ndev)) {
if (nt_debugfs_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
ntb_query_debugfs(nt->ndev));
nt_debugfs_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
&ntb_qp_debugfs_stats);
} else {
qp->debugfs_dir = NULL;
qp->debugfs_stats = NULL;
}
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
@@ -914,46 +922,84 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
(unsigned long)qp);
return 0;
}
int ntb_transport_init(struct pci_dev *pdev)
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport *nt;
struct ntb_transport_ctx *nt;
struct ntb_transport_mw *mw;
unsigned int mw_count, qp_count;
u64 qp_bitmap;
int rc, i;
nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"doorbell is unsafe, proceed anyway...\n");
if (ntb_spad_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"scratchpad is unsafe, proceed anyway...\n");
nt = kzalloc(sizeof(*nt), GFP_KERNEL);
if (!nt)
return -ENOMEM;
nt->ndev = ntb_register_transport(pdev, nt);
if (!nt->ndev) {
rc = -EIO;
nt->ndev = ndev;
mw_count = ntb_mw_count(ndev);
nt->mw_count = mw_count;
nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);
if (!nt->mw_vec) {
rc = -ENOMEM;
goto err;
}
nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
GFP_KERNEL);
if (!nt->mw) {
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
&mw->xlat_align, &mw->xlat_align_size);
if (rc)
goto err1;
mw->vbase = ioremap(mw->phys_addr, mw->phys_size);
if (!mw->vbase) {
rc = -ENOMEM;
goto err1;
}
if (max_num_clients)
nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
else
nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
mw->buff_size = 0;
mw->xlat_size = 0;
mw->virt_addr = NULL;
mw->dma_addr = 0;
}
nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
GFP_KERNEL);
if (!nt->qps) {
qp_bitmap = ntb_db_valid_mask(ndev);
qp_count = ilog2(qp_bitmap);
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
else if (mw_count < qp_count)
qp_count = mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
nt->qp_count = qp_count;
nt->qp_bitmap = qp_bitmap;
nt->qp_bitmap_free = qp_bitmap;
nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);
if (!nt->qp_vec) {
rc = -ENOMEM;
goto err2;
}
nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
for (i = 0; i < nt->max_qps; i++) {
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
goto err3;
@@ -962,8 +1008,7 @@ int ntb_transport_init(struct pci_dev *pdev)
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
rc = ntb_register_event_callback(nt->ndev,
ntb_transport_event_callback);
rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
if (rc)
goto err3;
@@ -972,51 +1017,61 @@ int ntb_transport_init(struct pci_dev *pdev)
if (rc)
goto err4;
if (ntb_hw_link_status(nt->ndev))
schedule_delayed_work(&nt->link_work, 0);
nt->link_is_up = false;
ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
ntb_link_event(ndev);
return 0;
err4:
ntb_unregister_event_callback(nt->ndev);
ntb_clear_ctx(ndev);
err3:
kfree(nt->qps);
kfree(nt->qp_vec);
err2:
kfree(nt->mw);
kfree(nt->mw_vec);
err1:
ntb_unregister_transport(nt->ndev);
while (i--) {
mw = &nt->mw_vec[i];
iounmap(mw->vbase);
}
err:
kfree(nt);
return rc;
}
void ntb_transport_free(void *transport)
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport *nt = transport;
struct ntb_device *ndev = nt->ndev;
struct ntb_transport_ctx *nt = ndev->ctx;
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
int i;
ntb_transport_link_cleanup(nt);
cancel_work_sync(&nt->link_cleanup);
cancel_delayed_work_sync(&nt->link_work);
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
/* verify that all the qp's are freed */
for (i = 0; i < nt->max_qps; i++) {
if (!test_bit(i, &nt->qp_bitmap))
ntb_transport_free_queue(&nt->qps[i]);
debugfs_remove_recursive(nt->qps[i].debugfs_dir);
for (i = 0; i < nt->qp_count; i++) {
qp = &nt->qp_vec[i];
if (qp_bitmap_alloc & BIT_ULL(i))
ntb_transport_free_queue(qp);
debugfs_remove_recursive(qp->debugfs_dir);
}
ntb_bus_remove(nt);
cancel_delayed_work_sync(&nt->link_work);
ntb_link_disable(ndev);
ntb_clear_ctx(ndev);
ntb_unregister_event_callback(ndev);
ntb_bus_remove(nt);
for (i = 0; i < ntb_max_mw(ndev); i++)
for (i = nt->mw_count; i--; ) {
ntb_free_mw(nt, i);
iounmap(nt->mw_vec[i].vbase);
}
kfree(nt->qps);
kfree(nt->mw);
ntb_unregister_transport(ndev);
kfree(nt->qp_vec);
kfree(nt->mw_vec);
kfree(nt);
}
@@ -1028,15 +1083,13 @@ static void ntb_rx_copy_callback(void *data)
unsigned int len = entry->len;
struct ntb_payload_header *hdr = entry->rx_hdr;
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
hdr->flags = 0;
iowrite32(entry->index, &qp->rx_info->entry);
ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
if (qp->rx_handler && qp->client_ready)
qp->rx_handler(qp, qp->cb_data, cb_data, len);
}
@@ -1047,6 +1100,9 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
memcpy(buf, offset, len);
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
ntb_rx_copy_callback(entry);
}
@@ -1071,8 +1127,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
goto err_wait;
device = chan->device;
pay_off = (size_t) offset & ~PAGE_MASK;
buff_off = (size_t) buf & ~PAGE_MASK;
pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
goto err_wait;
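/* is_dma_copy_aligned() checks the page offsets of the source and
 * destination buffers (and the length) against the DMA device's copy
 * alignment requirement; if the engine cannot handle this transfer, the
 * err_wait path falls back to a CPU memcpy.
 */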
@@ -1138,86 +1194,104 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
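/* Each receive slot is rx_max_frame bytes: the payload occupies the
 * start of the slot and the struct ntb_payload_header is written at the
 * very end, so the header lands after the data it describes.
 */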
entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
if (!entry) {
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"no buffer - HDR ver %u, len %d, flags %x\n",
hdr->ver, hdr->len, hdr->flags);
qp->rx_err_no_buf++;
return -ENOMEM;
}
dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
qp->qp_num, hdr->ver, hdr->len, hdr->flags);
if (!(hdr->flags & DESC_DONE_FLAG)) {
ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
&qp->rx_pend_q);
dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
qp->rx_ring_empty++;
return -EAGAIN;
}
if (hdr->ver != (u32) qp->rx_pkts) {
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"qp %d: version mismatch, expected %llu - got %u\n",
qp->qp_num, qp->rx_pkts, hdr->ver);
ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
&qp->rx_pend_q);
if (hdr->flags & LINK_DOWN_FLAG) {
dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
ntb_qp_link_down(qp);
hdr->flags = 0;
iowrite32(qp->rx_index, &qp->rx_info->entry);
return 0;
}
if (hdr->ver != (u32)qp->rx_pkts) {
dev_dbg(&qp->ndev->pdev->dev,
"version mismatch, expected %llu - got %u\n",
qp->rx_pkts, hdr->ver);
qp->rx_err_ver++;
return -EIO;
}
if (hdr->flags & LINK_DOWN_FLAG) {
ntb_qp_link_down(qp);
entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
rc = -ENOMEM;
goto err;
}
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"rx offset %u, ver %u - %d payload received, buf size %d\n",
qp->rx_index, hdr->ver, hdr->len, entry->len);
qp->rx_bytes += hdr->len;
qp->rx_pkts++;
if (hdr->len > entry->len) {
qp->rx_err_oflow++;
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"RX overflow! Wanted %d got %d\n",
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
rc = -EIO;
goto err;
}
dev_dbg(&qp->ndev->pdev->dev,
"RX OK index %u ver %u size %d into buf size %d\n",
qp->rx_index, hdr->ver, hdr->len, entry->len);
qp->rx_bytes += hdr->len;
qp->rx_pkts++;
entry->index = qp->rx_index;
entry->rx_hdr = hdr;
ntb_async_rx(entry, offset, hdr->len);
out:
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
err:
ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
/* FIXME: if this synchronous update of the rx_index gets ahead of the
 * asynchronous ntb_rx_copy_callback of the previous entry, there are
 * three scenarios:
 *
 * 1) The peer might miss this update, but observe the update
 * from the memcpy completion callback.  In this case, the buffer will
 * not be freed on the peer to be reused for a different packet.  The
 * successful rx of a later packet would clear the condition, but the
 * condition could persist if several rx fail in a row.
 *
 * 2) The peer may observe this update before the asynchronous copy of
 * prior packets is completed.  The peer may overwrite the buffers of
 * the prior packets before they are copied.
 *
 * 3) Both: the peer may observe the update, and then observe the index
 * decrement by the asynchronous completion callback.  Who knows what
 * badness that will cause.
 */
hdr->flags = 0;
iowrite32(qp->rx_index, &qp->rx_info->entry);
goto out;
return rc;
}
static int ntb_transport_rxc_db(void *data, int db_num)
static void ntb_transport_rxc_db(unsigned long data)
{
struct ntb_transport_qp *qp = data;
struct ntb_transport_qp *qp = (void *)data;
int rc, i;
dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
__func__, db_num);
dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
__func__, qp->qp_num);
/* Limit the number of packets processed in a single interrupt to
* provide fairness to others
@@ -1231,7 +1305,21 @@ static int ntb_transport_rxc_db(void *data, int db_num)
if (qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
return i;
if (i == qp->rx_max_entry) {
/* there is more work to do */
tasklet_schedule(&qp->rxc_db_work);
} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
/* the doorbell bit is set: clear it */
ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
/* ntb_db_read ensures ntb_db_clear write is committed */
ntb_db_read(qp->ndev);
/* an interrupt may have arrived between finishing
* ntb_process_rxc and clearing the doorbell bit:
* there might be some more work to do.
*/
tasklet_schedule(&qp->rxc_db_work);
}
}
static void ntb_tx_copy_callback(void *data)
@@ -1240,11 +1328,9 @@ static void ntb_tx_copy_callback(void *data)
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
/* Ensure that the data is fully copied out before setting the flags */
wmb();
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
ntb_ring_doorbell(qp->ndev, qp->qp_num);
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
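/* Publishing DESC_DONE_FLAG in the peer-visible header and then setting
 * the peer doorbell bit hands the frame to the remote side, which picks
 * it up in its receive tasklet.
 */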
/* The entry length can only be zero if the packet is intended to be a
* "link down" or similar. Since no payload is being sent in these
@@ -1265,6 +1351,9 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
memcpy_toio(offset, entry->buf, entry->len);
/* Ensure that the data is fully copied out before setting the flags */
wmb();
ntb_tx_copy_callback(entry);
}
@@ -1288,7 +1377,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
entry->tx_hdr = hdr;
iowrite32(entry->len, &hdr->len);
iowrite32((u32) qp->tx_pkts, &hdr->ver);
iowrite32((u32)qp->tx_pkts, &hdr->ver);
if (!chan)
goto err;
@@ -1298,8 +1387,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
device = chan->device;
dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
buff_off = (size_t) buf & ~PAGE_MASK;
dest_off = (size_t) dest & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
@@ -1347,9 +1436,6 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
static int ntb_process_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
entry->buf);
if (qp->tx_index == qp->remote_rx_info->entry) {
qp->tx_ring_full++;
return -EAGAIN;
@@ -1376,14 +1462,14 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_queue_entry *entry;
int i, rc;
if (qp->qp_link == NTB_LINK_DOWN)
if (!qp->link_is_up)
return;
qp->qp_link = NTB_LINK_DOWN;
qp->link_is_up = false;
dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
@@ -1422,18 +1508,21 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
* RETURNS: pointer to newly created ntb_queue, NULL on error.
*/
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
ntb_transport_create_queue(void *data, struct device *client_dev,
const struct ntb_queue_handlers *handlers)
{
struct ntb_dev *ndev;
struct pci_dev *pdev;
struct ntb_transport_ctx *nt;
struct ntb_queue_entry *entry;
struct ntb_transport_qp *qp;
struct ntb_transport *nt;
u64 qp_bit;
unsigned int free_queue;
int rc, i;
int i;
nt = ntb_find_transport(pdev);
if (!nt)
goto err;
ndev = dev_ntb(client_dev->parent);
pdev = ndev->pdev;
nt = ndev->ctx;
free_queue = ffs(nt->qp_bitmap);
if (!free_queue)
@@ -1442,9 +1531,11 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
/* decrement free_queue to make it zero based */
free_queue--;
clear_bit(free_queue, &nt->qp_bitmap);
qp = &nt->qp_vec[free_queue];
qp_bit = BIT_ULL(qp->qp_num);
nt->qp_bitmap_free &= ~qp_bit;
qp = &nt->qps[free_queue];
qp->cb_data = data;
qp->rx_handler = handlers->rx_handler;
qp->tx_handler = handlers->tx_handler;
@@ -1458,7 +1549,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
}
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto err1;
@@ -1468,7 +1559,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
}
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto err2;
@@ -1477,10 +1568,8 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
&qp->tx_free_q);
}
rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
ntb_transport_rxc_db);
if (rc)
goto err2;
ntb_db_clear(qp->ndev, qp_bit);
ntb_db_clear_mask(qp->ndev, qp_bit);
dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
@@ -1494,7 +1583,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
kfree(entry);
if (qp->dma_chan)
dmaengine_put();
set_bit(free_queue, &nt->qp_bitmap);
nt->qp_bitmap_free |= qp_bit;
err:
return NULL;
}
@@ -1508,13 +1597,15 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
if (!qp)
return;
pdev = ntb_query_pdev(qp->ndev);
pdev = qp->ndev->pdev;
if (qp->dma_chan) {
struct dma_chan *chan = qp->dma_chan;
@@ -1531,10 +1622,18 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
dmaengine_put();
}
ntb_unregister_db_callback(qp->ndev, qp->qp_num);
qp_bit = BIT_ULL(qp->qp_num);
ntb_db_set_mask(qp->ndev, qp_bit);
tasklet_disable(&qp->rxc_db_work);
cancel_delayed_work_sync(&qp->link_work);
qp->cb_data = NULL;
qp->rx_handler = NULL;
qp->tx_handler = NULL;
qp->event_handler = NULL;
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry);
@@ -1546,7 +1645,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
set_bit(qp->qp_num, &qp->transport->qp_bitmap);
nt->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1567,7 +1666,7 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
struct ntb_queue_entry *entry;
void *buf;
if (!qp || qp->client_ready == NTB_LINK_UP)
if (!qp || qp->client_ready)
return NULL;
entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
@@ -1636,7 +1735,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
if (!qp || qp->qp_link != NTB_LINK_UP || !len)
if (!qp || !qp->link_is_up || !len)
return -EINVAL;
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1670,9 +1769,9 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp)
if (!qp)
return;
qp->client_ready = NTB_LINK_UP;
qp->client_ready = true;
if (qp->transport->transport_link == NTB_LINK_UP)
if (qp->transport->link_is_up)
schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
@@ -1688,27 +1787,20 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
struct pci_dev *pdev;
int rc, val;
int val;
if (!qp)
return;
pdev = ntb_query_pdev(qp->ndev);
qp->client_ready = NTB_LINK_DOWN;
pdev = qp->ndev->pdev;
qp->client_ready = false;
rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
if (rc) {
dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
return;
}
val = ntb_spad_read(qp->ndev, QP_LINKS);
rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
val & ~(1 << qp->qp_num));
if (rc)
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
val & ~(1 << qp->qp_num), QP_LINKS);
ntb_peer_spad_write(qp->ndev, QP_LINKS,
val & ~BIT(qp->qp_num));
if (qp->qp_link == NTB_LINK_UP)
if (qp->link_is_up)
ntb_send_link_down(qp);
else
cancel_delayed_work_sync(&qp->link_work);
@@ -1728,7 +1820,7 @@ bool ntb_transport_link_query(struct ntb_transport_qp *qp)
if (!qp)
return false;
return qp->qp_link == NTB_LINK_UP;
return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1774,3 +1866,69 @@ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
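/* Doorbell context callback: invoked by the hardware driver for each
 * interrupt vector.  ntb_db_vector_mask() reports which doorbell bits map
 * to this vector; the bits corresponding to allocated qps are walked and
 * each matching qp's receive tasklet is scheduled.
 */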
static void ntb_transport_doorbell_callback(void *data, int vector)
{
struct ntb_transport_ctx *nt = data;
struct ntb_transport_qp *qp;
u64 db_bits;
unsigned int qp_num;
db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
ntb_db_vector_mask(nt->ndev, vector));
while (db_bits) {
qp_num = __ffs(db_bits);
qp = &nt->qp_vec[qp_num];
tasklet_schedule(&qp->rxc_db_work);
db_bits &= ~BIT_ULL(qp_num);
}
}
static const struct ntb_ctx_ops ntb_transport_ops = {
.link_event = ntb_transport_event_callback,
.db_event = ntb_transport_doorbell_callback,
};
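/* These context ops are handed to the hardware driver via ntb_set_ctx() in
 * ntb_transport_probe(), so link and doorbell events are dispatched back
 * into the transport.
 */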
static struct ntb_client ntb_transport_client = {
.ops = {
.probe = ntb_transport_probe,
.remove = ntb_transport_free,
},
};
static int __init ntb_transport_init(void)
{
int rc;
if (debugfs_initialized())
nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
rc = bus_register(&ntb_transport_bus);
if (rc)
goto err_bus;
rc = ntb_register_client(&ntb_transport_client);
if (rc)
goto err_client;
return 0;
err_client:
bus_unregister(&ntb_transport_bus);
err_bus:
debugfs_remove_recursive(nt_debugfs_dir);
return rc;
}
module_init(ntb_transport_init);
static void __exit ntb_transport_exit(void)
{
debugfs_remove_recursive(nt_debugfs_dir);
ntb_unregister_client(&ntb_transport_client);
bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
* PCIe NTB Transport Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
@@ -48,21 +50,16 @@
struct ntb_transport_qp;
struct ntb_client {
struct ntb_transport_client {
struct device_driver driver;
int (*probe)(struct pci_dev *pdev);
void (*remove)(struct pci_dev *pdev);
int (*probe)(struct device *client_dev);
void (*remove)(struct device *client_dev);
};
enum {
NTB_LINK_DOWN = 0,
NTB_LINK_UP,
};
int ntb_transport_register_client(struct ntb_client *drvr);
void ntb_transport_unregister_client(struct ntb_client *drvr);
int ntb_register_client_dev(char *device_name);
void ntb_unregister_client_dev(char *device_name);
int ntb_transport_register_client(struct ntb_transport_client *drvr);
void ntb_transport_unregister_client(struct ntb_transport_client *drvr);
int ntb_transport_register_client_dev(char *device_name);
void ntb_transport_unregister_client_dev(char *device_name);
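/* Minimal client sketch (illustrative only; the my_* names are
 * hypothetical, not part of this API):
 *
 *	static int my_probe(struct device *client_dev)
 *	{
 *		struct ntb_transport_qp *qp;
 *
 *		qp = ntb_transport_create_queue(NULL, client_dev,
 *						&my_handlers);
 *		if (!qp)
 *			return -ENOMEM;
 *		ntb_transport_link_up(qp);
 *		return 0;
 *	}
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver = { .name = "my_client" },
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 * The module init would then call ntb_transport_register_client(&my_client)
 * and module exit would call ntb_transport_unregister_client(&my_client).
 */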
struct ntb_queue_handlers {
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
@@ -75,7 +72,7 @@ struct ntb_queue_handlers {
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
ntb_transport_create_queue(void *data, struct device *client_dev,
const struct ntb_queue_handlers *handlers);
void ntb_transport_free_queue(struct ntb_transport_qp *qp);
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
......