Commit 8b4e6b3c authored by Arjun Vynipadath, committed by David S. Miller

cxgb4: Add HMA support

HMA (Host Memory Access) maps a part of host memory for T6-SO memfree cards.

This commit does the following:
- Query the FW to check whether HMA is supported. If it is, the param
  returns the HMA size configured in the FW, and we DMA-map host memory
  based on that size.
- Add support for reading HMA memory contents via debugfs.
Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
Signed-off-by: Santosh Rastapur <santosh@chelsio.com>
Signed-off-by: Michael Werner <werner@chelsio.com>
Signed-off-by: Ganesh GR <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 74b8da70
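In outline, the bring-up added by this patch works as in the condensed sketch below (the names are the ones introduced in the patch; this assumes the cxgb4 driver context and is not a standalone build):

/* Condensed from adap_config_hma() in cxgb4_main.c (this patch) */
u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE);
u32 hma_size;

/* 1. Ask the firmware whether it wants HMA and how much, in MB. */
if (t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, &param,
                    &hma_size) || !hma_size)
        return 0;   /* card has its own memory, or FW has no HMA support */

/* 2. Allocate host pages into a scatterlist and DMA-map them
 *    (sg_alloc_table() + alloc_pages_node() + dma_map_sg()).
 * 3. Hand the mapped addresses to the firmware via FW_HMA_CMD mailbox
 *    commands, at most HMA_MAX_ADDR_IN_CMD (5) addresses per command.
 */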
@@ -831,6 +831,16 @@ struct vf_info {
u16 vlan;
};
enum {
HMA_DMA_MAPPED_FLAG = 1
};
struct hma_data {
unsigned char flags;
struct sg_table *sgt;
dma_addr_t *phy_addr; /* physical address of the page */
};
struct mbox_list {
struct list_head list;
};
@@ -946,6 +956,9 @@ struct adapter {
/* Ethtool Dump */
struct ethtool_dump eth_dump;
/* HMA */
struct hma_data hma;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
...
@@ -2617,7 +2617,7 @@ int mem_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
- mem = (uintptr_t)file->private_data & 0x3;
+ mem = (uintptr_t)file->private_data & 0x7;
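/* The memory type is packed into the low bits of the debugfs private
 * pointer; widening the mask from two to three bits makes room for the
 * new MEM_HMA type alongside the existing EDC/MC entries.
 */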
adap = file->private_data - mem;
(void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
@@ -2630,7 +2630,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
{
loff_t pos = *ppos;
loff_t avail = file_inode(file)->i_size;
- unsigned int mem = (uintptr_t)file->private_data & 3;
+ unsigned int mem = (uintptr_t)file->private_data & 0x7;
struct adapter *adap = file->private_data - mem;
__be32 *data;
int ret;
@@ -3042,6 +3042,12 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
}
if (i & HMA_MUX_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
add_debugfs_mem(adap, "hma", MEM_HMA,
EXT_MEM1_SIZE_G(size));
}
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
...
@@ -1736,10 +1736,11 @@ EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
- struct adapter *adap;
- u32 offset, memtype, memaddr;
u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
u32 edc0_end, edc1_end, mc0_end, mc1_end;
+ u32 offset, memtype, memaddr;
+ struct adapter *adap;
+ u32 hma_size = 0;
int ret;
adap = netdev2adap(dev);
@@ -1759,6 +1760,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
mc0_size = EXT_MEM0_SIZE_G(size) << 20;
if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
hma_size = EXT_MEM1_SIZE_G(size) << 20;
}
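/* If HMA is enabled, offsets in the first hma_size bytes past edc1_end are
 * decoded as MEM_HMA below, taking precedence over the MC0 range.
 */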
edc0_end = edc0_size;
edc1_end = edc0_end + edc1_size;
mc0_end = edc1_end + mc0_size;
@@ -1770,7 +1775,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
memtype = MEM_EDC1;
memaddr = offset - edc0_end;
} else {
- if (offset < mc0_end) {
+ if (hma_size && (offset < (edc1_end + hma_size))) {
+ memtype = MEM_HMA;
+ memaddr = offset - edc1_end;
+ } else if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
} else if (is_t5(adap->params.chip)) {
@@ -3301,6 +3309,206 @@ static void setup_memwin_rdma(struct adapter *adap)
}
}
/* HMA Definitions */
/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD 5
#define HMA_PAGE_SIZE PAGE_SIZE
#define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */
#define HMA_PAGE_ORDER \
((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE 1
#define HMA_MAX_TOTAL_SIZE \
(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
HMA_MAX_NO_FW_ADDRESS) >> 20)
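A minimal userspace sketch (assuming a 4 KiB PAGE_SIZE; not driver code) of what these bounds evaluate to: each scatterlist chunk is 16 KiB, and 16K such firmware addresses cap the HMA region at 256 MB.

#include <stdio.h>

#define HMA_PAGE_SIZE          4096UL        /* assumed PAGE_SIZE */
#define HMA_MAX_NO_FW_ADDRESS  (16UL << 10)  /* FW supports 16K addresses */
#define HMA_PAGE_ORDER         2             /* ilog2(16K / 4K) for 4 KiB pages */
#define HMA_MAX_TOTAL_SIZE \
    (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * HMA_MAX_NO_FW_ADDRESS) >> 20)

int main(void)
{
    printf("chunk size  : %lu KiB\n", (HMA_PAGE_SIZE << HMA_PAGE_ORDER) >> 10); /* 16 */
    printf("max HMA size: %lu MB\n", HMA_MAX_TOTAL_SIZE);                       /* 256 */
    return 0;
}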
static void adap_free_hma_mem(struct adapter *adapter)
{
struct scatterlist *iter;
struct page *page;
int i;
if (!adapter->hma.sgt)
return;
if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
}
for_each_sg(adapter->hma.sgt->sgl, iter,
adapter->hma.sgt->orig_nents, i) {
page = sg_page(iter);
if (page)
__free_pages(page, HMA_PAGE_ORDER);
}
kfree(adapter->hma.phy_addr);
sg_free_table(adapter->hma.sgt);
kfree(adapter->hma.sgt);
adapter->hma.sgt = NULL;
}
static int adap_config_hma(struct adapter *adapter)
{
struct scatterlist *sgl, *iter;
struct sg_table *sgt;
struct page *newpage;
unsigned int i, j, k;
u32 param, hma_size;
unsigned int ncmds;
size_t page_size;
u32 page_order;
int node, ret;
/* HMA is supported only for T6+ cards.
* Avoid initializing HMA in kdump kernels.
*/
if (is_kdump_kernel() ||
CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
return 0;
/* Get the HMA region size required by fw */
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &param, &hma_size);
/* An error means the card has its own memory or HMA is not supported
 * by the firmware. Return without any errors.
 */
if (ret || !hma_size)
return 0;
if (hma_size < HMA_MIN_TOTAL_SIZE ||
hma_size > HMA_MAX_TOTAL_SIZE) {
dev_err(adapter->pdev_dev,
"HMA size %uMB beyond bounds(%u-%lu)MB\n",
hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
return -EINVAL;
}
page_size = HMA_PAGE_SIZE;
page_order = HMA_PAGE_ORDER;
adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
if (unlikely(!adapter->hma.sgt)) {
dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
return -ENOMEM;
}
sgt = adapter->hma.sgt;
/* The value returned by the FW is in MBs */
sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
kfree(adapter->hma.sgt);
adapter->hma.sgt = NULL;
return -ENOMEM;
}
sgl = adapter->hma.sgt->sgl;
node = dev_to_node(adapter->pdev_dev);
for_each_sg(sgl, iter, sgt->orig_nents, i) {
newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
page_order);
if (!newpage) {
dev_err(adapter->pdev_dev,
"Not enough memory for HMA page allocation\n");
ret = -ENOMEM;
goto free_hma;
}
sg_set_page(iter, newpage, page_size << page_order, 0);
}
sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
DMA_BIDIRECTIONAL);
if (!sgt->nents) {
dev_err(adapter->pdev_dev,
"Not enough memory for HMA DMA mapping");
ret = -ENOMEM;
goto free_hma;
}
adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
GFP_KERNEL);
if (unlikely(!adapter->hma.phy_addr)) {
ret = -ENOMEM;
goto free_hma;
}
for_each_sg(sgl, iter, sgt->nents, i) {
newpage = sg_page(iter);
adapter->hma.phy_addr[i] = sg_dma_address(iter);
}
ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
/* Pass on the addresses to firmware */
for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
struct fw_hma_cmd hma_cmd;
u8 naddr = HMA_MAX_ADDR_IN_CMD;
u8 soc = 0, eoc = 0;
u8 hma_mode = 1; /* Presently we support only Page table mode */
soc = (i == 0) ? 1 : 0;
eoc = (i == ncmds - 1) ? 1 : 0;
/* For last cmd, set naddr corresponding to remaining
* addresses
*/
if (i == ncmds - 1) {
naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
}
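/* e.g. with sgt->nents == 12 and HMA_MAX_ADDR_IN_CMD == 5, ncmds == 3
 * and this last command carries the remaining naddr == 2 addresses.
 */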
memset(&hma_cmd, 0, sizeof(hma_cmd));
hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
hma_cmd.mode_to_pcie_params =
htonl(FW_HMA_CMD_MODE_V(hma_mode) |
FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
/* HMA cmd size specified in MB's */
hma_cmd.naddr_size =
htonl(FW_HMA_CMD_SIZE_V(hma_size) |
FW_HMA_CMD_NADDR_V(naddr));
/* Total Page size specified in units of 4K */
hma_cmd.addr_size_pkd =
htonl(FW_HMA_CMD_ADDR_SIZE_V
((page_size << page_order) >> 12));
/* Fill the 5 addresses */
for (j = 0; j < naddr; j++) {
hma_cmd.phy_address[j] =
cpu_to_be64(adapter->hma.phy_addr[j + k]);
}
ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
sizeof(hma_cmd), &hma_cmd);
if (ret) {
dev_err(adapter->pdev_dev,
"HMA FW command failed with err %d\n", ret);
goto free_hma;
}
}
if (!ret)
dev_info(adapter->pdev_dev,
"Reserved %uMB host memory for HMA\n", hma_size);
return ret;
free_hma:
adap_free_hma_mem(adapter);
return ret;
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
u32 v;
@@ -3754,6 +3962,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
/* We will proceed even if HMA init fails. */
ret = adap_config_hma(adapter);
if (ret)
dev_err(adapter->pdev_dev,
"HMA configuration failed with error %d\n", ret);
/*
 * And finally tell the firmware to initialize itself using the
 * parameters from the Configuration File.
@@ -3960,6 +4174,11 @@ static int adap_init0(struct adapter *adap)
 * effect. Otherwise, it's time to try initializing the adapter.
 */
if (state == DEV_STATE_INIT) {
ret = adap_config_hma(adap);
if (ret)
dev_err(adap->pdev_dev,
"HMA configuration failed with error %d\n",
ret);
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
@@ -4349,6 +4568,7 @@ static int adap_init0(struct adapter *adap)
 * happened to HW/FW, stop issuing commands.
 */
bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
@@ -5576,6 +5796,8 @@ static void remove_one(struct pci_dev *pdev)
t4_uld_clean_up(adapter);
}
adap_free_hma_mem(adapter);
disable_interrupts(adapter);
for_each_port(adapter, i)
...
@@ -487,7 +487,7 @@ static int t4_edc_err_read(struct adapter *adap, int idx)
 * t4_memory_rw_init - Get memory window relative offset, base, and size.
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
- * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
 * @mem_off: memory relative offset with respect to @mtype.
 * @mem_base: configured memory base address.
 * @mem_aperture: configured memory window aperture.
...
@@ -766,6 +766,7 @@ enum fw_cmd_opcodes {
FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
FW_PTP_CMD = 0x3e,
FW_HMA_CMD = 0x3f,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
@@ -1132,6 +1133,7 @@ enum fw_memtype_cf {
FW_MEMTYPE_CF_FLASH = 0x4,
FW_MEMTYPE_CF_INTERNAL = 0x5,
FW_MEMTYPE_CF_EXTMEM1 = 0x6,
FW_MEMTYPE_CF_HMA = 0x7,
};
struct fw_caps_config_cmd {
@@ -1210,6 +1212,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
};
/*
@@ -3435,6 +3438,59 @@ struct fw_debug_cmd {
#define FW_DEBUG_CMD_TYPE_G(x) \
(((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
struct fw_hma_cmd {
__be32 op_pkd;
__be32 retval_len16;
__be32 mode_to_pcie_params;
__be32 naddr_size;
__be32 addr_size_pkd;
__be32 r6;
__be64 phy_address[5];
};
#define FW_HMA_CMD_MODE_S 31
#define FW_HMA_CMD_MODE_M 0x1
#define FW_HMA_CMD_MODE_V(x) ((x) << FW_HMA_CMD_MODE_S)
#define FW_HMA_CMD_MODE_G(x) \
(((x) >> FW_HMA_CMD_MODE_S) & FW_HMA_CMD_MODE_M)
#define FW_HMA_CMD_MODE_F FW_HMA_CMD_MODE_V(1U)
#define FW_HMA_CMD_SOC_S 30
#define FW_HMA_CMD_SOC_M 0x1
#define FW_HMA_CMD_SOC_V(x) ((x) << FW_HMA_CMD_SOC_S)
#define FW_HMA_CMD_SOC_G(x) (((x) >> FW_HMA_CMD_SOC_S) & FW_HMA_CMD_SOC_M)
#define FW_HMA_CMD_SOC_F FW_HMA_CMD_SOC_V(1U)
#define FW_HMA_CMD_EOC_S 29
#define FW_HMA_CMD_EOC_M 0x1
#define FW_HMA_CMD_EOC_V(x) ((x) << FW_HMA_CMD_EOC_S)
#define FW_HMA_CMD_EOC_G(x) (((x) >> FW_HMA_CMD_EOC_S) & FW_HMA_CMD_EOC_M)
#define FW_HMA_CMD_EOC_F FW_HMA_CMD_EOC_V(1U)
#define FW_HMA_CMD_PCIE_PARAMS_S 0
#define FW_HMA_CMD_PCIE_PARAMS_M 0x7ffffff
#define FW_HMA_CMD_PCIE_PARAMS_V(x) ((x) << FW_HMA_CMD_PCIE_PARAMS_S)
#define FW_HMA_CMD_PCIE_PARAMS_G(x) \
(((x) >> FW_HMA_CMD_PCIE_PARAMS_S) & FW_HMA_CMD_PCIE_PARAMS_M)
#define FW_HMA_CMD_NADDR_S 12
#define FW_HMA_CMD_NADDR_M 0x3f
#define FW_HMA_CMD_NADDR_V(x) ((x) << FW_HMA_CMD_NADDR_S)
#define FW_HMA_CMD_NADDR_G(x) \
(((x) >> FW_HMA_CMD_NADDR_S) & FW_HMA_CMD_NADDR_M)
#define FW_HMA_CMD_SIZE_S 0
#define FW_HMA_CMD_SIZE_M 0xfff
#define FW_HMA_CMD_SIZE_V(x) ((x) << FW_HMA_CMD_SIZE_S)
#define FW_HMA_CMD_SIZE_G(x) \
(((x) >> FW_HMA_CMD_SIZE_S) & FW_HMA_CMD_SIZE_M)
#define FW_HMA_CMD_ADDR_SIZE_S 11
#define FW_HMA_CMD_ADDR_SIZE_M 0x1fffff
#define FW_HMA_CMD_ADDR_SIZE_V(x) ((x) << FW_HMA_CMD_ADDR_SIZE_S)
#define FW_HMA_CMD_ADDR_SIZE_G(x) \
(((x) >> FW_HMA_CMD_ADDR_SIZE_S) & FW_HMA_CMD_ADDR_SIZE_M)
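A small standalone sketch of how these fields are packed (shift values copied from the definitions above; the 32 MB region size and 16 KiB chunk size are hypothetical), mirroring the packing done in adap_config_hma() earlier in the patch:

#include <stdio.h>
#include <stdint.h>

#define FW_HMA_CMD_SIZE_S      0   /* total HMA size, in MB */
#define FW_HMA_CMD_NADDR_S     12  /* number of addresses carried by this cmd */
#define FW_HMA_CMD_ADDR_SIZE_S 11  /* per-address chunk size, in 4K units */

int main(void)
{
    /* Hypothetical 32 MB HMA region, 16 KiB chunks, 5 addresses in the cmd */
    uint32_t naddr_size    = (32u << FW_HMA_CMD_SIZE_S) |
                             (5u << FW_HMA_CMD_NADDR_S);
    uint32_t addr_size_pkd = ((16u << 10) >> 12) << FW_HMA_CMD_ADDR_SIZE_S;

    printf("naddr_size    = 0x%08x\n", naddr_size);    /* 0x00005020 */
    printf("addr_size_pkd = 0x%08x\n", addr_size_pkd); /* 0x00002000 */
    return 0;
}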
enum pcie_fw_eval {
PCIE_FW_EVAL_CRASH = 0,
};
...