Commit 573c3853 authored by David S. Miller

Merge branch 'mt7986-WED-RX'

Lorenzo Bianconi says:

====================
introduce WED RX support to MT7986 SoC

Similar to the TX counterpart already available on MT7622 and MT7986,
introduce RX Wireless Ethernet Dispatch (WED) support on the MT7986 SoC
in order to offload traffic received by the WLAN NIC to the wired
interfaces (LAN/WAN).
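
A minimal consumer sketch (not part of this series): a WLAN driver hands
its WPDMA RX data rings over to WED through the rx_ring_setup op added to
struct mtk_wed_ops in this series. The 0x10 per-ring register stride is a
placeholder value.

static int wlan_wed_rx_offload(struct mtk_wed_device *wed,
			       void __iomem *rx_ring_regs)
{
	int i, ret;

	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		/* hand ring i over to WED; regs points at its WPDMA ring */
		ret = wed->ops->rx_ring_setup(wed, i,
					      rx_ring_regs + i * 0x10);
		if (ret)
			return ret;
	}

	return 0;
}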

Changes since v3:
- remove reset property in ethsys dts node
- rely on readx_poll_timeout in wo mcu code
- fix typos
- move wo-ccif binding in soc folder
- use reserved-memory for wo-dlm
- improve wo-ccif binding

Changes since v2:
- rely on of_reserved_mem APIs in mcu code
- add some dts fixes
- rename {tx,rx}_wdma to {rx,tx}_wdma
- update entry in maintainers file

Changes since v1:
- fix sparse warnings
- rely on memory-region property in mt7622-wed.yaml
- some more binding fixes
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a07b3835 90050f80
@@ -29,6 +29,38 @@ properties:
interrupts:
maxItems: 1
memory-region:
items:
- description: firmware EMI region
- description: firmware ILM region
- description: firmware DLM region
- description: firmware CPU DATA region
- description: firmware BOOT region
memory-region-names:
items:
- const: wo-emi
- const: wo-ilm
- const: wo-dlm
- const: wo-data
- const: wo-boot
mediatek,wo-ccif:
$ref: /schemas/types.yaml#/definitions/phandle
    description: MediaTek WED WO controller interface.
allOf:
- if:
properties:
compatible:
contains:
const: mediatek,mt7622-wed
then:
properties:
memory-region-names: false
memory-region: false
mediatek,wo-ccif: false
required:
- compatible
- reg
@@ -49,3 +81,23 @@ examples:
interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
};
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
wed@15010000 {
compatible = "mediatek,mt7986-wed", "syscon";
reg = <0 0x15010000 0 0x1000>;
interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
memory-region = <&wo_emi>, <&wo_ilm>, <&wo_dlm>,
<&wo_data>, <&wo_boot>;
memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
"wo-data", "wo-boot";
mediatek,wo-ccif = <&wo_ccif0>;
};
};
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/soc/mediatek/mediatek,mt7986-wo-ccif.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Wireless Ethernet Dispatch (WED) WO controller interface for MT7986
maintainers:
- Lorenzo Bianconi <lorenzo@kernel.org>
- Felix Fietkau <nbd@nbd.name>
description:
  The MediaTek wo-ccif provides a configuration interface for the WED WO
  controller, which is used to perform offloaded RX packet processing
  (e.g. 802.11 aggregation packet reordering or RX header translation)
  on the MT7986 SoC.
properties:
compatible:
items:
- enum:
- mediatek,mt7986-wo-ccif
- const: syscon
reg:
maxItems: 1
interrupts:
maxItems: 1
required:
- compatible
- reg
- interrupts
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
syscon@151a5000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151a5000 0 0x1000>;
interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
};
};
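As a rough illustration of the consumer side of this binding, a hedged
sketch (the wrapper function is hypothetical; syscon_regmap_lookup_by_phandle()
is the stock kernel helper for resolving syscon phandles):

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Map the "mediatek,wo-ccif" phandle of a WED node to the regmap backing
 * the wo-ccif syscon node; returns an ERR_PTR on failure. */
static struct regmap *wed_wo_ccif_regmap(struct device_node *wed_node)
{
	return syscon_regmap_lookup_by_phandle(wed_node, "mediatek,wo-ccif");
}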
@@ -12926,6 +12926,7 @@ M: Felix Fietkau <nbd@nbd.name>
M: John Crispin <john@phrozen.org>
M: Sean Wang <sean.wang@mediatek.com>
M: Mark Lee <Mark-MC.Lee@mediatek.com>
M: Lorenzo Bianconi <lorenzo@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
...
@@ -76,6 +76,47 @@ wmcpu_emi: wmcpu-reserved@4fc00000 {
no-map;
reg = <0 0x4fc00000 0 0x00100000>;
};
wo_emi0: wo-emi@4fd00000 {
reg = <0 0x4fd00000 0 0x40000>;
no-map;
};
wo_emi1: wo-emi@4fd40000 {
reg = <0 0x4fd40000 0 0x40000>;
no-map;
};
wo_ilm0: wo-ilm@151e0000 {
reg = <0 0x151e0000 0 0x8000>;
no-map;
};
wo_ilm1: wo-ilm@151f0000 {
reg = <0 0x151f0000 0 0x8000>;
no-map;
};
wo_data: wo-data@4fd80000 {
reg = <0 0x4fd80000 0 0x240000>;
no-map;
};
wo_dlm0: wo-dlm@151e8000 {
reg = <0 0x151e8000 0 0x2000>;
no-map;
};
wo_dlm1: wo-dlm@151f8000 {
reg = <0 0x151f8000 0 0x2000>;
no-map;
};
wo_boot: wo-boot@15194000 {
reg = <0 0x15194000 0 0x1000>;
no-map;
};
};
timer {
@@ -240,6 +281,11 @@ wed0: wed@15010000 {
reg = <0 0x15010000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
memory-region = <&wo_emi0>, <&wo_ilm0>, <&wo_dlm0>,
<&wo_data>, <&wo_boot>;
memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
"wo-data", "wo-boot";
mediatek,wo-ccif = <&wo_ccif0>;
};
wed1: wed@15011000 {
@@ -248,6 +294,25 @@ wed1: wed@15011000 {
reg = <0 0x15011000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
memory-region = <&wo_emi1>, <&wo_ilm1>, <&wo_dlm1>,
<&wo_data>, <&wo_boot>;
memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
"wo-data", "wo-boot";
mediatek,wo-ccif = <&wo_ccif1>;
};
wo_ccif0: syscon@151a5000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151a5000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
};
wo_ccif1: syscon@151ad000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151ad000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
};
eth: ethernet@15100000 {
...
@@ -5,7 +5,7 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
ifdef CONFIG_DEBUG_FS
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
...
@@ -9,6 +9,7 @@
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
@@ -16,12 +17,14 @@
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
#define MTK_WED_PKT_SIZE 1900
#define MTK_WED_BUF_SIZE 2048
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
#define MTK_WED_RX_RING_SIZE 1536
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
@@ -30,6 +33,10 @@
#define MTK_WED_PER_GROUP_PKT 128
#define MTK_WED_FBUF_SIZE 128
#define MTK_WED_MIOD_CNT 16
#define MTK_WED_FB_CMD_CNT 1024
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
@@ -64,12 +71,76 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
wdma_m32(dev, reg, 0, mask);
}
static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
wdma_m32(dev, reg, mask, 0);
}
static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
return readl(dev->wlan.base + reg);
}
static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
writel(val, dev->wlan.base + reg);
}
static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
return wed_r32(dev, MTK_WED_RESET);
}
static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
static void
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
int i;
wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
!(status & mask), 0, 1000))
dev_err(dev->hw->dev, "rx reset failed\n");
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
if (dev->rx_wdma[i].desc)
continue;
wdma_w32(dev,
MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}
}
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
int i;
wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
!(status & mask), 0, 1000))
dev_err(dev->hw->dev, "tx reset failed\n");
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) {
if (dev->tx_wdma[i].desc)
continue;
wdma_w32(dev,
MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}
}
static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
@@ -81,6 +152,54 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
WARN_ON_ONCE(1);
}
static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}
static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
u8 state = MTK_WED_WO_STATE_DISABLE;
void __iomem *reg;
u32 val;
mtk_wdma_tx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_CHANGE_STATE, &state,
sizeof(state), false);
if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
val == MTK_WED_WOIF_DISABLE_DONE,
100, MTK_WOCPU_TIMEOUT))
dev_err(dev->hw->dev, "failed to disable wed-wo\n");
reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
val = readl(reg);
switch (dev->hw->index) {
case 0:
val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
writel(val, reg);
val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
writel(val, reg);
break;
case 1:
val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
writel(val, reg);
val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
writel(val, reg);
break;
default:
break;
}
iounmap(reg);
}
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
@@ -115,7 +234,7 @@ mtk_wed_assign(struct mtk_wed_device *dev)
}
static int
mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
@@ -132,16 +251,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
if (!page_list)
return -ENOMEM;
dev->buf_ring.size = ring_size;
dev->buf_ring.pages = page_list;
dev->tx_buf_ring.size = ring_size;
dev->tx_buf_ring.pages = page_list;
desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
dev->buf_ring.desc = desc;
dev->buf_ring.desc_phys = desc_phys;
dev->tx_buf_ring.desc = desc;
dev->tx_buf_ring.desc_phys = desc_phys;
for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
@@ -202,10 +321,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
}
static void
mtk_wed_free_buffer(struct mtk_wed_device *dev)
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc = dev->buf_ring.desc;
void **page_list = dev->buf_ring.pages;
struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
void **page_list = dev->tx_buf_ring.pages;
int page_idx;
int i;
@@ -215,7 +334,8 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
if (!desc)
goto free_pagelist;
for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
i += MTK_WED_BUF_PER_PAGE) {
void *page = page_list[page_idx++];
dma_addr_t buf_addr;
@@ -228,13 +348,59 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
__free_page(page);
}
dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
desc, dev->buf_ring.desc_phys);
dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
desc, dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
desc = dma_alloc_coherent(dev->hw->dev,
dev->wlan.rx_nbuf * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
dev->rx_buf_ring.desc = desc;
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
return 0;
}
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
if (!desc)
return;
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
}
static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}
static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
@@ -245,6 +411,13 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
ring->desc, ring->desc_phys);
}
static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
mtk_wed_free_rx_buffer(dev);
mtk_wed_free_ring(dev, &dev->rro.ring);
}
static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
@@ -252,8 +425,8 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
mtk_wed_free_ring(dev, &dev->tx_ring[i]);
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}
static void
@@ -290,6 +463,38 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
}
}
#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
u32 val;
int i;
if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
return; /* queue is not configured by mt76 */
for (i = 0; i < 3; i++) {
u32 cur_idx;
cur_idx = wed_r32(dev,
MTK_WED_WPDMA_RING_RX_DATA(idx) +
MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
usleep_range(100000, 200000);
}
if (i == 3) {
dev_err(dev->hw->dev, "rx dma enable failed\n");
return;
}
val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
MTK_WFMDA_RX_DMA_EN;
wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}
static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
@@ -303,20 +508,25 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
wdma_m32(dev, MTK_WDMA_GLO_CFG,
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
wdma_m32(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
mtk_wed_set_512_support(dev, false);
}
}
@@ -337,6 +547,13 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (dev->hw->version == 1)
return;
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}
static void
@@ -352,10 +569,22 @@ mtk_wed_detach(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
if (mtk_wed_get_rx_capa(dev)) {
wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
}
mtk_wed_free_buffer(dev);
mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
if (mtk_wed_get_rx_capa(dev)) {
mtk_wed_wo_reset(dev);
mtk_wed_free_rx_rings(dev);
mtk_wed_wo_deinit(hw);
mtk_wdma_rx_reset(dev);
}
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
@@ -435,6 +664,8 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
}
}
@@ -484,6 +715,132 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
}
}
static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
int size)
{
ring->desc = dma_alloc_coherent(dev->hw->dev,
size * sizeof(*ring->desc),
&ring->desc_phys, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
	memset(ring->desc, 0, size * sizeof(*ring->desc));
return 0;
}
#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
struct reserved_mem *rmem;
struct device_node *np;
int index;
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
return index;
np = of_parse_phandle(dev->hw->node, "memory-region", index);
if (!np)
return -ENODEV;
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
if (!rmem)
return -ENODEV;
dev->rro.miod_phys = rmem->base;
dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
MTK_WED_RRO_QUE_CNT);
}
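/* Hedged sketch of how mtk_wed_rro_alloc() carves up the wo-dlm reserved
 * region (0x2000 bytes per WED instance in the dts hunk above); reading
 * MTK_WED_MIOD_COUNT as a byte offset is our interpretation of the
 * miod_phys/fdbk_phys math:
 *
 *   rro.miod_phys = rmem->base
 *       MIOD table: MTK_WED_MIOD_CNT * MTK_WED_MIOD_ENTRY_CNT
 *                   = 16 * 128 = 2048 (0x800) bytes
 *   rro.fdbk_phys = rro.miod_phys + MTK_WED_MIOD_COUNT
 *       FDBK (feedback) queue, MTK_WED_FB_CMD_CNT commands
 */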
static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
struct {
struct {
__le32 base;
__le32 cnt;
__le32 unit;
} ring[2];
__le32 wed;
u8 version;
} req = {
.ring[0] = {
.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
},
.ring[1] = {
.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
MTK_WED_MIOD_COUNT),
.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
.unit = cpu_to_le32(4),
},
};
return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_WED_CFG,
&req, sizeof(req), true);
}
static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
MTK_WED_MIOD_ENTRY_CNT >> 2));
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
wed_set(dev, MTK_WED_RROQM_RST_IDX,
MTK_WED_RROQM_RST_IDX_MIOD |
MTK_WED_RROQM_RST_IDX_FDBK);
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}
static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
for (;;) {
usleep_range(100, 200);
if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
break;
}
/* configure RX_ROUTE_QM */
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
@@ -495,11 +852,11 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
dev->buf_ring.size / 128) |
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
@@ -526,9 +883,9 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
dev->buf_ring.size / 128) |
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
dev->buf_ring.size / 128));
dev->tx_buf_ring.size / 128));
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
@@ -536,18 +893,28 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
if (dev->hw->version == 1)
if (dev->hw->version == 1) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
else
} else {
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
/* rx hw init */
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
mtk_wed_rx_buffer_hw_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}
static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
void *head = (void *)ring->desc;
int i;
@@ -557,7 +924,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
if (tx)
desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
else
desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
desc->buf1 = 0;
desc->info = 0;
}
@@ -613,7 +983,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
if (!dev->tx_ring[i].desc)
continue;
mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
true);
}
if (mtk_wed_poll_busy(dev))
@@ -631,6 +1002,9 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
if (mtk_wed_get_rx_capa(dev))
mtk_wdma_rx_reset(dev);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
@@ -665,12 +1039,11 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
}
static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
int size, u32 desc_size)
int size, u32 desc_size, bool tx)
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
@@ -679,18 +1052,23 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
ring->desc_size = desc_size;
ring->size = size;
mtk_wed_ring_reset(ring, size);
mtk_wed_ring_reset(ring, size, tx);
return 0;
}
static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
struct mtk_wed_ring *wdma;
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
if (idx >= ARRAY_SIZE(dev->rx_wdma))
return -EINVAL;
wdma = &dev->rx_wdma[idx];
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -707,6 +1085,60 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
return 0;
}
static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->tx_wdma))
return -EINVAL;
wdma = &dev->tx_wdma[idx];
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
size);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
size);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
0);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
0);
}
return 0;
}
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
u32 reason, u32 hash)
{
struct mtk_eth *eth = dev->hw->eth;
struct ethhdr *eh;
if (!skb)
return;
if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
return;
skb_set_mac_header(skb, 0);
eh = eth_hdr(skb);
skb->protocol = eh->h_proto;
mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
@@ -729,6 +1161,8 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
GENMASK(1, 0));
		/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
@@ -747,6 +1181,16 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
MTK_WED_WPDMA_INT_CTRL_RX0_EN |
MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
MTK_WED_WPDMA_INT_CTRL_RX1_EN |
MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
dev->wlan.rx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
dev->wlan.rx_tbit[1]));
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
@@ -784,9 +1228,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
int i;
wed_set(dev, MTK_WED_WPDMA_CTRL,
MTK_WED_WPDMA_CTRL_SDL1_FIXED);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
@@ -794,6 +1244,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
0x2));
for (i = 0; i < MTK_WED_RX_QUEUES; i++)
mtk_wed_check_wfdma_rx_fill(dev, i);
}
}
@@ -802,9 +1261,9 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
if (!dev->tx_wdma[i].desc)
mtk_wed_wdma_ring_setup(dev, i, 16);
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16);
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -819,7 +1278,19 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
mtk_wed_set_512_support(dev, true);
		/* the driver sets MID ready, and only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_w32(dev, MTK_WED_EXT_INT_MASK2,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
if (mtk_wed_rro_cfg(dev))
return;
mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
}
mtk_wed_dma_enable(dev);
@@ -853,7 +1324,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
goto out;
goto unlock;
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
@@ -866,23 +1337,36 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
dev->version = hw->version;
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
ret = mtk_wed_buffer_alloc(dev);
if (ret) {
mtk_wed_detach(dev);
ret = mtk_wed_tx_buffer_alloc(dev);
if (ret)
goto out;
if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rx_buffer_alloc(dev);
if (ret)
goto out;
ret = mtk_wed_rro_alloc(dev);
if (ret)
goto out;
}
mtk_wed_hw_init_early(dev);
if (hw->hifsys)
if (hw->version == 1)
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
else
ret = mtk_wed_wo_init(hw);
out:
if (ret)
mtk_wed_detach(dev);
unlock:
mutex_unlock(&hw_lock);
return ret;
@@ -905,13 +1389,14 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
* WDMA RX.
*/
BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;
if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
sizeof(*ring->desc)))
sizeof(*ring->desc), true))
return -ENOMEM;
if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
return -ENOMEM;
ring->reg_base = MTK_WED_RING_TX(idx);
@@ -955,6 +1440,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
return 0;
}
static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->rx_ring[idx];
if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
return -EINVAL;
if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
sizeof(*ring->desc), false))
return -ENOMEM;
if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
return -ENOMEM;
ring->reg_base = MTK_WED_RING_RX_DATA(idx);
ring->wpdma = regs;
ring->flags |= MTK_WED_RING_CONFIGURED;
/* WPDMA -> WED */
wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
ring->desc_phys);
wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
MTK_WED_RX_RING_SIZE);
return 0;
}
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
@@ -1051,7 +1567,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
.rx_ring_setup = mtk_wed_rx_ring_setup,
.txfree_ring_setup = mtk_wed_txfree_ring_setup,
.msg_update = mtk_wed_mcu_msg_update,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
@@ -1060,6 +1578,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
...
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
struct mtk_eth;
struct mtk_wed_wo;
struct mtk_wed_hw {
struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
struct regmap *mirror;
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
struct mtk_wed_wo *wed_wo;
u32 debugfs_reg;
u32 num_flows;
u8 version;
@@ -84,6 +86,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
writel(val, dev->tx_ring[ring].wpdma + reg);
}
static inline u32
wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
{
if (!dev->rx_ring[ring].wpdma)
return 0;
return readl(dev->rx_ring[ring].wpdma + reg);
}
static inline void
wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
{
if (!dev->rx_ring[ring].wpdma)
return;
writel(val, dev->rx_ring[ring].wpdma + reg);
}
static inline u32
wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
{
@@ -126,6 +146,7 @@ static inline int mtk_wed_flow_add(int index)
static inline void mtk_wed_flow_remove(int index)
{
}
#endif
#ifdef CONFIG_DEBUG_FS
...
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
#include <linux/seq_file.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"
@@ -18,6 +19,8 @@ enum {
DUMP_TYPE_WDMA,
DUMP_TYPE_WPDMA_TX,
DUMP_TYPE_WPDMA_TXFREE,
DUMP_TYPE_WPDMA_RX,
DUMP_TYPE_WED_RRO,
};
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
@@ -36,6 +39,9 @@ enum {
#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
#define DUMP_WED_RRO_RING(_base) DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
#define DUMP_WED_RRO_FDBK(_base) DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
static void
print_reg_val(struct seq_file *s, const char *name, u32 val)
@@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
cur > regs ? "\n" : "",
cur->name);
continue;
case DUMP_TYPE_WED_RRO:
case DUMP_TYPE_WED:
val = wed_r32(dev, cur->offset);
break;
@@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
case DUMP_TYPE_WPDMA_TXFREE:
val = wpdma_txfree_r32(dev, cur->offset);
break;
case DUMP_TYPE_WPDMA_RX:
val = wpdma_rx_r32(dev, cur->base, cur->offset);
break;
}
print_reg_val(s, cur->name, val);
}
@@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
DUMP_STR("WPDMA RX"),
DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
DUMP_STR("WED RX"),
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
DUMP_STR("WED RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
DUMP_WED(WED_RROQM_FDBK_IND_MIB),
DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
DUMP_STR("WED Route QM"),
DUMP_WED(WED_RTQM_R2H_MIB(0)),
DUMP_WED(WED_RTQM_R2Q_MIB(0)),
DUMP_WED(WED_RTQM_Q2H_MIB(0)),
DUMP_WED(WED_RTQM_R2H_MIB(1)),
DUMP_WED(WED_RTQM_R2Q_MIB(1)),
DUMP_WED(WED_RTQM_Q2H_MIB(1)),
DUMP_WED(WED_RTQM_Q2N_MIB),
DUMP_WED(WED_RTQM_Q2B_MIB),
DUMP_WED(WED_RTQM_PFDBK_MIB),
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
DUMP_WED_RING(WED_WDMA_RING_TX),
DUMP_STR("WDMA TX"),
DUMP_WDMA(WDMA_GLO_CFG),
DUMP_WDMA_RING(WDMA_RING_TX(0)),
DUMP_WDMA_RING(WDMA_RING_TX(1)),
DUMP_STR("WED RX BM"),
DUMP_WED(WED_RX_BM_BASE),
DUMP_WED(WED_RX_BM_RX_DMAD),
DUMP_WED(WED_RX_BM_PTR),
DUMP_WED(WED_RX_BM_TKID_MIB),
DUMP_WED(WED_RX_BM_BLEN),
DUMP_WED(WED_RX_BM_STS),
DUMP_WED(WED_RX_BM_INTF2),
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (!dev)
return 0;
dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
static int
mtk_wed_reg_set(void *data, u64 val)
@@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
if (hw->version != 1)
debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
&wed_rxinfo_fops);
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
* Sujuan Chen <sujuan.chen@mediatek.com>
*/
#include <linux/firmware.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <asm/unaligned.h>
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
#include "mtk_wed.h"
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
return readl(wo->boot.addr + reg);
}
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
writel(val, wo->boot.addr + reg);
}
static struct sk_buff *
mtk_wed_mcu_msg_alloc(const void *data, int data_len)
{
int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
struct sk_buff *skb;
skb = alloc_skb(length, GFP_KERNEL);
if (!skb)
return NULL;
memset(skb->head, 0, length);
skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
if (data && data_len)
skb_put_data(skb, data, data_len);
return skb;
}
static struct sk_buff *
mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
{
if (!time_is_after_jiffies(expires))
return NULL;
wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
expires - jiffies);
return skb_dequeue(&wo->mcu.res_q);
}
void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
{
skb_queue_tail(&wo->mcu.res_q, skb);
wake_up(&wo->mcu.wait);
}
static void
mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
{
u32 count = get_unaligned_le32(skb->data);
struct mtk_wed_wo_rx_stats *stats;
int i;
if (count * sizeof(*stats) > skb->len - sizeof(u32))
return;
stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32));
	for (i = 0; i < count; i++)
wed->wlan.update_wo_rx_stats(wed, &stats[i]);
}
void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
struct sk_buff *skb)
{
struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
switch (hdr->cmd) {
case MTK_WED_WO_EVT_LOG_DUMP:
dev_notice(wo->hw->dev, "%s\n", skb->data);
break;
case MTK_WED_WO_EVT_PROFILING: {
struct mtk_wed_wo_log_info *info = (void *)skb->data;
u32 count = skb->len / sizeof(*info);
int i;
		for (i = 0; i < count; i++)
dev_notice(wo->hw->dev,
"SN:%u latency: total=%u, rro:%u, mod:%u\n",
le32_to_cpu(info[i].sn),
le32_to_cpu(info[i].total),
le32_to_cpu(info[i].rro),
le32_to_cpu(info[i].mod));
break;
}
case MTK_WED_WO_EVT_RXCNT_INFO:
mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
break;
default:
break;
}
dev_kfree_skb(skb);
}
static int
mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
int id, int cmd, u16 *wait_seq, bool wait_resp)
{
struct mtk_wed_mcu_hdr *hdr;
/* TODO: make it dynamic based on cmd */
wo->mcu.timeout = 20 * HZ;
hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
hdr->cmd = cmd;
hdr->length = cpu_to_le16(skb->len);
if (wait_resp && wait_seq) {
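		/* sequence number 0 is reserved; on wrap-around take the next one */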
u16 seq = ++wo->mcu.seq;
if (!seq)
seq = ++wo->mcu.seq;
*wait_seq = seq;
hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
hdr->seq = cpu_to_le16(seq);
}
if (id == MTK_WED_MODULE_ID_WO)
hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
}
static int
mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
int cmd, int seq)
{
struct mtk_wed_mcu_hdr *hdr;
if (!skb) {
dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
return -ETIMEDOUT;
}
hdr = (struct mtk_wed_mcu_hdr *)skb->data;
if (le16_to_cpu(hdr->seq) != seq)
return -EAGAIN;
skb_pull(skb, sizeof(*hdr));
switch (cmd) {
case MTK_WED_WO_CMD_RXCNT_INFO:
mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
break;
default:
break;
}
return 0;
}
int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
const void *data, int len, bool wait_resp)
{
unsigned long expires;
struct sk_buff *skb;
u16 seq;
int ret;
skb = mtk_wed_mcu_msg_alloc(data, len);
if (!skb)
return -ENOMEM;
mutex_lock(&wo->mcu.mutex);
ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
if (ret || !wait_resp)
goto unlock;
expires = jiffies + wo->mcu.timeout;
do {
skb = mtk_wed_mcu_get_response(wo, expires);
ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
unlock:
mutex_unlock(&wo->mcu.mutex);
return ret;
}
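/* Hedged usage sketch: the same MTK_WED_WO_CMD_CHANGE_STATE request that
 * mtk_wed_wo_reset() issues, but waiting for the firmware response; it
 * blocks until the WO acks or the 20s mcu.timeout above expires. */
static int mtk_wed_wo_set_state_disabled(struct mtk_wed_wo *wo)
{
	u8 state = MTK_WED_WO_STATE_DISABLE;

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_CHANGE_STATE,
				    &state, sizeof(state), true);
}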
int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
int len)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
if (dev->hw->version == 1)
return 0;
return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
true);
}
static int
mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
int index;
index = of_property_match_string(wo->hw->node, "memory-region-names",
region->name);
if (index < 0)
return index;
np = of_parse_phandle(wo->hw->node, "memory-region", index);
if (!np)
return -ENODEV;
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
if (!rmem)
return -ENODEV;
region->phy_addr = rmem->base;
region->size = rmem->size;
region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
return !region->addr ? -EINVAL : 0;
}
static int
mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
struct mtk_wed_wo_memory_region *region)
{
const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
const struct mtk_wed_fw_trailer *trailer;
const struct mtk_wed_fw_region *fw_region;
trailer_ptr = fw->data + fw->size - sizeof(*trailer);
trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
first_region_ptr = region_ptr;
while (region_ptr < trailer_ptr) {
u32 length;
fw_region = (const struct mtk_wed_fw_region *)region_ptr;
length = le32_to_cpu(fw_region->len);
if (region->phy_addr != le32_to_cpu(fw_region->addr))
goto next;
if (region->size < length)
goto next;
if (first_region_ptr < ptr + length)
goto next;
if (region->shared && region->consumed)
return 0;
if (!region->shared || !region->consumed) {
memcpy_toio(region->addr, ptr, length);
region->consumed = true;
return 0;
}
next:
region_ptr += sizeof(*fw_region);
ptr += length;
}
return -EINVAL;
}
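/* Hedged sketch of the firmware image layout the parser above assumes
 * (derived from its pointer math; not an authoritative format spec):
 *
 *   +--------------------------------------------+  fw->data
 *   | region 0 payload                           |
 *   | ...                                        |
 *   | region num_region - 1 payload              |
 *   +--------------------------------------------+  first_region_ptr
 *   | num_region * struct mtk_wed_fw_region      |
 *   +--------------------------------------------+  trailer_ptr
 *   | struct mtk_wed_fw_trailer (num_region)     |
 *   +--------------------------------------------+  fw->data + fw->size
 */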
static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
static struct mtk_wed_wo_memory_region mem_region[] = {
[MTK_WED_WO_REGION_EMI] = {
.name = "wo-emi",
},
[MTK_WED_WO_REGION_ILM] = {
.name = "wo-ilm",
},
[MTK_WED_WO_REGION_DATA] = {
.name = "wo-data",
.shared = true,
},
};
const struct mtk_wed_fw_trailer *trailer;
const struct firmware *fw;
const char *fw_name;
u32 val, boot_cr;
int ret, i;
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
if (ret)
return ret;
}
wo->boot.name = "wo-boot";
ret = mtk_wed_get_memory_region(wo, &wo->boot);
if (ret)
return ret;
/* set dummy cr */
wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
wo->hw->index + 1);
/* load firmware */
fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
ret = request_firmware(&fw, fw_name, wo->hw->dev);
if (ret)
return ret;
trailer = (void *)(fw->data + fw->size -
sizeof(struct mtk_wed_fw_trailer));
dev_info(wo->hw->dev,
"MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
trailer->fw_ver, trailer->build_date);
dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
trailer->chip_id, trailer->num_region);
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
if (ret)
goto out;
}
/* set the start address */
boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
: MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
: MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
return ret;
}
static u32
mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
{
return wed_r32(wo->hw->wed_dev,
MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
}
int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
{
u32 val;
int ret;
skb_queue_head_init(&wo->mcu.res_q);
init_waitqueue_head(&wo->mcu.wait);
mutex_init(&wo->mcu.mutex);
ret = mtk_wed_mcu_load_firmware(wo);
if (ret)
return ret;
return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
100, MTK_FW_DL_TIMEOUT);
}
MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
@@ -4,6 +4,7 @@
#ifndef __MTK_WED_REGS_H
#define __MTK_WED_REGS_H
#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
@@ -28,6 +29,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_WED_TX_DMA BIT(12)
#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
#define MTK_WED_RESET_RX_RRO_QM BIT(20)
#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
#define MTK_WED_RESET_WED BIT(31)
#define MTK_WED_CTRL 0x00c
@@ -39,8 +42,12 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
#define MTK_WED_CTRL_RESERVE_EN BIT(12)
#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
@@ -62,6 +69,9 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25)
#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26)
#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27)
#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
@@ -71,6 +81,8 @@ struct mtk_wdma_desc {
MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
#define MTK_WED_EXT_INT_MASK 0x028
#define MTK_WED_EXT_INT_MASK1 0x02c
#define MTK_WED_EXT_INT_MASK2 0x030
#define MTK_WED_STATUS 0x060
#define MTK_WED_STATUS_TX GENMASK(15, 8)
@@ -151,7 +163,9 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
#define MTK_WED_SCR0 0x3c0
#define MTK_WED_WPDMA_INT_TRIGGER 0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
@@ -212,6 +226,12 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
@@ -241,11 +261,34 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
#define MTK_WED_WPDMA_RX_RING 0x770
#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
#define MTK_WED_WDMA_RING_TX 0x800
#define MTK_WED_WDMA_TX_MIB 0x810
#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
#define MTK_WED_WDMA_GLO_CFG 0xa04
#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
@@ -290,6 +333,20 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
#define MTK_WED_RX_BM_RX_DMAD 0xd80
#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
#define MTK_WED_RX_BM_BASE 0xd84
#define MTK_WED_RX_BM_INIT_PTR 0xd88
#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
#define MTK_WED_RX_PTR 0xd8c
#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
#define MTK_WED_RING_OFS_BASE 0x00
#define MTK_WED_RING_OFS_COUNT 0x04
#define MTK_WED_RING_OFS_CPU_IDX 0x08
@@ -300,7 +357,9 @@ struct mtk_wdma_desc {
#define MTK_WDMA_GLO_CFG 0x204
#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
@@ -329,4 +388,70 @@ struct mtk_wdma_desc {
/* DMA channel mapping */
#define HIFSYS_DMA_AG_MAP 0x008
#define MTK_WED_RTQM_GLO_CFG 0xb00
#define MTK_WED_RTQM_BUSY BIT(1)
#define MTK_WED_RTQM_Q_RST BIT(2)
#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2N_MIB 0xb80
#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2B_MIB 0xb8c
#define MTK_WED_RTQM_PFDBK_MIB 0xb90
#define MTK_WED_RROQM_GLO_CFG 0xc04
#define MTK_WED_RROQM_RST_IDX 0xc08
#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
#define MTK_WED_RROQ_BASE_L 0xc80
#define MTK_WED_RROQ_BASE_H 0xc84
#define MTK_WED_RROQM_MIOD_CFG 0xc8c
#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
#define MTK_WED_RROQM_MID_MIB 0xcc0
#define MTK_WED_RROQM_MOD_MIB 0xcc4
#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
#define MTK_WED_RROQM_FDBK_MIB 0xcd0
#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
#define MTK_WED_RX_BM_RX_DMAD 0xd80
#define MTK_WED_RX_BM_BASE 0xd84
#define MTK_WED_RX_BM_INIT_PTR 0xd88
#define MTK_WED_RX_BM_PTR 0xd8c
#define MTK_WED_RX_BM_PTR_HEAD GENMASK(31, 16)
#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
#define MTK_WED_RX_BM_BLEN 0xd90
#define MTK_WED_RX_BM_STS 0xd94
#define MTK_WED_RX_BM_INTF2 0xd98
#define MTK_WED_RX_BM_INTF 0xd9c
#define MTK_WED_RX_BM_ERR_STS 0xda8
#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
#define MTK_WED_PCIE_INT_MASK 0x0
#endif
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
* Sujuan Chen <sujuan.chen@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
u32 val;
if (regmap_read(wo->mmio.regs, reg, &val))
val = ~0;
return val;
}
static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
regmap_write(wo->mmio.regs, reg, val);
}
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}
static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}
static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
unsigned long flags;
spin_lock_irqsave(&wo->mmio.lock, flags);
wo->mmio.irq_mask &= ~mask;
wo->mmio.irq_mask |= val;
if (set)
mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
spin_unlock_irqrestore(&wo->mmio.lock, flags);
}
static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
tasklet_schedule(&wo->mmio.irq_tasklet);
}
static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
u32 val)
{
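	/* ensure descriptor and buffer writes are visible before the doorbell */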
wmb();
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
bool flush)
{
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
int index = (q->tail + 1) % q->n_desc;
struct mtk_wed_wo_queue_entry *entry;
struct mtk_wed_wo_queue_desc *desc;
void *buf;
if (!q->queued)
return NULL;
if (flush)
q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
return NULL;
q->tail = index;
q->queued--;
desc = &q->desc[index];
entry = &q->entry[index];
buf = entry->buf;
if (len)
*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
le32_to_cpu(READ_ONCE(desc->ctrl)));
if (buf)
dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
DMA_FROM_DEVICE);
entry->buf = NULL;
return buf;
}
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
gfp_t gfp, bool rx)
{
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;
spin_lock_bh(&q->lock);
while (q->queued < q->n_desc) {
void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
if (!buf)
break;
addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
skb_free_frag(buf);
break;
}
q->head = (q->head + 1) % q->n_desc;
entry = &q->entry[q->head];
entry->addr = addr;
entry->len = q->buf_size;
q->entry[q->head].buf = buf;
if (rx) {
struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
entry->len);
WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
}
q->queued++;
n_buf++;
}
spin_unlock_bh(&q->lock);
return n_buf;
}
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
for (;;) {
struct mtk_wed_mcu_hdr *hdr;
struct sk_buff *skb;
void *data;
u32 len;
data = mtk_wed_wo_dequeue(wo, q, &len, false);
if (!data)
break;
skb = build_skb(data, q->buf_size);
if (!skb) {
skb_free_frag(data);
continue;
}
__skb_put(skb, len);
if (mtk_wed_mcu_check_msg(wo, skb)) {
dev_kfree_skb(skb);
continue;
}
hdr = (struct mtk_wed_mcu_hdr *)skb->data;
if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
mtk_wed_mcu_rx_event(wo, skb);
else
mtk_wed_mcu_rx_unsolicited_event(wo, skb);
}
if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
u32 index = (q->head - 1) % q->n_desc;
mtk_wed_wo_queue_kick(wo, q, index);
}
}
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
struct mtk_wed_wo *wo = data;
mtk_wed_wo_set_isr(wo, 0);
tasklet_schedule(&wo->mmio.irq_tasklet);
return IRQ_HANDLED;
}
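/* Bottom half: mask the pending rx/exception sources and process the rx
 * message ring. mtk_wed_wo_rx_complete() acks the channel and re-arms the
 * rx bit in the cached mask; the rescheduled tasklet run then flushes the
 * restored mask back to the hardware.
 */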
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
u32 intr, mask;
/* disable interrupts */
mtk_wed_wo_set_isr(wo, 0);
intr = mtk_wed_wo_get_isr(wo);
intr &= wo->mmio.irq_mask;
mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
mtk_wed_wo_irq_disable(wo, mask);
if (intr & MTK_WED_WO_RXCH_INT_MASK) {
mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
mtk_wed_wo_rx_complete(wo);
}
}
/* mtk wed wo hw queues */
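/* Allocate the coherent descriptor ring and its shadow entry array; both
 * allocations are device-managed.
 */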
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int n_desc, int buf_size, int index,
struct mtk_wed_wo_queue_regs *regs)
{
spin_lock_init(&q->lock);
q->regs = *regs;
q->n_desc = n_desc;
q->buf_size = buf_size;
q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
&q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
return 0;
}
static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
/* the ring was allocated with dmam_alloc_coherent(), so release the
 * devres-managed buffer instead of mixing in dma_free_coherent()
 */
dmam_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
q->desc_dma);
}
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
int i;
spin_lock_bh(&q->lock);
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
/* skip slots the refill loop never populated */
if (!entry->buf)
continue;
dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
DMA_TO_DEVICE);
skb_free_frag(entry->buf);
entry->buf = NULL;
}
spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
page = virt_to_page(q->cache.va);
__page_frag_cache_drain(page, q->cache.pagecnt_bias);
memset(&q->cache, 0, sizeof(q->cache));
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
spin_lock_bh(&q->lock);
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
if (!buf)
break;
skb_free_frag(buf);
}
spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
page = virt_to_page(q->cache.va);
__page_frag_cache_drain(page, q->cache.pagecnt_bias);
memset(&q->cache, 0, sizeof(q->cache));
}
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}
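/* Copy @skb into a preallocated tx buffer and pass the descriptor to the
 * WO mcu. The skb is consumed in all cases; -ENOMEM is returned when the
 * ring is full or the message does not fit into a tx buffer.
 */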
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
struct sk_buff *skb)
{
struct mtk_wed_wo_queue_entry *entry;
struct mtk_wed_wo_queue_desc *desc;
int ret = 0, index;
u32 ctrl;
spin_lock_bh(&q->lock);
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
index = (q->head + 1) % q->n_desc;
if (q->tail == index) {
ret = -ENOMEM;
goto out;
}
entry = &q->entry[index];
if (skb->len > entry->len) {
ret = -ENOMEM;
goto out;
}
desc = &q->desc[index];
q->head = index;
dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
DMA_TO_DEVICE);
memcpy(entry->buf, skb->data, skb->len);
dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
DMA_TO_DEVICE);
ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
mtk_wed_wo_queue_kick(wo, q, q->head);
mtk_wed_wo_kickout(wo);
out:
spin_unlock_bh(&q->lock);
dev_kfree_skb(skb);
return ret;
}
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
return 0;
}
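/* Map the wo-ccif syscon referenced by the mediatek,wo-ccif phandle,
 * request its interrupt and set up the tx/rx message rings on top of the
 * CCIF dummy/shadow registers shared with the WO mcu.
 */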
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
struct mtk_wed_wo_queue_regs regs;
struct device_node *np;
int ret;
np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
if (!np)
return -ENODEV;
wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
if (IS_ERR_OR_NULL(wo->mmio.regs)) {
/* don't leak the wo-ccif node reference on error */
ret = IS_ERR(wo->mmio.regs) ? PTR_ERR(wo->mmio.regs) : -ENODEV;
of_node_put(np);
return ret;
}
wo->mmio.irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
spin_lock_init(&wo->mmio.lock);
tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
KBUILD_MODNAME, wo);
if (ret)
return ret; /* the irq was never requested, nothing to unwind */
regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
&regs);
if (ret)
goto error;
mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
mtk_wed_wo_queue_reset(wo, &wo->q_tx);
regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
&regs);
if (ret)
goto error;
mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
mtk_wed_wo_queue_reset(wo, &wo->q_rx);
/* unmask rx queue and exception interrupts */
mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
return 0;
error:
devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
return ret;
}
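/* Teardown mirror of mtk_wed_wo_hardware_init(): quiesce interrupts
 * before draining and freeing the tx/rx message rings.
 */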
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
/* disable interrupts */
mtk_wed_wo_set_isr(wo, 0);
tasklet_disable(&wo->mmio.irq_tasklet);
disable_irq(wo->mmio.irq);
devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
mtk_wed_wo_queue_free(wo, &wo->q_tx);
mtk_wed_wo_queue_free(wo, &wo->q_rx);
}
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
struct mtk_wed_wo *wo;
int ret;
wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
if (!wo)
return -ENOMEM;
hw->wed_wo = wo;
wo->hw = hw;
ret = mtk_wed_wo_hardware_init(wo);
if (ret)
return ret;
ret = mtk_wed_mcu_init(wo);
if (ret)
return ret;
return mtk_wed_wo_exception_init(wo);
}
void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
struct mtk_wed_wo *wo = hw->wed_wo;
mtk_wed_wo_hw_deinit(wo);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
#ifndef __MTK_WED_WO_H
#define __MTK_WED_WO_H
#include <linux/skbuff.h>
#include <linux/netdevice.h>
struct mtk_wed_hw;
struct mtk_wed_mcu_hdr {
/* DW0 */
u8 version;
u8 cmd;
__le16 length;
/* DW1 */
__le16 seq;
__le16 flag;
/* DW2 */
__le32 status;
/* DW3 */
u8 rsv[20];
};
struct mtk_wed_wo_log_info {
__le32 sn;
__le32 total;
__le32 rro;
__le32 mod;
};
enum mtk_wed_wo_event {
MTK_WED_WO_EVT_LOG_DUMP = 0x1,
MTK_WED_WO_EVT_PROFILING = 0x2,
MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
};
#define MTK_WED_MODULE_ID_WO 1
#define MTK_FW_DL_TIMEOUT 4000000 /* us */
#define MTK_WOCPU_TIMEOUT 2000000 /* us */
enum {
MTK_WED_WARP_CMD_FLAG_RSP = BIT(0),
MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1),
MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2),
};
#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050
#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20
#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1
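/* Firmware memory regions; these correspond to the wo-emi, wo-ilm,
 * wo-data and wo-boot reserved-memory regions referenced by the WED
 * devicetree binding.
 */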
enum {
MTK_WED_WO_REGION_EMI,
MTK_WED_WO_REGION_ILM,
MTK_WED_WO_REGION_DATA,
MTK_WED_WO_REGION_BOOT,
__MTK_WED_WO_REGION_MAX,
};
enum mtk_wed_wo_state {
MTK_WED_WO_STATE_UNDEFINED,
MTK_WED_WO_STATE_INIT,
MTK_WED_WO_STATE_ENABLE,
MTK_WED_WO_STATE_DISABLE,
MTK_WED_WO_STATE_HALT,
MTK_WED_WO_STATE_GATING,
MTK_WED_WO_STATE_SER_RESET,
MTK_WED_WO_STATE_WF_RESET,
};
enum mtk_wed_wo_done_state {
MTK_WED_WOIF_UNDEFINED,
MTK_WED_WOIF_DISABLE_DONE,
MTK_WED_WOIF_TRIGGER_ENABLE,
MTK_WED_WOIF_ENABLE_DONE,
MTK_WED_WOIF_TRIGGER_GATING,
MTK_WED_WOIF_GATING_DONE,
MTK_WED_WOIF_TRIGGER_HALT,
MTK_WED_WOIF_HALT_DONE,
};
enum mtk_wed_dummy_cr_idx {
MTK_WED_DUMMY_CR_FWDL,
MTK_WED_DUMMY_CR_WO_STATUS,
};
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004)
#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c)
#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010)
#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014)
#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018)
#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c)
#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050)
#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060)
#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064)
#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
#define MTK_WED_WO_RING_SIZE 256
#define MTK_WED_WO_CMD_LEN 1504
#define MTK_WED_WO_TXCH_NUM 0
#define MTK_WED_WO_RXCH_NUM 1
#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
MTK_WED_WO_EXCEPTION_INT_MASK)
#define MTK_WED_WO_CCIF_BUSY 0x004
#define MTK_WED_WO_CCIF_START 0x008
#define MTK_WED_WO_CCIF_TCHNUM 0x00c
#define MTK_WED_WO_CCIF_RCHNUM 0x010
#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
#define MTK_WED_WO_CCIF_ACK 0x014
#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
#define MTK_WED_WO_CCIF_DUMMY1 0x020
#define MTK_WED_WO_CCIF_DUMMY2 0x024
#define MTK_WED_WO_CCIF_DUMMY3 0x028
#define MTK_WED_WO_CCIF_DUMMY4 0x02c
#define MTK_WED_WO_CCIF_SHADOW1 0x030
#define MTK_WED_WO_CCIF_SHADOW2 0x034
#define MTK_WED_WO_CCIF_SHADOW3 0x038
#define MTK_WED_WO_CCIF_SHADOW4 0x03c
#define MTK_WED_WO_CCIF_DUMMY5 0x050
#define MTK_WED_WO_CCIF_DUMMY6 0x054
#define MTK_WED_WO_CCIF_DUMMY7 0x058
#define MTK_WED_WO_CCIF_DUMMY8 0x05c
#define MTK_WED_WO_CCIF_SHADOW5 0x060
#define MTK_WED_WO_CCIF_SHADOW6 0x064
#define MTK_WED_WO_CCIF_SHADOW7 0x068
#define MTK_WED_WO_CCIF_SHADOW8 0x06c
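/* ctrl/info word layout of struct mtk_wed_wo_queue_desc */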
#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
#define MTK_WED_WO_CTL_BURST BIT(15)
#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
struct mtk_wed_wo_memory_region {
const char *name;
void __iomem *addr;
phys_addr_t phy_addr;
u32 size;
bool shared:1;
bool consumed:1;
};
struct mtk_wed_fw_region {
__le32 decomp_crc;
__le32 decomp_len;
__le32 decomp_blk_sz;
u8 rsv0[4];
__le32 addr;
__le32 len;
u8 feature_set;
u8 rsv1[15];
} __packed;
struct mtk_wed_fw_trailer {
u8 chip_id;
u8 eco_code;
u8 num_region;
u8 format_ver;
u8 format_flag;
u8 rsv[2];
char fw_ver[10];
char build_date[15];
u32 crc;
};
struct mtk_wed_wo_queue_regs {
u32 desc_base;
u32 ring_size;
u32 cpu_idx;
u32 dma_idx;
};
struct mtk_wed_wo_queue_desc {
__le32 buf0;
__le32 ctrl;
__le32 buf1;
__le32 info;
__le32 reserved[4];
} __packed __aligned(32);
struct mtk_wed_wo_queue_entry {
dma_addr_t addr;
void *buf;
u32 len;
};
struct mtk_wed_wo_queue {
struct mtk_wed_wo_queue_regs regs;
struct page_frag_cache cache;
spinlock_t lock;
struct mtk_wed_wo_queue_desc *desc;
dma_addr_t desc_dma;
struct mtk_wed_wo_queue_entry *entry;
u16 head;
u16 tail;
int n_desc;
int queued;
int buf_size;
};
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
struct mtk_wed_wo_memory_region boot;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
struct {
struct mutex mutex;
int timeout;
u16 seq;
struct sk_buff_head res_q;
wait_queue_head_t wait;
} mcu;
struct {
struct regmap *regs;
spinlock_t lock;
struct tasklet_struct irq_tasklet;
int irq;
u32 irq_mask;
} mmio;
};
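/* Sanity-check a message received from the WO mcu: only version 0 is
 * supported and the skb length must match the length advertised in the
 * mcu header.
 */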
static inline int
mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
{
struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
if (hdr->version)
return -EINVAL;
if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
return -EINVAL;
return 0;
}
void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
struct sk_buff *skb);
int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
const void *data, int len, bool wait_resp);
int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
int len);
int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
int mtk_wed_wo_init(struct mtk_wed_hw *hw);
void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
struct sk_buff *skb);
#endif /* __MTK_WED_WO_H */
......@@ -5,27 +5,76 @@
#include <linux/rcupdate.h>
#include <linux/regmap.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
#define WED_WO_STA_REC 0x6
struct mtk_wed_hw;
struct mtk_wdma_desc;
enum mtk_wed_wo_cmd {
MTK_WED_WO_CMD_WED_CFG,
MTK_WED_WO_CMD_WED_RX_STAT,
MTK_WED_WO_CMD_RRO_SER,
MTK_WED_WO_CMD_DBG_INFO,
MTK_WED_WO_CMD_DEV_INFO,
MTK_WED_WO_CMD_BSS_INFO,
MTK_WED_WO_CMD_STA_REC,
MTK_WED_WO_CMD_DEV_INFO_DUMP,
MTK_WED_WO_CMD_BSS_INFO_DUMP,
MTK_WED_WO_CMD_STA_REC_DUMP,
MTK_WED_WO_CMD_BA_INFO_DUMP,
MTK_WED_WO_CMD_FBCMD_Q_DUMP,
MTK_WED_WO_CMD_FW_LOG_CTRL,
MTK_WED_WO_CMD_LOG_FLUSH,
MTK_WED_WO_CMD_CHANGE_STATE,
MTK_WED_WO_CMD_CPU_STATS_ENABLE,
MTK_WED_WO_CMD_CPU_STATS_DUMP,
MTK_WED_WO_CMD_EXCEPTION_INIT,
MTK_WED_WO_CMD_PROF_CTRL,
MTK_WED_WO_CMD_STA_BA_DUMP,
MTK_WED_WO_CMD_BA_CTRL_DUMP,
MTK_WED_WO_CMD_RXCNT_CTRL,
MTK_WED_WO_CMD_RXCNT_INFO,
MTK_WED_WO_CMD_SET_CAP,
MTK_WED_WO_CMD_CCIF_RING_DUMP,
MTK_WED_WO_CMD_WED_END
};
struct mtk_rxbm_desc {
__le32 buf0;
__le32 token;
} __packed __aligned(4);
enum mtk_wed_bus_tye {
MTK_WED_BUS_PCIE,
MTK_WED_BUS_AXI,
};
#define MTK_WED_RING_CONFIGURED BIT(0)
struct mtk_wed_ring {
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
u32 desc_size;
int size;
u32 flags;
u32 reg_base;
void __iomem *wpdma;
};
struct mtk_wed_wo_rx_stats {
__le16 wlan_idx;
__le16 tid;
__le32 rx_pkt_cnt;
__le32 rx_byte_cnt;
__le32 rx_err_cnt;
__le32 rx_drop_cnt;
};
struct mtk_wed_device {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
const struct mtk_wed_ops *ops;
......@@ -34,17 +83,33 @@ struct mtk_wed_device {
bool init_done, running;
int wdma_idx;
int irq;
u8 version;
struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
struct mtk_wed_ring txfree_ring;
struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
struct {
int size;
void **pages;
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
} buf_ring;
} tx_buf_ring;
struct {
int size;
struct page_frag_cache rx_page;
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
struct {
struct mtk_wed_ring ring;
dma_addr_t miod_phys;
dma_addr_t fdbk_phys;
} rro;
/* filled by driver: */
struct {
......@@ -53,22 +118,36 @@ struct mtk_wed_device {
struct pci_dev *pci_dev;
};
enum mtk_wed_bus_tye bus_type;
void __iomem *base;
u32 phy_base;
u32 wpdma_phys;
u32 wpdma_int;
u32 wpdma_mask;
u32 wpdma_tx;
u32 wpdma_txfree;
u32 wpdma_rx_glo;
u32 wpdma_rx;
bool wcid_512;
u16 token_start;
unsigned int nbuf;
unsigned int rx_nbuf;
unsigned int rx_npkt;
unsigned int rx_size;
u8 tx_tbit[MTK_WED_TX_QUEUES];
u8 rx_tbit[MTK_WED_RX_QUEUES];
u8 txfree_tbit;
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
void (*offload_disable)(struct mtk_wed_device *wed);
u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
void (*release_rx_buf)(struct mtk_wed_device *wed);
void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats);
} wlan;
#endif
};
......@@ -77,9 +156,15 @@ struct mtk_wed_ops {
int (*attach)(struct mtk_wed_device *dev);
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs);
int (*txfree_ring_setup)(struct mtk_wed_device *dev,
void __iomem *regs);
int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
void *data, int len);
void (*detach)(struct mtk_wed_device *dev);
void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
u32 reason, u32 hash);
void (*stop)(struct mtk_wed_device *dev);
void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
......@@ -114,6 +199,16 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
return ret;
}
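/* rx offload is only supported by WED v2 (MT7986); v1 (MT7622) provides
 * the tx path only.
 */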
static inline bool
mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
return dev->version != 1;
#else
return false;
#endif
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
......@@ -130,6 +225,12 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
(_dev)->ops->irq_get(_dev, _mask)
#define mtk_wed_device_irq_set_mask(_dev, _mask) \
(_dev)->ops->irq_set_mask(_dev, _mask)
#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
(_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
(_dev)->ops->msg_update(_dev, _id, _msg, _len)
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
......@@ -143,6 +244,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
#define mtk_wed_device_irq_get(_dev, _mask) 0
#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
#endif
#endif