Commit 6a203cb5 authored by Yoshihiro Shimoda, committed by David S. Miller

net: rswitch: Use build_skb() for RX

If this hardware receives a jumbo frame of 2 KiB or more, the frame
will be split across multiple descriptors. To prepare for supporting
this in the near future, use build_skb() instead of
netdev_alloc_skb_ip_align().

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 88570341
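
Reviewer note (not part of the commit): the change moves RX from preallocating full sk_buffs to allocating raw page fragments that are wrapped in an skb only when a packet actually arrives. Below is a minimal, self-contained sketch of that pattern using the same kernel APIs the patch switches to; BUF_SIZE/HEADROOM and the two helper names are placeholders, not driver code.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define BUF_SIZE	2304	/* placeholder; the driver uses RSWITCH_BUF_SIZE */
#define HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)

/* Fill one RX slot: allocate a page fragment and DMA-map only the region
 * the device may write, leaving HEADROOM untouched for the network stack.
 */
static void *rx_slot_fill(struct device *dev, dma_addr_t *dma)
{
	void *buf = netdev_alloc_frag(BUF_SIZE);

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf + HEADROOM, BUF_SIZE - HEADROOM,
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		skb_free_frag(buf);	/* frag not yet owned by an skb */
		return NULL;
	}
	return buf;
}

/* Complete one RX slot: unmap, then wrap the fragment in an skb with no
 * copy; build_skb() reuses the buffer as the skb's data area.
 */
static struct sk_buff *rx_slot_complete(struct device *dev, void *buf,
					dma_addr_t dma, unsigned int pkt_len)
{
	struct sk_buff *skb;

	dma_unmap_single(dev, dma, BUF_SIZE - HEADROOM, DMA_FROM_DEVICE);

	skb = build_skb(buf, BUF_SIZE);
	if (!skb)
		return NULL;	/* caller still owns buf (skb_free_frag()) */

	skb_reserve(skb, HEADROOM);	/* skip the unmapped headroom */
	skb_put(skb, pkt_len);		/* expose the received payload */
	return skb;
}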
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -235,19 +235,18 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
 	return false;
 }
 
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
-					unsigned int start_index,
-					unsigned int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+					   unsigned int start_index,
+					   unsigned int num)
 {
 	unsigned int i, index;
 
 	for (i = 0; i < num; i++) {
 		index = (i + start_index) % gq->ring_size;
-		if (gq->skbs[index])
+		if (gq->rx_bufs[index])
 			continue;
-		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
-							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
-		if (!gq->skbs[index])
+		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+		if (!gq->rx_bufs[index])
 			goto err;
 	}
@@ -256,8 +255,8 @@ static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
 err:
 	for (; i-- > 0; ) {
 		index = (i + start_index) % gq->ring_size;
-		dev_kfree_skb(gq->skbs[index]);
-		gq->skbs[index] = NULL;
+		skb_free_frag(gq->rx_bufs[index]);
+		gq->rx_bufs[index] = NULL;
 	}
 
 	return -ENOMEM;
@@ -275,16 +274,17 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 		gq->rx_ring = NULL;
 
 		for (i = 0; i < gq->ring_size; i++)
-			dev_kfree_skb(gq->skbs[i]);
+			skb_free_frag(gq->rx_bufs[i]);
+		kfree(gq->rx_bufs);
+		gq->rx_bufs = NULL;
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
 				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
 		gq->tx_ring = NULL;
+		kfree(gq->skbs);
+		gq->skbs = NULL;
 	}
-
-	kfree(gq->skbs);
-	gq->skbs = NULL;
 }
 
 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -308,17 +308,20 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 	gq->ring_size = ring_size;
 	gq->ndev = ndev;
 
-	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
-	if (!gq->skbs)
-		return -ENOMEM;
-
 	if (!dir_tx) {
-		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+		if (!gq->rx_bufs)
+			return -ENOMEM;
+		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+			goto out;
 
 		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_ts_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
 	} else {
+		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+		if (!gq->skbs)
+			return -ENOMEM;
 		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -367,12 +370,13 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[i]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			desc->desc.die_dt = DT_FEMPTY | DIE;
 		} else {
@@ -395,8 +399,8 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	if (!gq->dir_tx) {
 		for (desc = gq->tx_ring; i-- > 0; desc++) {
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
-			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-					 DMA_FROM_DEVICE);
+			dma_unmap_single(ndev->dev.parent, dma_addr,
+					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 		}
 	}
@@ -433,12 +437,13 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 		desc = &gq->rx_ring[index];
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[index]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			dma_wmb();
 			desc->desc.die_dt = DT_FEMPTY | DIE;
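
Reviewer note: one subtlety preserved by this hunk (an observation, not a change the patch makes) is that the descriptor's size and buffer pointer must be visible to the hardware before die_dt hands the descriptor back, hence the dma_wmb() between those writes. In condensed form, with the driver's own identifiers but a hypothetical wrapper function:

/* Sketch: re-arm one RX descriptor. Fill the payload fields first,
 * fence, then flip ownership, so the hardware can never observe
 * DT_FEMPTY together with stale size/pointer fields.
 */
static void rswitch_rearm_rx_desc(struct rswitch_ext_ts_desc *desc,
				  dma_addr_t dma_addr)
{
	desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	dma_wmb();	/* device must see size/dptr before ownership flips */
	desc->desc.die_dt = DT_FEMPTY | DIE;
}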
@@ -456,8 +461,8 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 		index = (i + start_index) % gq->ring_size;
 		desc = &gq->rx_ring[index];
 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
-		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, dma_addr,
+				 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 	}
 }
@@ -724,10 +729,15 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
 		dma_rmb();
 		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
-		skb = gq->skbs[gq->cur];
-		gq->skbs[gq->cur] = NULL;
 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
-		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, dma_addr,
+				 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
+		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+		if (!skb)
+			goto out;
+		skb_reserve(skb, RSWITCH_HEADROOM);
+		skb_put(skb, pkt_len);
+
 		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
 		if (get_ts) {
 			struct skb_shared_hwtstamps *shhwtstamps;
@@ -739,12 +749,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
 			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 		}
-		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, ndev);
 		napi_gro_receive(&rdev->napi, skb);
 		rdev->ndev->stats.rx_packets++;
 		rdev->ndev->stats.rx_bytes += pkt_len;
 
+out:
+		gq->rx_bufs[gq->cur] = NULL;
 		gq->cur = rswitch_next_queue_index(gq, true, 1);
 		desc = &gq->rx_ring[gq->cur];
@@ -753,7 +764,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	}
 
 	num = rswitch_get_num_cur_queues(gq);
-	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
 	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
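
Reviewer note on the refill contract (sketch, not quoting unseen code): rx_bufs[] slots consumed in the loop above are left NULL, and the two calls here re-populate exactly those slots and re-arm their descriptors. As a hypothetical wrapper over the driver's own functions:

/* Hypothetical helper showing the refill contract; the two callees and
 * the gq fields are the driver's, the wrapper is illustrative only.
 */
static int rswitch_rx_refill(struct net_device *ndev,
			     struct rswitch_gwca_queue *gq)
{
	unsigned int num = rswitch_get_num_cur_queues(gq);
	int ret;

	/* Allocate frags for every slot consumed since gq->dirty ... */
	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
	if (ret < 0)
		return ret;

	/* ... then map them and hand the descriptors back to the device. */
	return rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
}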
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -29,8 +29,13 @@
 #define RX_RING_SIZE		1024
 #define TS_RING_SIZE		(TX_RING_SIZE * RSWITCH_NUM_PORTS)
 
-#define PKT_BUF_SZ		1584
+#define RSWITCH_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
+#define RSWITCH_DESC_BUF_SIZE	2048
+#define RSWITCH_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 #define RSWITCH_ALIGN		128
+#define RSWITCH_BUF_SIZE	(RSWITCH_HEADROOM + RSWITCH_DESC_BUF_SIZE + \
+				 RSWITCH_TAILROOM + RSWITCH_ALIGN)
+#define RSWITCH_MAP_BUF_SIZE	(RSWITCH_BUF_SIZE - RSWITCH_HEADROOM)
 #define RSWITCH_MAX_CTAG_PCP	7
 #define RSWITCH_TIMEOUT_US	100000
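
Reviewer note on how the new sizes relate (a worked breakdown; the NET_SKB_PAD/NET_IP_ALIGN values below are common defaults and both are arch/config dependent):

/* Layout of one RSWITCH_BUF_SIZE fragment, assuming NET_SKB_PAD == 64
 * and NET_IP_ALIGN == 2 (typical, but configuration-dependent):
 *
 *   RSWITCH_HEADROOM       66    stack headroom, never DMA-mapped
 *   RSWITCH_DESC_BUF_SIZE  2048  what the descriptor advertises (info_ds)
 *   RSWITCH_TAILROOM       sizeof(struct skb_shared_info), cacheline-
 *                                aligned, as build_skb() requires
 *   RSWITCH_ALIGN          128   slack for alignment
 *
 * RSWITCH_BUF_SIZE feeds netdev_alloc_frag()/build_skb(), while
 * RSWITCH_MAP_BUF_SIZE = RSWITCH_BUF_SIZE - RSWITCH_HEADROOM is what gets
 * DMA-mapped, starting at rx_bufs[i] + RSWITCH_HEADROOM, so the device
 * can never scribble over the headroom the stack will use.
 */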
@@ -945,8 +950,18 @@ struct rswitch_gwca_queue {
 	/* For [rt]x_ring */
 	unsigned int index;
 	bool dir_tx;
-	struct sk_buff **skbs;
 	struct net_device *ndev;	/* queue to ndev for irq */
+
+	union {
+		/* For TX */
+		struct {
+			struct sk_buff **skbs;
+		};
+
+		/* For RX */
+		struct {
+			void **rx_bufs;
+		};
+	};
 };
 
 struct rswitch_gwca_ts_info {
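
Reviewer note on the union design: a queue is permanently either TX or RX (dir_tx), so only one of the two arrays is ever live, and overlaying them keeps the struct size unchanged while giving each direction an accurately typed pointer. A hypothetical helper (the field names are the driver's, the function is illustrative only):

/* Drop one ring entry with the free routine that matches how the buffer
 * was allocated: TX tracks full skbs, RX tracks raw page fragments from
 * netdev_alloc_frag().
 */
static void rswitch_queue_drop_entry(struct rswitch_gwca_queue *gq,
				     unsigned int index)
{
	if (gq->dir_tx) {
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	} else {
		skb_free_frag(gq->rx_bufs[index]);
		gq->rx_bufs[index] = NULL;
	}
}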