Commit 11871aba authored by Horatiu Vultur's avatar Horatiu Vultur Committed by David S. Miller

net: lan966x: Use page_pool API

Use the page_pool API for allocation, freeing and DMA handling instead
of dev_alloc_pages, __free_pages and dma_map_page.
Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6a2159be
...@@ -7,5 +7,6 @@ config LAN966X_SWITCH ...@@ -7,5 +7,6 @@ config LAN966X_SWITCH
depends on BRIDGE || BRIDGE=n depends on BRIDGE || BRIDGE=n
select PHYLINK select PHYLINK
select PACKING select PACKING
select PAGE_POOL
help help
This driver supports the Lan966x network switch device. This driver supports the Lan966x network switch device.
...@@ -10,47 +10,25 @@ static int lan966x_fdma_channel_active(struct lan966x *lan966x) ...@@ -10,47 +10,25 @@ static int lan966x_fdma_channel_active(struct lan966x *lan966x)
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
struct lan966x_db *db) struct lan966x_db *db)
{ {
struct lan966x *lan966x = rx->lan966x;
dma_addr_t dma_addr;
struct page *page; struct page *page;
page = dev_alloc_pages(rx->page_order); page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page)) if (unlikely(!page))
return NULL; return NULL;
dma_addr = dma_map_page(lan966x->dev, page, 0, db->dataptr = page_pool_get_dma_addr(page);
PAGE_SIZE << rx->page_order,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
goto free_page;
db->dataptr = dma_addr;
return page; return page;
free_page:
__free_pages(page, rx->page_order);
return NULL;
} }
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x;
struct lan966x_rx_dcb *dcb;
struct lan966x_db *db;
int i, j; int i, j;
for (i = 0; i < FDMA_DCB_MAX; ++i) { for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb = &rx->dcbs[i]; for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
page_pool_put_full_page(rx->page_pool,
for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { rx->page[i][j], false);
db = &dcb->db[j];
dma_unmap_single(lan966x->dev,
(dma_addr_t)db->dataptr,
PAGE_SIZE << rx->page_order,
DMA_FROM_DEVICE);
__free_pages(rx->page[i][j], rx->page_order);
}
} }
} }
...@@ -62,7 +40,7 @@ static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx) ...@@ -62,7 +40,7 @@ static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
if (unlikely(!page)) if (unlikely(!page))
return; return;
__free_pages(page, rx->page_order); page_pool_recycle_direct(rx->page_pool, page);
} }
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
...@@ -84,6 +62,25 @@ static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, ...@@ -84,6 +62,25 @@ static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
rx->last_entry = dcb; rx->last_entry = dcb;
} }
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = {
.order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = FDMA_DCB_MAX,
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
.offset = 0,
.max_len = rx->max_mtu -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
};
rx->page_pool = page_pool_create(&pp_params);
return PTR_ERR_OR_ZERO(rx->page_pool);
}
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
...@@ -93,6 +90,9 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) ...@@ -93,6 +90,9 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
int i, j; int i, j;
int size; int size;
if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool);
/* calculate how many pages are needed to allocate the dcbs */ /* calculate how many pages are needed to allocate the dcbs */
size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
size = ALIGN(size, PAGE_SIZE); size = ALIGN(size, PAGE_SIZE);
...@@ -436,10 +436,6 @@ static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port) ...@@ -436,10 +436,6 @@ static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
FDMA_DCB_STATUS_BLOCKL(db->status), FDMA_DCB_STATUS_BLOCKL(db->status),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
lan966x_ifh_get_src_port(page_address(page), src_port); lan966x_ifh_get_src_port(page_address(page), src_port);
if (WARN_ON(*src_port >= lan966x->num_phys_ports)) if (WARN_ON(*src_port >= lan966x->num_phys_ports))
return FDMA_ERROR; return FDMA_ERROR;
...@@ -468,6 +464,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx, ...@@ -468,6 +464,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
if (unlikely(!skb)) if (unlikely(!skb))
goto free_page; goto free_page;
skb_mark_for_recycle(skb);
skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
lan966x_ifh_get_timestamp(skb->data, &timestamp); lan966x_ifh_get_timestamp(skb->data, &timestamp);
...@@ -495,7 +493,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx, ...@@ -495,7 +493,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
return skb; return skb;
free_page: free_page:
__free_pages(page, rx->page_order); page_pool_recycle_direct(rx->page_pool, page);
return NULL; return NULL;
} }
...@@ -740,6 +738,7 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x) ...@@ -740,6 +738,7 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{ {
struct page_pool *page_pool;
dma_addr_t rx_dma; dma_addr_t rx_dma;
void *rx_dcbs; void *rx_dcbs;
u32 size; u32 size;
...@@ -748,6 +747,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -748,6 +747,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
/* Store these for later to free them */ /* Store these for later to free them */
rx_dma = lan966x->rx.dma; rx_dma = lan966x->rx.dma;
rx_dcbs = lan966x->rx.dcbs; rx_dcbs = lan966x->rx.dcbs;
page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi); napi_synchronize(&lan966x->napi);
napi_disable(&lan966x->napi); napi_disable(&lan966x->napi);
...@@ -756,6 +756,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -756,6 +756,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
lan966x_fdma_rx_disable(&lan966x->rx); lan966x_fdma_rx_disable(&lan966x->rx);
lan966x_fdma_rx_free_pages(&lan966x->rx); lan966x_fdma_rx_free_pages(&lan966x->rx);
lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
lan966x->rx.max_mtu = new_mtu;
err = lan966x_fdma_rx_alloc(&lan966x->rx); err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err) if (err)
goto restore; goto restore;
...@@ -765,11 +766,14 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -765,11 +766,14 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
size = ALIGN(size, PAGE_SIZE); size = ALIGN(size, PAGE_SIZE);
dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
page_pool_destroy(page_pool);
lan966x_fdma_wakeup_netdev(lan966x); lan966x_fdma_wakeup_netdev(lan966x);
napi_enable(&lan966x->napi); napi_enable(&lan966x->napi);
return err; return err;
restore: restore:
lan966x->rx.page_pool = page_pool;
lan966x->rx.dma = rx_dma; lan966x->rx.dma = rx_dma;
lan966x->rx.dcbs = rx_dcbs; lan966x->rx.dcbs = rx_dcbs;
lan966x_fdma_rx_start(&lan966x->rx); lan966x_fdma_rx_start(&lan966x->rx);
...@@ -777,19 +781,22 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -777,19 +781,22 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err; return err;
} }
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	int frame_size;

	/* Worst-case RX buffer size: the max MTU plus the extraction IFH,
	 * two VLAN tags, and tail room for the skb_shared_info placed at
	 * the end of the buffer.
	 */
	frame_size = lan966x_fdma_get_max_mtu(lan966x);
	frame_size += IFH_LEN_BYTES;
	frame_size += VLAN_HLEN * 2;
	frame_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return frame_size;
}
int lan966x_fdma_change_mtu(struct lan966x *lan966x) int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{ {
int max_mtu; int max_mtu;
int err; int err;
u32 val; u32 val;
max_mtu = lan966x_fdma_get_max_mtu(lan966x); max_mtu = lan966x_fdma_get_max_frame(lan966x);
max_mtu += IFH_LEN_BYTES; if (max_mtu == lan966x->rx.max_mtu)
max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
max_mtu += VLAN_HLEN * 2;
if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
lan966x->rx.page_order)
return 0; return 0;
/* Disable the CPU port */ /* Disable the CPU port */
...@@ -844,6 +851,7 @@ int lan966x_fdma_init(struct lan966x *lan966x) ...@@ -844,6 +851,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
lan966x->rx.lan966x = lan966x; lan966x->rx.lan966x = lan966x;
lan966x->rx.channel_id = FDMA_XTR_CHANNEL; lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x; lan966x->tx.lan966x = lan966x;
lan966x->tx.channel_id = FDMA_INJ_CHANNEL; lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
lan966x->tx.last_in_use = -1; lan966x->tx.last_in_use = -1;
...@@ -876,5 +884,6 @@ void lan966x_fdma_deinit(struct lan966x *lan966x) ...@@ -876,5 +884,6 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
lan966x_fdma_rx_free_pages(&lan966x->rx); lan966x_fdma_rx_free_pages(&lan966x->rx);
lan966x_fdma_rx_free(&lan966x->rx); lan966x_fdma_rx_free(&lan966x->rx);
page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx); lan966x_fdma_tx_free(&lan966x->tx);
} }
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/phy.h> #include <linux/phy.h>
#include <linux/phylink.h> #include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
#include <net/switchdev.h> #include <net/switchdev.h>
...@@ -161,7 +162,14 @@ struct lan966x_rx { ...@@ -161,7 +162,14 @@ struct lan966x_rx {
*/ */
u8 page_order; u8 page_order;
/* Represents the max size frame that it can receive to the CPU. This
* includes the IFH + VLAN tags + frame + skb_shared_info
*/
u32 max_mtu;
u8 channel_id; u8 channel_id;
struct page_pool *page_pool;
}; };
struct lan966x_tx_dcb_buf { struct lan966x_tx_dcb_buf {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment