Commit 4e4a105f authored by Thomas Petazzoni, committed by David S. Miller

net: mvpp2: store physical address of buffer in rx_desc->buf_cookie

The RX descriptors of the PPv2 hardware can store several pieces of
information, amongst which:

 - the DMA address of the buffer in which the data has been received
 - a "cookie" field, left to the use of the driver, and not used by the
   hardware

In the current implementation, the "cookie" field is used to store the
virtual address of the buffer, so that in the receive completion path,
we can easily get the virtual address of the buffer that corresponds to
a completed RX descriptor.

On PPv2.1, used on 32-bit platforms, those two fields are 32 bits wide,
which is enough to store a DMA address in the first field, and a virtual
address in the second field.

On PPv2.2, used on 64-bit platforms, these two fields have been extended
to 40 bits. While 40 bits is enough to store a DMA address (as long as
the DMA mask is 40 bits or lower), it is not enough to store a virtual
address. Therefore, the "cookie" field can no longer be used to store
the virtual address of the buffer.

However, as Russell King pointed out, the RX buffers are always
allocated in the kernel linear mapping, and therefore using
phys_to_virt() on the physical address of the RX buffer is possible and
correct.
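
As a quick illustration (not part of the patch, and using an arbitrary
buffer size), a buffer obtained from kmalloc() or the page allocator lives
in the kernel linear mapping, so the physical/virtual conversion
round-trips; error handling is omitted:

	/* Illustration only: linear-mapping buffers round-trip through
	 * virt_to_phys()/phys_to_virt(). This would not hold for
	 * vmalloc() or userspace addresses.
	 */
	void *buf = kmalloc(2048, GFP_KERNEL);	/* arbitrary size */
	phys_addr_t pa = virt_to_phys(buf);

	WARN_ON(phys_to_virt(pa) != buf);	/* holds for linear mapping */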

Therefore, this commit changes the driver to use the "cookie" field to
store the physical address instead of the virtual
address. phys_to_virt() is used in the receive completion path to
retrieve the virtual address from the physical address.

It is obviously important to realize that the DMA address and physical
address are two different things, which is why we store both in the RX
descriptors. While those addresses may be identical in some situations,
they remain two distinct concepts, and both addresses should be handled
separately.
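
For reference, a simplified sketch of the resulting scheme. The helper
names refill_buffer() and rx_buffer_virt() are made up for illustration
only; the registers and descriptor fields are the ones touched by the
diff below:

	/* Refill side: the BM "virtual address" release register now
	 * receives the physical address, which the hardware later echoes
	 * back untouched in rx_desc->buf_cookie.
	 */
	static void refill_buffer(struct mvpp2_port *port, int pool,
				  void *buf, dma_addr_t dma_addr)
	{
		phys_addr_t phys_addr = virt_to_phys(buf);

		mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, phys_addr);
		mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), dma_addr);
	}

	/* Receive completion side: recover the CPU pointer from the cookie */
	static void *rx_buffer_virt(struct mvpp2_rx_desc *rx_desc)
	{
		return phys_to_virt(rx_desc->buf_cookie);
	}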
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4d6c2a67
@@ -3412,20 +3412,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
 
 	for (i = 0; i < bm_pool->buf_num; i++) {
 		dma_addr_t buf_dma_addr;
-		unsigned long vaddr;
+		phys_addr_t buf_phys_addr;
+		void *data;
 
-		/* Get buffer virtual address (indirect access) */
 		buf_dma_addr = mvpp2_read(priv,
 					  MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
-		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+		buf_phys_addr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
 
 		dma_unmap_single(dev, buf_dma_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
 
-		if (!vaddr)
+		data = (void *)phys_to_virt(buf_phys_addr);
+		if (!data)
 			break;
 
-		mvpp2_frag_free(bm_pool, (void *)vaddr);
+		mvpp2_frag_free(bm_pool, data);
 	}
 
 	/* Update BM driver with number of buffers removed from pool */
@@ -3542,6 +3543,7 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
 			     struct mvpp2_bm_pool *bm_pool,
 			     dma_addr_t *buf_dma_addr,
+			     phys_addr_t *buf_phys_addr,
 			     gfp_t gfp_mask)
 {
 	dma_addr_t dma_addr;
@@ -3559,6 +3561,7 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
 		return NULL;
 	}
 	*buf_dma_addr = dma_addr;
+	*buf_phys_addr = virt_to_phys(data);
 
 	return data;
 }
@@ -3583,20 +3586,25 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
 				     dma_addr_t buf_dma_addr,
-				     unsigned long buf_virt_addr)
+				     phys_addr_t buf_phys_addr)
 {
-	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
+	 * returned in the "cookie" field of the RX
+	 * descriptor. Instead of storing the virtual address, we
+	 * store the physical address
+	 */
+	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
 	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 }
 
 /* Refill BM pool */
 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
 			      dma_addr_t dma_addr,
-			      unsigned long cookie)
+			      phys_addr_t phys_addr)
 {
 	int pool = mvpp2_bm_cookie_pool_get(bm);
 
-	mvpp2_bm_pool_put(port, pool, dma_addr, cookie);
+	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
 }
 
 /* Allocate buffers for the pool */
@@ -3605,6 +3613,7 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
 {
 	int i, buf_size, total_size;
 	dma_addr_t dma_addr;
+	phys_addr_t phys_addr;
 	void *buf;
 
 	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3619,12 +3628,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
 	}
 
 	for (i = 0; i < buf_num; i++) {
-		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, GFP_KERNEL);
+		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+				      &phys_addr, GFP_KERNEL);
 		if (!buf)
 			break;
 
 		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
-				  (unsigned long)buf);
+				  phys_addr);
 	}
 
 	/* Update BM driver with number of buffers added to pool */
@@ -4983,14 +4993,16 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
 			   struct mvpp2_bm_pool *bm_pool, u32 bm)
 {
 	dma_addr_t dma_addr;
+	phys_addr_t phys_addr;
 	void *buf;
 
 	/* No recycle or too many buffers are in use, so allocate a new skb */
-	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, GFP_ATOMIC);
+	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+			      GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;
 
-	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)buf);
+	mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
 
 	return 0;
 }
@@ -5055,7 +5067,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
 		dma_addr = rx_desc->buf_dma_addr;
-		data = (void *)(uintptr_t)rx_desc->buf_cookie;
+		data = (void *)phys_to_virt(rx_desc->buf_cookie);
 
 		bm = mvpp2_bm_cookie_build(rx_desc);
 		pool = mvpp2_bm_cookie_pool_get(bm);