Commit 64a99fe5 authored by Rasmus Villemoes, committed by Jakub Kicinski

ethernet: ucc_geth: remove bd_mem_part and all associated code

The bd_mem_part member of ucc_geth_info always has the value
MEM_PART_SYSTEM, and AFAICT, there has never been any code setting it
to any other value. Moreover, muram is a somewhat precious resource,
so there's no point using that when normal memory serves just as well.

Apart from removing a lot of dead code, this is also motivated by
wanting to clean up the "store result from kmalloc() in a u32" mess.
Signed-off-by: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent b29fafd3
...@@ -72,7 +72,6 @@ MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); ...@@ -72,7 +72,6 @@ MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
static const struct ucc_geth_info ugeth_primary_info = { static const struct ucc_geth_info ugeth_primary_info = {
.uf_info = { .uf_info = {
.bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536, .max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */ /* adjusted at startup if max-speed 1000 */
...@@ -1854,12 +1853,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) ...@@ -1854,12 +1853,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
kfree(ugeth->rx_skbuff[i]); kfree(ugeth->rx_skbuff[i]);
if (ugeth->ug_info->uf_info.bd_mem_part == kfree((void *)ugeth->rx_bd_ring_offset[i]);
MEM_PART_SYSTEM)
kfree((void *)ugeth->rx_bd_ring_offset[i]);
else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM)
qe_muram_free(ugeth->rx_bd_ring_offset[i]);
ugeth->p_rx_bd_ring[i] = NULL; ugeth->p_rx_bd_ring[i] = NULL;
} }
} }
...@@ -1897,12 +1891,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) ...@@ -1897,12 +1891,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
kfree(ugeth->tx_skbuff[i]); kfree(ugeth->tx_skbuff[i]);
if (ugeth->p_tx_bd_ring[i]) { if (ugeth->p_tx_bd_ring[i]) {
if (ugeth->ug_info->uf_info.bd_mem_part == kfree((void *)ugeth->tx_bd_ring_offset[i]);
MEM_PART_SYSTEM)
kfree((void *)ugeth->tx_bd_ring_offset[i]);
else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM)
qe_muram_free(ugeth->tx_bd_ring_offset[i]);
ugeth->p_tx_bd_ring[i] = NULL; ugeth->p_tx_bd_ring[i] = NULL;
} }
} }
...@@ -2060,13 +2049,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) ...@@ -2060,13 +2049,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info; ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info; uf_info = &ug_info->uf_info;
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
pr_err("Bad memory partition value\n");
return -EINVAL;
}
/* Rx BD lengths */ /* Rx BD lengths */
for (i = 0; i < ug_info->numQueuesRx; i++) { for (i = 0; i < ug_info->numQueuesRx; i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
...@@ -2186,6 +2168,8 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) ...@@ -2186,6 +2168,8 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
/* Allocate Tx bds */ /* Allocate Tx bds */
for (j = 0; j < ug_info->numQueuesTx; j++) { for (j = 0; j < ug_info->numQueuesTx; j++) {
u32 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
/* Allocate in multiple of /* Allocate in multiple of
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
according to spec */ according to spec */
...@@ -2195,25 +2179,15 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) ...@@ -2195,25 +2179,15 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = UCC_GETH_TX_BD_RING_ALIGNMENT; ugeth->tx_bd_ring_offset[j] =
(u32) kmalloc((u32) (length + align), GFP_KERNEL);
ugeth->tx_bd_ring_offset[j] =
(u32) kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->tx_bd_ring_offset[j] != 0)
ugeth->p_tx_bd_ring[j] =
if (ugeth->tx_bd_ring_offset[j] != 0) (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
ugeth->p_tx_bd_ring[j] = align) & ~(align - 1));
(u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->tx_bd_ring_offset[j] =
qe_muram_alloc(length,
UCC_GETH_TX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
ugeth->p_tx_bd_ring[j] =
(u8 __iomem *) qe_muram_addr(ugeth->
tx_bd_ring_offset[j]);
}
if (!ugeth->p_tx_bd_ring[j]) { if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth)) if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n"); pr_err("Can not allocate memory for Tx bd rings\n");
...@@ -2271,25 +2245,16 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) ...@@ -2271,25 +2245,16 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
/* Allocate Rx bds */ /* Allocate Rx bds */
for (j = 0; j < ug_info->numQueuesRx; j++) { for (j = 0; j < ug_info->numQueuesRx; j++) {
u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { ugeth->rx_bd_ring_offset[j] =
u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT; (u32) kmalloc((u32) (length + align), GFP_KERNEL);
if (ugeth->rx_bd_ring_offset[j] != 0)
ugeth->rx_bd_ring_offset[j] = ugeth->p_rx_bd_ring[j] =
(u32) kmalloc((u32) (length + align), GFP_KERNEL); (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
if (ugeth->rx_bd_ring_offset[j] != 0) align) & ~(align - 1));
ugeth->p_rx_bd_ring[j] =
(u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->rx_bd_ring_offset[j] =
qe_muram_alloc(length,
UCC_GETH_RX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
ugeth->p_rx_bd_ring[j] =
(u8 __iomem *) qe_muram_addr(ugeth->
rx_bd_ring_offset[j]);
}
if (!ugeth->p_rx_bd_ring[j]) { if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth)) if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n"); pr_err("Can not allocate memory for Rx bd rings\n");
...@@ -2554,20 +2519,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ...@@ -2554,20 +2519,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
endOfRing = endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd); 1) * sizeof(struct qe_bd);
if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
(u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. last_bd_completed_address,
last_bd_completed_address, (u32) virt_to_phys(endOfRing));
(u32) virt_to_phys(endOfRing));
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
(u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
last_bd_completed_address,
(u32)qe_muram_dma(endOfRing));
}
} }
/* schedulerbasepointer */ /* schedulerbasepointer */
...@@ -2786,14 +2742,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ...@@ -2786,14 +2742,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Setup the table */ /* Setup the table */
/* Assume BD rings are already established */ /* Assume BD rings are already established */
for (i = 0; i < ug_info->numQueuesRx; i++) { for (i = 0; i < ug_info->numQueuesRx; i++) {
if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
(u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
(u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
}
/* rest of fields handled by QE */ /* rest of fields handled by QE */
} }
......
...@@ -27,12 +27,6 @@ ...@@ -27,12 +27,6 @@
#define QE_NUM_OF_BRGS 16 #define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024 #define QE_NUM_OF_PORTS 1024
/* Memory partitions
*/
#define MEM_PART_SYSTEM 0
#define MEM_PART_SECONDARY 1
#define MEM_PART_MURAM 2
/* Clocks and BRGs */ /* Clocks and BRGs */
enum qe_clock { enum qe_clock {
QE_CLK_NONE = 0, QE_CLK_NONE = 0,
......
...@@ -146,7 +146,6 @@ struct ucc_fast_info { ...@@ -146,7 +146,6 @@ struct ucc_fast_info {
resource_size_t regs; resource_size_t regs;
int irq; int irq;
u32 uccm_mask; u32 uccm_mask;
int bd_mem_part;
int brkpt_support; int brkpt_support;
int grant_support; int grant_support;
int tsa; int tsa;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment