Commit a012649d authored by Ebru Akagunduz, committed by Greg Kroah-Hartman

Staging: octeon: fix line over 80 characters in ethernet-tx.c

Fix checkpatch.pl issues with lines over 80 characters in ethernet-tx.c.
Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6b478c2c
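
The hunks below wrap each statement flagged by checkpatch.pl so it fits within 80 columns, typically by breaking the argument list and aligning the continuation line under the opening parenthesis of the call. As a rough illustration of that wrapping idiom only, here is a minimal, self-contained C sketch; the helper name fetch_and_add32 and the values used are made up for this example and do not come from the driver:

#include <stdio.h>

#define MAX_SKB_TO_FREE 10

/*
 * Hypothetical stand-in for a long driver call such as
 * cvmx_fau_fetch_and_add32(); it exists only for this example.
 */
static int fetch_and_add32(int reg, int value)
{
	return reg + value;
}

int main(void)
{
	int fau = 4, qos = 2;
	int skb_to_free;

	/*
	 * Before the patch, calls like this sat on a single line and ran
	 * past column 80.  The patch wraps the argument list and aligns
	 * the continuation under the opening parenthesis, as below.
	 */
	skb_to_free = fetch_and_add32(fau + qos * 4,
				      MAX_SKB_TO_FREE);

	printf("skb_to_free = %d\n", skb_to_free);
	return 0;
}
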
@@ -78,10 +78,12 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
 static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
 {
 	int32_t undo;
-	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
+						   MAX_SKB_TO_FREE;
 	if (undo > 0)
 		cvmx_fau_atomic_add32(fau, -undo);
-	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
+						       -skb_to_free;
 	return skb_to_free;
 }
@@ -108,8 +110,10 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
 	for (qos = 0; qos < queues_per_port; qos++) {
 		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
 			continue;
-		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
-		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+						       MAX_SKB_TO_FREE);
+		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+							 priv->fau+qos*4);
 		total_freed += skb_to_free;
@@ -117,12 +121,14 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
 			struct sk_buff *to_free_list = NULL;
 			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 			while (skb_to_free > 0) {
-				struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+				struct sk_buff *t;
+				t = __skb_dequeue(&priv->tx_free_list[qos]);
 				t->next = to_free_list;
 				to_free_list = t;
 				skb_to_free--;
 			}
-			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+					       flags);
 			/* Do the actual freeing outside of the lock. */
 			while (to_free_list) {
 				struct sk_buff *t = to_free_list;
@@ -211,15 +217,23 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (unlikely(__skb_linearize(skb))) {
 			queue_type = QUEUE_DROP;
 			if (USE_ASYNC_IOBDMA) {
-				/* Get the number of skbuffs in use by the hardware */
+				/*
+				 * Get the number of skbuffs in use
+				 * by the hardware
+				 */
 				CVMX_SYNCIOBDMA;
-				skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+				skb_to_free =
+					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
 			} else {
-				/* Get the number of skbuffs in use by the hardware */
-				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
-						       MAX_SKB_TO_FREE);
+				/*
+				 * Get the number of skbuffs in use
+				 * by the hardware
+				 */
+				skb_to_free = cvmx_fau_fetch_and_add32(
+					priv->fau + qos * 4, MAX_SKB_TO_FREE);
 			}
-			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+					priv->fau + qos * 4);
 			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 			goto skip_xmit;
 		}
@@ -276,7 +290,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
-			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
+			hw_buffer.s.addr = XKPHYS_TO_PHYS(
+				(u64)(page_address(fs->page.p) +
+				fs->page_offset));
 			hw_buffer.s.size = fs->size;
 			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
 		}
@@ -358,7 +374,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		 */
 		pko_command.s.dontfree = 0;
-		hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
+		hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
+				   ((unsigned long)fpa_head >> 7);
 		*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
 		/*
@@ -422,17 +440,22 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		queue_type = QUEUE_HW;
 	}
 	if (USE_ASYNC_IOBDMA)
-		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+		cvmx_fau_async_fetch_and_add32(
+				CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
 	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 	/* Drop this packet if we have too many already queued to the HW */
-	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
+		     MAX_OUT_QUEUE_DEPTH)) {
 		if (dev->tx_queue_len != 0) {
 			/* Drop the lock when notifying the core. */
-			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+					       flags);
 			netif_stop_queue(dev);
-			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
+					  flags);
 		} else {
 			/* If not using normal queueing. */
 			queue_type = QUEUE_DROP;
@@ -448,7 +471,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 					priv->queue + qos,
 					pko_command, hw_buffer,
 					CVMX_PKO_LOCK_NONE))) {
-		printk_ratelimited("%s: Failed to send the packet\n", dev->name);
+		printk_ratelimited("%s: Failed to send the packet\n",
+				   dev->name);
 		queue_type = QUEUE_DROP;
 	}
 skip_xmit:
@@ -493,7 +517,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
 	} else {
-		total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
+		total_to_clean = cvmx_fau_fetch_and_add32(
+				FAU_TOTAL_TX_TO_CLEAN, 1);
 	}
 	if (total_to_clean & 0x3ff) {