Commit 29d37fa1 authored by Emil Tantilov, committed by David S. Miller

ixgbevf: merge ixgbevf_tx_map and ixgbevf_tx_queue into a single function

This change merges the ixgbevf_tx_map call and the ixgbevf_tx_queue call
into a single function. To make room for this, the setting of the cmd_type
and olinfo flags is moved into separate helper functions.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9bdfefd2
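At its core, the merged ixgbevf_tx_map() makes a single pass over the skb head and its page fragments, splitting any chunk larger than the per-descriptor limit across several descriptors and wrapping around the ring as it goes, then writes the tail register itself. A minimal standalone sketch of that split-and-wrap pattern follows; the types, constants, and helper names are simplified stand-ins for illustration, not the driver's own structures:

#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16384u	/* stand-in for IXGBE_MAX_DATA_PER_TXD */
#define RING_COUNT	 8u	/* stand-in for tx_ring->count */

struct desc { uint64_t addr; uint32_t len; };

/* Split one buffer of 'size' bytes starting at 'dma' into as many
 * descriptors as needed, advancing (and wrapping) the ring index.
 */
static unsigned int fill_descs(struct desc *ring, unsigned int i,
			       uint64_t dma, uint32_t size)
{
	while (size > MAX_DATA_PER_TXD) {
		ring[i].addr = dma;
		ring[i].len = MAX_DATA_PER_TXD;
		dma += MAX_DATA_PER_TXD;
		size -= MAX_DATA_PER_TXD;
		if (++i == RING_COUNT)	/* wrap around the ring */
			i = 0;
	}
	ring[i].addr = dma;
	ring[i].len = size;		/* last piece of this buffer */
	if (++i == RING_COUNT)
		i = 0;
	return i;			/* next index for the caller to use */
}

int main(void)
{
	struct desc ring[RING_COUNT] = { { 0, 0 } };
	unsigned int i = 6;	/* start near the end of the ring to show the wrap */

	i = fill_descs(ring, i, 0x1000, 40000);	/* head: 3 descriptors */
	i = fill_descs(ring, i, 0x20000, 9000);	/* one frag: 1 descriptor */
	printf("next_to_use = %u\n", i);
	return 0;
}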
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_TXD_CMD_DEXT	0x20000000 /* Descriptor extension (0 = legacy) */
 #define IXGBE_TXD_CMD_VLE	0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD	0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD		(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
@@ -233,8 +233,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
-			tx_desc->wb.status = 0;
-
 			tx_buffer++;
 			tx_desc++;
 			i++;
@@ -254,8 +252,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			}
 		}
 
-		tx_desc->wb.status = 0;
-
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		tx_desc++;
@@ -2915,166 +2911,171 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    type_tucmd, mss_l4len_idx);
 }
 
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-			  struct ixgbevf_tx_buffer *first)
-{
-	dma_addr_t dma;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size;
-	int count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	int i;
-
-	i = tx_ring->next_to_use;
-
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->tx_flags = first->tx_flags;
-		dma = dma_map_single(tx_ring->dev, skb->data + offset,
-				     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
-
-		/* record length, and DMA address */
-		dma_unmap_len_set(tx_buffer_info, len, size);
-		dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	for (f = 0; f < nr_frags; f++) {
-		const struct skb_frag_struct *frag;
-
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)skb_frag_size(frag), total);
-		offset = 0;
-
-		while (len) {
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-			dma = skb_frag_dma_map(tx_ring->dev, frag,
-					       offset, size, DMA_TO_DEVICE);
-			if (dma_mapping_error(tx_ring->dev, dma))
-				goto dma_error;
-
-			/* record length, and DMA address */
-			dma_unmap_len_set(tx_buffer_info, len, size);
-			dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
-		if (total == 0)
-			break;
-	}
-
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
-	first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i);
-	first->time_stamp = jiffies;
-
-	return count;
-
-dma_error:
-	dev_err(tx_ring->dev, "TX DMA map failed\n");
-
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	count--;
-
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
-			i += tx_ring->count;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
-
-	return count;
-}
-
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
-			     struct ixgbevf_tx_buffer *first,
-			     int count, u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	u32 tx_flags = first->tx_flags;
-	unsigned int i;
-
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
-
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
-
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
-
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
-
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-	}
-
-	/*
-	 * Check Context must be set if Tx switch is enabled, which it
-	 * always is for case where virtual functions are running
-	 */
-	olinfo_status |= IXGBE_ADVTXD_CC;
-
-	olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-	i = tx_ring->next_to_use;
-	while (count--) {
-		dma_addr_t dma;
-		unsigned int len;
-
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		dma = dma_unmap_addr(tx_buffer_info, dma);
-		len = dma_unmap_len(tx_buffer_info, len);
-		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
-
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
+
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+	/* set segmentation enable bits for TSO/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+	return cmd_type;
+}
+
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+				     u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+	/* enble IPv4 checksum for TSO */
+	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+
+	/* use index 1 context for TSO/FSO/FCOE */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* Check Context must be set if Tx switch is enabled, which it
+	 * always is for case where virtual functions are running
+	 */
+	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+	tx_desc->read.olinfo_status = olinfo_status;
+}
+
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+			   struct ixgbevf_tx_buffer *first,
+			   const u8 hdr_len)
+{
+	dma_addr_t dma;
+	struct sk_buff *skb = first->skb;
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	unsigned int paylen = skb->len - hdr_len;
+	u32 tx_flags = first->tx_flags;
+	__le32 cmd_type;
+	u16 i = tx_ring->next_to_use;
+
+	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		goto dma_error;
+
+	/* record length, and DMA address */
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	for (;;) {
+		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
+
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+			tx_desc->read.olinfo_status = 0;
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.olinfo_status = 0;
+
+		frag++;
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+	tx_desc->read.cmd_type_len = cmd_type;
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier (wmb) to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
 	tx_ring->next_to_use = i;
 }
@@ -3167,17 +3168,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else
 		ixgbevf_tx_csum(tx_ring, first);
 
-	ixgbevf_tx_queue(tx_ring, first,
-			 ixgbevf_tx_map(tx_ring, first), hdr_len);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
-	writel(tx_ring->next_to_use, tx_ring->tail);
+	ixgbevf_tx_map(tx_ring, first, hdr_len);
 
 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;