Commit 29d37fa1 authored by Emil Tantilov, committed by David S. Miller

ixgbevf: merge ixgbevf_tx_map and ixgbevf_tx_queue into a single function

This change merges the ixgbevf_tx_map call and the ixgbevf_tx_queue call
into a single function.  To make room for this, the setting of the cmd_type
and olinfo flags is done in separate helper functions.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 9bdfefd2
...@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed; ...@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ #define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ #define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ #define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */ /* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc { union ixgbe_adv_tx_desc {
......
...@@ -233,8 +233,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -233,8 +233,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
/* unmap remaining buffers */ /* unmap remaining buffers */
while (tx_desc != eop_desc) { while (tx_desc != eop_desc) {
tx_desc->wb.status = 0;
tx_buffer++; tx_buffer++;
tx_desc++; tx_desc++;
i++; i++;
...@@ -254,8 +252,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -254,8 +252,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
} }
} }
tx_desc->wb.status = 0;
/* move us one more past the eop_desc for start of next pkt */ /* move us one more past the eop_desc for start of next pkt */
tx_buffer++; tx_buffer++;
tx_desc++; tx_desc++;
...@@ -2915,166 +2911,171 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, ...@@ -2915,166 +2911,171 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
type_tucmd, mss_l4len_idx); type_tucmd, mss_l4len_idx);
} }
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
struct ixgbevf_tx_buffer *first)
{ {
dma_addr_t dma; /* set type for advanced descriptor with frame checksum insertion */
struct sk_buff *skb = first->skb; __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
struct ixgbevf_tx_buffer *tx_buffer_info; IXGBE_ADVTXD_DCMD_IFCS |
unsigned int len; IXGBE_ADVTXD_DCMD_DEXT);
unsigned int total = skb->len;
unsigned int offset = 0, size;
int count = 0;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
int i;
i = tx_ring->next_to_use; /* set HW vlan bit if vlan is present */
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
len = min(skb_headlen(skb), total); /* set segmentation enable bits for TSO/FSO */
while (len) { if (tx_flags & IXGBE_TX_FLAGS_TSO)
tx_buffer_info = &tx_ring->tx_buffer_info[i]; cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->tx_flags = first->tx_flags; return cmd_type;
dma = dma_map_single(tx_ring->dev, skb->data + offset, }
size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
/* record length, and DMA address */ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
dma_unmap_len_set(tx_buffer_info, len, size); u32 tx_flags, unsigned int paylen)
dma_unmap_addr_set(tx_buffer_info, dma, dma); {
__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
len -= size; /* enable L4 checksum for TSO and TX checksum offload */
total -= size; if (tx_flags & IXGBE_TX_FLAGS_CSUM)
offset += size; olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
count++;
i++;
if (i == tx_ring->count)
i = 0;
}
for (f = 0; f < nr_frags; f++) { /* enable IPv4 checksum for TSO */
const struct skb_frag_struct *frag; if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
frag = &skb_shinfo(skb)->frags[f]; /* use index 1 context for TSO/FSO/FCOE */
len = min((unsigned int)skb_frag_size(frag), total); if (tx_flags & IXGBE_TX_FLAGS_TSO)
offset = 0; olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
while (len) { /* Check Context must be set if Tx switch is enabled, which it
tx_buffer_info = &tx_ring->tx_buffer_info[i]; * always is for case where virtual functions are running
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); */
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
tx_desc->read.olinfo_status = olinfo_status;
}
dma = skb_frag_dma_map(tx_ring->dev, frag, static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
offset, size, DMA_TO_DEVICE); struct ixgbevf_tx_buffer *first,
const u8 hdr_len)
{
dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
unsigned int paylen = skb->len - hdr_len;
u32 tx_flags = first->tx_flags;
__le32 cmd_type;
u16 i = tx_ring->next_to_use;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
cmd_type = ixgbevf_tx_cmd_type(tx_flags);
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, dma)) if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; goto dma_error;
/* record length, and DMA address */ /* record length, and DMA address */
dma_unmap_len_set(tx_buffer_info, len, size); dma_unmap_len_set(first, len, size);
dma_unmap_addr_set(tx_buffer_info, dma, dma); dma_unmap_addr_set(first, dma, dma);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
len -= size;
total -= size;
offset += size;
count++;
i++; i++;
if (i == tx_ring->count) tx_desc++;
if (i == tx_ring->count) {
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0; i = 0;
} }
if (total == 0)
break;
}
if (i == 0) dma += IXGBE_MAX_DATA_PER_TXD;
i = tx_ring->count - 1; size -= IXGBE_MAX_DATA_PER_TXD;
else
i = i - 1;
first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i);
first->time_stamp = jiffies;
return count; tx_desc->read.buffer_addr = cpu_to_le64(dma);
tx_desc->read.olinfo_status = 0;
}
dma_error: if (likely(!data_len))
dev_err(tx_ring->dev, "TX DMA map failed\n"); break;
/* clear timestamp and dma mappings for failed tx_buffer_info map */ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
tx_buffer_info->dma = 0;
count--;
/* clear timestamp and dma mappings for remaining portion of packet */ i++;
while (count >= 0) { tx_desc++;
count--; if (i == tx_ring->count) {
i--; tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
if (i < 0) i = 0;
i += tx_ring->count;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
} }
return count; size = skb_frag_size(frag);
} data_len -= size;
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
int count, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer_info;
u32 olinfo_status = 0, cmd_type_len = 0;
u32 tx_flags = first->tx_flags;
unsigned int i;
u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; tx_buffer = &tx_ring->tx_buffer_info[i];
dma_unmap_len_set(tx_buffer, len, size);
dma_unmap_addr_set(tx_buffer, dma, dma);
if (tx_flags & IXGBE_TX_FLAGS_VLAN) tx_desc->read.buffer_addr = cpu_to_le64(dma);
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; tx_desc->read.olinfo_status = 0;
if (tx_flags & IXGBE_TX_FLAGS_CSUM) frag++;
olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; }
if (tx_flags & IXGBE_TX_FLAGS_TSO) { /* write last descriptor with RS and EOP bits */
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
tx_desc->read.cmd_type_len = cmd_type;
/* use index 1 context for tso */ /* set the timestamp */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); first->time_stamp = jiffies;
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
}
/* /* Force memory writes to complete before letting h/w know there
* Check Context must be set if Tx switch is enabled, which it * are new descriptors to fetch. (Only applicable for weak-ordered
* always is for case where virtual functions are running * memory model archs, such as IA-64).
*
* We also need this memory barrier (wmb) to make certain all of the
* status bits have been updated before next_to_watch is written.
*/ */
olinfo_status |= IXGBE_ADVTXD_CC; wmb();
olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use; /* set next_to_watch value indicating a packet is present */
while (count--) { first->next_to_watch = tx_desc;
dma_addr_t dma;
unsigned int len;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
dma = dma_unmap_addr(tx_buffer_info, dma);
len = dma_unmap_len(tx_buffer_info, len);
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
i = 0; i = 0;
}
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); tx_ring->next_to_use = i;
/* notify HW of packet */
writel(i, tx_ring->tail);
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_buffer_info map */
for (;;) {
tx_buffer = &tx_ring->tx_buffer_info[i];
ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
if (tx_buffer == first)
break;
if (i == 0)
i = tx_ring->count;
i--;
}
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
} }
...@@ -3167,17 +3168,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -3167,17 +3168,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
else else
ixgbevf_tx_csum(tx_ring, first); ixgbevf_tx_csum(tx_ring, first);
ixgbevf_tx_queue(tx_ring, first, ixgbevf_tx_map(tx_ring, first, hdr_len);
ixgbevf_tx_map(tx_ring, first), hdr_len);
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(tx_ring->next_to_use, tx_ring->tail);
ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK; return NETDEV_TX_OK;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment