Commit 880b3ca5 authored by Igor Russkikh, committed by David S. Miller

net: aquantia: vlan offloads logic in datapath

Update the datapath, adding the logic needed for hardware-assisted
VLAN strip (RX) and VLAN insert (TX) behaviour.
Tested-by: Nikita Danilov <ndanilov@aquantia.com>
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d3ed7c5c
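The TX-side change below comes down to one rule: both TSO and hardware VLAN insertion need an extra context descriptor ahead of the data descriptors, so aq_nic_map_skb() now tracks a single need_context_tag flag and reserves one extra ring entry when it is set. Here is a minimal userspace C sketch of that decision; the struct and helper are hypothetical stand-ins for illustration only, not the driver's real types or API.

/* Illustrative sketch: mirrors the need_context_tag decision added in
 * aq_nic_map_skb(). fake_skb and head_descriptors_needed() are made-up
 * stand-ins, not part of the driver. */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {                /* stand-in for struct sk_buff */
        bool is_gso;             /* would be skb_is_gso(skb) */
        bool has_vlan_tag;       /* would be skb_vlan_tag_present(skb) */
};

/* One data descriptor is always needed for the packet head; a context
 * descriptor is added when either TSO parameters or a VLAN tag must be
 * handed to the hardware. */
static int head_descriptors_needed(const struct fake_skb *skb,
                                   bool vlan_tx_insert_enabled)
{
        bool need_context_tag = false;

        if (skb->is_gso)
                need_context_tag = true;
        if (vlan_tx_insert_enabled && skb->has_vlan_tag)
                need_context_tag = true;

        return need_context_tag ? 2 : 1;
}

int main(void)
{
        struct fake_skb plain = { .is_gso = false, .has_vlan_tag = false };
        struct fake_skb tagged = { .is_gso = false, .has_vlan_tag = true };

        printf("plain packet: %d descriptor(s)\n",
               head_descriptors_needed(&plain, true));
        printf("vlan-tagged packet: %d descriptor(s)\n",
               head_descriptors_needed(&tagged, true));
        return 0;
}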
@@ -429,26 +429,37 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
         unsigned int dx = ring->sw_tail;
         struct aq_ring_buff_s *first = NULL;
         struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+        bool need_context_tag = false;
+
+        dx_buff->flags = 0U;
 
         if (unlikely(skb_is_gso(skb))) {
-                dx_buff->flags = 0U;
+                dx_buff->mss = skb_shinfo(skb)->gso_size;
+                dx_buff->is_gso = 1U;
                 dx_buff->len_pkt = skb->len;
                 dx_buff->len_l2 = ETH_HLEN;
                 dx_buff->len_l3 = ip_hdrlen(skb);
                 dx_buff->len_l4 = tcp_hdrlen(skb);
-                dx_buff->mss = skb_shinfo(skb)->gso_size;
-                dx_buff->is_gso = 1U;
                 dx_buff->eop_index = 0xffffU;
-
                 dx_buff->is_ipv6 =
                         (ip_hdr(skb)->version == 6) ? 1U : 0U;
+                need_context_tag = true;
+        }
+
+        if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+                dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
+                dx_buff->len_pkt = skb->len;
+                dx_buff->is_vlan = 1U;
+                need_context_tag = true;
+        }
 
+        if (need_context_tag) {
                 dx = aq_ring_next_dx(ring, dx);
                 dx_buff = &ring->buff_ring[dx];
+                dx_buff->flags = 0U;
                 ++ret;
         }
 
-        dx_buff->flags = 0U;
         dx_buff->len = skb_headlen(skb);
         dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
                                      skb->data,
@@ -537,7 +548,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
              --ret, dx = aq_ring_next_dx(ring, dx)) {
                 dx_buff = &ring->buff_ring[dx];
 
-                if (!dx_buff->is_gso && dx_buff->pa) {
+                if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
                         if (unlikely(dx_buff->is_sop)) {
                                 dma_unmap_single(aq_nic_get_dev(self),
                                                  dx_buff->pa,
...
@@ -409,6 +409,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                         }
                 }
 
+                if (buff->is_vlan)
+                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                               buff->vlan_rx_tag);
+
                 skb->protocol = eth_type_trans(skb, ndev);
 
                 aq_rx_checksum(self, buff, skb);
...
@@ -247,6 +247,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
         /* LSO offloads*/
         hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
 
+        /* Outer VLAN tag offload */
+        hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
+
         /* LRO offloads */
         {
                 unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
@@ -489,6 +492,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
         unsigned int buff_pa_len = 0U;
         unsigned int pkt_len = 0U;
         unsigned int frag_count = 0U;
+        bool is_vlan = false;
         bool is_gso = false;
 
         buff = &ring->buff_ring[ring->sw_tail];
@@ -504,35 +508,43 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                 buff = &ring->buff_ring[ring->sw_tail];
 
                 if (buff->is_gso) {
+                        txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                         txd->ctl |= (buff->len_l3 << 31) |
-                                    (buff->len_l2 << 24) |
-                                    HW_ATL_B0_TXD_CTL_CMD_TCP |
-                                    HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
-                        txd->ctl2 |= (buff->mss << 16) |
-                                     (buff->len_l4 << 8) |
-                                     (buff->len_l3 >> 1);
+                                    (buff->len_l2 << 24);
+                        txd->ctl2 |= (buff->mss << 16);
+                        is_gso = true;
 
                         pkt_len -= (buff->len_l4 +
                                     buff->len_l3 +
                                     buff->len_l2);
-                        is_gso = true;
-
                         if (buff->is_ipv6)
                                 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
-                } else {
+                        txd->ctl2 |= (buff->len_l4 << 8) |
+                                     (buff->len_l3 >> 1);
+                }
+                if (buff->is_vlan) {
+                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+                        txd->ctl |= buff->vlan_tx_tag << 4;
+                        is_vlan = true;
+                }
+                if (!buff->is_gso && !buff->is_vlan) {
                         buff_pa_len = buff->len;
 
                         txd->buf_addr = buff->pa;
                         txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                                 ((u32)buff_pa_len << 4));
                         txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+
                         /* PAY_LEN */
                         txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
 
-                        if (is_gso) {
-                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+                        if (is_gso || is_vlan) {
+                                /* enable tx context */
                                 txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                         }
+                        if (is_gso)
+                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
 
                         /* Tx checksum offloads */
                         if (buff->is_ip_cso)
@@ -541,13 +553,16 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                         if (buff->is_udp_cso || buff->is_tcp_cso)
                                 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
 
+                        if (is_vlan)
+                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;
+
                         if (unlikely(buff->is_eop)) {
                                 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                 is_gso = false;
+                                is_vlan = false;
                         }
                 }
 
                 ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
         }
@@ -685,11 +700,15 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                 buff = &ring->buff_ring[ring->hw_head];
 
+                buff->flags = 0U;
+                buff->is_hash_l4 = 0U;
+
                 rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
                 is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
 
-                pkt_type = 0xFFU & (rxd_wb->type >> 4);
+                pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
+                           HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;
 
                 if (is_rx_check_sum_enabled & BIT(0) &&
                     (0x0U == (pkt_type & 0x3U)))
@@ -710,6 +729,13 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                         buff->is_cso_err = 0U;
                 }
 
+                if (self->aq_nic_cfg->is_vlan_rx_strip &&
+                    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
+                     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
+                        buff->is_vlan = 1;
+                        buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
+                }
+
                 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                         /* MAC error or DMA error */
                         buff->is_error = 1U;
...
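As a usage note, and purely an assumption here since feature negotiation is not part of this diff: the aq_nic_cfg fields is_vlan_rx_strip and is_vlan_tx_insert used above would typically be driven by the standard netdev features NETIF_F_HW_VLAN_CTAG_RX and NETIF_F_HW_VLAN_CTAG_TX, roughly as in the sketch below. The helper name is made up; struct aq_nic_cfg_s and the two fields come from this diff.

/* Illustrative only: possible wiring of the datapath flags to the VLAN
 * offload feature bits. aq_cfg_vlan_offloads_sketch() is a hypothetical
 * helper, not part of this commit. */
#include <linux/netdevice.h>

static void aq_cfg_vlan_offloads_sketch(struct aq_nic_cfg_s *cfg,
                                        netdev_features_t features)
{
        cfg->is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
        cfg->is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
}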