Commit 05e08a2a authored by Frank Pavlic's avatar Frank Pavlic Committed by Jeff Garzik

[PATCH] s390: qeth bug fixes

[patch 10/10] s390: qeth bug fixes.

From: Frank Pavlic <pavlic@de.ibm.com>

qeth network driver related changes:
 - due to OSA hardware changes in TCP Segmentation Offload
   support, we are now able to pack TSO packets as well.
   This fits perfectly into the design of qeth buffer handling
   and data sending.
 - remove skb_realloc_headroom from the sending path since
   hard_header_len value provides enough headroom now.
 - device recovery behaviour improvement
 - bug fixed in Enhanced Device Driver Packing functionality
Signed-off-by: Frank Pavlic <pavlic@de.ibm.com>
parent 9a455819
...@@ -10,6 +10,6 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o ...@@ -10,6 +10,6 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o obj-$(CONFIG_LCS) += lcs.o cu3088.o
obj-$(CONFIG_CLAW) += claw.o cu3088.o obj-$(CONFIG_CLAW) += claw.o cu3088.o
qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
qeth-$(CONFIG_PROC_FS) += qeth_proc.o qeth-$(CONFIG_PROC_FS) += qeth_proc.o
obj-$(CONFIG_QETH) += qeth.o obj-$(CONFIG_QETH) += qeth.o
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include "qeth_mpc.h" #include "qeth_mpc.h"
#define VERSION_QETH_H "$Revision: 1.137 $" #define VERSION_QETH_H "$Revision: 1.139 $"
#ifdef CONFIG_QETH_IPV6 #ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6" #define QETH_VERSION_IPV6 ":IPv6"
...@@ -370,6 +370,25 @@ struct qeth_hdr { ...@@ -370,6 +370,25 @@ struct qeth_hdr {
} hdr; } hdr;
} __attribute__ ((packed)); } __attribute__ ((packed));
/*
 * TCP Segmentation Offload headers.
 *
 * qeth_hdr_ext_tso is the TSO extension that follows the classic qdio
 * header; qeth_tso_fill_header() populates it. Field notes below reflect
 * the values that function writes.
 */
struct qeth_hdr_ext_tso {
	__u16 hdr_tot_len;	/* set to sizeof(struct qeth_hdr_ext_tso) */
	__u8 imb_hdr_no;	/* fixed to 1 by qeth_tso_fill_header() */
	__u8 reserved;
	__u8 hdr_type;		/* fixed to 1 */
	__u8 hdr_version;	/* fixed to 1 */
	__u16 hdr_len;		/* fixed to 28 — hardware-defined; see OSA docs */
	__u32 payload_len;	/* skb->len - dg_hdr_len - sizeof(qeth_hdr_tso) */
	__u16 mss;		/* taken from skb_shinfo(skb)->tso_size */
	__u16 dg_hdr_len;	/* IP header length + TCP header length, bytes */
	__u8 padding[16];
} __attribute__ ((packed));

/* Complete on-wire TSO header: classic qdio header plus TSO extension. */
struct qeth_hdr_tso {
	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
	struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
/* flags for qeth_hdr.flags */ /* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10 #define QETH_HDR_PASSTHRU 0x10
...@@ -867,16 +886,6 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) ...@@ -867,16 +886,6 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
return hdr; return hdr;
} }
/*
 * Return the number of bytes in the linear part of the skb: the total
 * length minus the bytes held in the paged fragments.
 * (Equivalent to skb->len - skb->data_len, since data_len is the sum of
 * the fragment sizes — presumably why later code replaced this helper.)
 */
static inline int
qeth_get_skb_data_len(struct sk_buff *skb)
{
	int len = skb->len;
	int i;

	/* subtract every paged fragment from the total length */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
		len -= skb_shinfo(skb)->frags[i].size;
	return len;
}
inline static int inline static int
qeth_get_hlen(__u8 link_type) qeth_get_hlen(__u8 link_type)
...@@ -885,19 +894,19 @@ qeth_get_hlen(__u8 link_type) ...@@ -885,19 +894,19 @@ qeth_get_hlen(__u8 link_type)
switch (link_type) { switch (link_type) {
case QETH_LINK_TYPE_HSTR: case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR: case QETH_LINK_TYPE_LANE_TR:
return sizeof(struct qeth_hdr) + TR_HLEN; return sizeof(struct qeth_hdr_tso) + TR_HLEN;
default: default:
#ifdef CONFIG_QETH_VLAN #ifdef CONFIG_QETH_VLAN
return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN; return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
#else #else
return sizeof(struct qeth_hdr) + ETH_HLEN; return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
#endif #endif
} }
#else /* CONFIG_QETH_IPV6 */ #else /* CONFIG_QETH_IPV6 */
#ifdef CONFIG_QETH_VLAN #ifdef CONFIG_QETH_VLAN
return sizeof(struct qeth_hdr) + VLAN_HLEN; return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
#else #else
return sizeof(struct qeth_hdr); return sizeof(struct qeth_hdr_tso);
#endif #endif
#endif /* CONFIG_QETH_IPV6 */ #endif /* CONFIG_QETH_IPV6 */
} }
......
/* /*
* *
* linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.12 $) * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
* *
* Enhanced Device Driver Packing (EDDP) support for the qeth driver. * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
* *
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* *
* Author(s): Thomas Spatzier <tspat@de.ibm.com> * Author(s): Thomas Spatzier <tspat@de.ibm.com>
* *
* $Revision: 1.12 $ $Date: 2005/04/01 21:40:40 $ * $Revision: 1.13 $ $Date: 2005/05/04 20:19:18 $
* *
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -85,7 +85,7 @@ void ...@@ -85,7 +85,7 @@ void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{ {
struct qeth_eddp_context_reference *ref; struct qeth_eddp_context_reference *ref;
QETH_DBF_TEXT(trace, 6, "eddprctx"); QETH_DBF_TEXT(trace, 6, "eddprctx");
while (!list_empty(&buf->ctx_list)){ while (!list_empty(&buf->ctx_list)){
ref = list_entry(buf->ctx_list.next, ref = list_entry(buf->ctx_list.next,
...@@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
"buffer!\n"); "buffer!\n");
goto out; goto out;
} }
} }
/* check if the whole next skb fits into current buffer */ /* check if the whole next skb fits into current buffer */
if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) - if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
buf->next_element_to_fill) buf->next_element_to_fill)
...@@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
* and increment ctx's refcnt */ * and increment ctx's refcnt */
must_refcnt = 1; must_refcnt = 1;
continue; continue;
} }
if (must_refcnt){ if (must_refcnt){
must_refcnt = 0; must_refcnt = 0;
if (qeth_eddp_buf_ref_context(buf, ctx)){ if (qeth_eddp_buf_ref_context(buf, ctx)){
...@@ -204,27 +204,27 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -204,27 +204,27 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
static inline void static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp) struct qeth_eddp_data *eddp, int data_len)
{ {
u8 *page; u8 *page;
int page_remainder; int page_remainder;
int page_offset; int page_offset;
int hdr_len; int pkt_len;
struct qeth_eddp_element *element; struct qeth_eddp_element *element;
QETH_DBF_TEXT(trace, 5, "eddpcrsh"); QETH_DBF_TEXT(trace, 5, "eddpcrsh");
page = ctx->pages[ctx->offset >> PAGE_SHIFT]; page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = ctx->offset % PAGE_SIZE; page_offset = ctx->offset % PAGE_SIZE;
element = &ctx->elements[ctx->num_elements]; element = &ctx->elements[ctx->num_elements];
hdr_len = eddp->nhl + eddp->thl; pkt_len = eddp->nhl + eddp->thl + data_len;
/* FIXME: layer2 and VLAN !!! */ /* FIXME: layer2 and VLAN !!! */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
hdr_len += ETH_HLEN; pkt_len += ETH_HLEN;
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
hdr_len += VLAN_HLEN; pkt_len += VLAN_HLEN;
/* does complete header fit in current page ? */ /* does complete packet fit in current page ? */
page_remainder = PAGE_SIZE - page_offset; page_remainder = PAGE_SIZE - page_offset;
if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){ if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){
/* no -> go to start of next page */ /* no -> go to start of next page */
ctx->offset += page_remainder; ctx->offset += page_remainder;
page = ctx->pages[ctx->offset >> PAGE_SHIFT]; page = ctx->pages[ctx->offset >> PAGE_SHIFT];
...@@ -270,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, ...@@ -270,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
int left_in_frag; int left_in_frag;
int copy_len; int copy_len;
u8 *src; u8 *src;
QETH_DBF_TEXT(trace, 5, "eddpcdtc"); QETH_DBF_TEXT(trace, 5, "eddpcdtc");
if (skb_shinfo(eddp->skb)->nr_frags == 0) { if (skb_shinfo(eddp->skb)->nr_frags == 0) {
memcpy(dst, eddp->skb->data + eddp->skb_offset, len); memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
...@@ -281,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, ...@@ -281,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
while (len > 0) { while (len > 0) {
if (eddp->frag < 0) { if (eddp->frag < 0) {
/* we're in skb->data */ /* we're in skb->data */
left_in_frag = qeth_get_skb_data_len(eddp->skb) left_in_frag = (eddp->skb->len - eddp->skb->data_len)
- eddp->skb_offset; - eddp->skb_offset;
src = eddp->skb->data + eddp->skb_offset; src = eddp->skb->data + eddp->skb_offset;
} else { } else {
...@@ -413,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -413,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct tcphdr *tcph; struct tcphdr *tcph;
int data_len; int data_len;
u32 hcsum; u32 hcsum;
QETH_DBF_TEXT(trace, 5, "eddpftcp"); QETH_DBF_TEXT(trace, 5, "eddpftcp");
eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
tcph = eddp->skb->h.th; tcph = eddp->skb->h.th;
...@@ -453,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -453,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
else else
hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len); hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
/* fill the next segment into the context */ /* fill the next segment into the context */
qeth_eddp_create_segment_hdrs(ctx, eddp); qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum); qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
if (eddp->skb_offset >= eddp->skb->len) if (eddp->skb_offset >= eddp->skb->len)
break; break;
...@@ -463,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -463,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
eddp->th.tcp.h.seq += data_len; eddp->th.tcp.h.seq += data_len;
} }
} }
static inline int static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct sk_buff *skb, struct qeth_hdr *qhdr) struct sk_buff *skb, struct qeth_hdr *qhdr)
{ {
struct qeth_eddp_data *eddp = NULL; struct qeth_eddp_data *eddp = NULL;
QETH_DBF_TEXT(trace, 5, "eddpficx"); QETH_DBF_TEXT(trace, 5, "eddpficx");
/* create our segmentation headers and copy original headers */ /* create our segmentation headers and copy original headers */
if (skb->protocol == ETH_P_IP) if (skb->protocol == ETH_P_IP)
...@@ -509,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, ...@@ -509,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
int hdr_len) int hdr_len)
{ {
int skbs_per_page; int skbs_per_page;
QETH_DBF_TEXT(trace, 5, "eddpcanp"); QETH_DBF_TEXT(trace, 5, "eddpcanp");
/* can we put multiple skbs in one page? */ /* can we put multiple skbs in one page? */
skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len); skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
...@@ -589,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, ...@@ -589,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *qhdr) struct qeth_hdr *qhdr)
{ {
struct qeth_eddp_context *ctx = NULL; struct qeth_eddp_context *ctx = NULL;
QETH_DBF_TEXT(trace, 5, "creddpct"); QETH_DBF_TEXT(trace, 5, "creddpct");
if (skb->protocol == ETH_P_IP) if (skb->protocol == ETH_P_IP)
ctx = qeth_eddp_create_context_generic(card, skb, ctx = qeth_eddp_create_context_generic(card, skb,
......
This diff is collapsed.
/*
* linux/drivers/s390/net/qeth_tso.c ($Revision: 1.7 $)
*
* Header file for qeth TCP Segmentation Offload support.
*
* Copyright 2004 IBM Corporation
*
* Author(s): Frank Pavlic <pavlic@de.ibm.com>
*
* $Revision: 1.7 $ $Date: 2005/04/01 21:40:41 $
*
*/
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_tso.h"
/**
 * The skb arrives already partially prepared: the classic qdio header
 * sits at skb->data. Grow the headroom so the TSO extension header fits
 * in front of it, then push that extension onto the skb.
 *
 * Returns a pointer to the start of the full TSO header, or NULL when
 * the headroom reallocation fails.
 */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
	QETH_DBF_TEXT(trace, 5, "tsoprsk");
	if (qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr_ext_tso)))
		return NULL;
	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
}
/**
 * Populate the TSO header of an outgoing packet.
 *
 * Marks the qdio header as TSO and fills the extension header with the
 * fixed firmware-expected constants plus the per-packet values (MSS,
 * datagram header length, payload length).
 */
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
	struct qeth_hdr_tso *tsoh;
	struct tcphdr *tcp;
	struct iphdr *ip;

	QETH_DBF_TEXT(trace, 5, "tsofhdr");

	tsoh = (struct qeth_hdr_tso *) skb->data;
	ip = skb->nh.iph;
	tcp = skb->h.th;
	/* flag the classic qdio header as a TSO packet */
	tsoh->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/* constants fixed for this first TSO approach */
	tsoh->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
	tsoh->ext.imb_hdr_no = 1;
	tsoh->ext.hdr_type = 1;
	tsoh->ext.hdr_version = 1;
	tsoh->ext.hdr_len = 28;
	/* per-packet values */
	tsoh->ext.mss = skb_shinfo(skb)->tso_size;
	tsoh->ext.dg_hdr_len = (__u16)(ip->ihl * 4 + tcp->doff * 4);
	tsoh->ext.payload_len = (__u16)(skb->len - tsoh->ext.dg_hdr_len -
					sizeof(struct qeth_hdr_tso));
}
/**
 * Adjust TCP/IP header fields the way the hardware requests:
 * clear the length/checksum fields and preload the TCP checksum with
 * the pseudo-header sum so the adapter can finish it per segment.
 */
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
	struct tcphdr *tcp = skb->h.th;

	tcp->check = 0;
	if (skb->protocol == ETH_P_IPV6) {
		struct ipv6hdr *ip6 = skb->nh.ipv6h;

		ip6->payload_len = 0;
		tcp->check = ~csum_ipv6_magic(&ip6->saddr, &ip6->daddr,
					      0, IPPROTO_TCP, 0);
	} else {
		struct iphdr *ip = skb->nh.iph;

		/* OSA wants these values zeroed / preloaded */
		tcp->check = ~csum_tcpudp_magic(ip->saddr, ip->daddr,
						0, IPPROTO_TCP, 0);
		ip->tot_len = 0;
		ip->check = 0;
	}
}
/**
 * Prepare a complete TSO packet: reserve headroom for the TSO qdio
 * header, fill the classic qdio header, the TSO extension header and
 * the TCP/IP header fields the hardware expects.
 *
 * Returns the TSO header on success, NULL when headroom reallocation
 * failed.
 *
 * Fix: the previous version declared a local 'rc' that was initialized
 * to 0 and never assigned, yet logged it as an error code
 * (QETH_DBF_TEXT_(trace, 4, "2err%d", rc)) on the failure path — the
 * debug record always read "2err0". Log a fixed marker instead and
 * drop the dead variable.
 */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	struct qeth_hdr_tso *hdr;

	QETH_DBF_TEXT(trace, 5, "tsoprep");
	/* get headroom for the tso qdio header */
	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
	if (hdr == NULL) {
		QETH_DBF_TEXT(trace, 4, "tsoperr");
		return NULL;
	}
	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
	/* fill first 32 bytes of qdio header as used
	 * FIXME: TSO has two struct members
	 *        with different names but same size */
	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
	qeth_tso_fill_header(card, skb);
	qeth_tso_set_tcpip_header(card, skb);
	return hdr;
}
/*
 * Force the output queue into non-packing mode and make sure the next
 * buffer is usable for a TSO packet.
 *
 * Returns the number of buffers that must be flushed (0 or 1).
 */
static inline int
qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_cnt = 0;

	QETH_DBF_TEXT(trace, 5, "tsobuf");
	/* force to non-packing*/
	if (queue->do_pack)
		queue->do_pack = 0;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/* The current buffer may still hold partially packed data from
	 * packing mode (state EMPTY but elements already filled): prime
	 * it for transmission and advance to a fresh buffer.
	 * NOTE(review): the original comment said "already in use", but
	 * the condition actually matches a partially-filled EMPTY buffer
	 * — confirm intent. */
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
						QDIO_MAX_BUFFERS_PER_Q;
		flush_cnt++;
	}
	return flush_cnt;
}
/*
 * Map a prepared TSO skb into one SBAL.
 *
 * The first buffer element carries only the headers (TSO qdio header
 * plus the IP/TCP datagram header); the payload follows in further
 * elements, either taken from the skb's paged fragments or split from
 * the linear data at page boundaries.
 *
 * Always returns 1 (one buffer was filled and primed).
 */
static inline int
qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
		     struct sk_buff *skb)
{
	int length, length_here, element;
	int hdr_len;
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	char *data;

	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
	/*increment user count and queue skb ...*/
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);
	/*initialize all variables...*/
	buffer = buf->buffer;
	hdr = (struct qeth_hdr_tso *)skb->data;
	/* header portion = TSO qdio header + IP/TCP datagram header */
	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
	data = skb->data + hdr_len;
	length = skb->len - hdr_len;
	element = buf->next_element_to_fill;
	/*fill first buffer entry only with header information */
	buffer->element[element].addr = skb->data;
	buffer->element[element].length = hdr_len;
	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
	buf->next_element_to_fill++;
	/* If the skb has paged fragments, let the frag helper map them.
	 * NOTE(review): skb->len and skb->data are rewritten in place
	 * here so the helper sees only the payload; the skb is never
	 * restored afterwards — presumably safe because it is only
	 * freed later, but verify against the skb's other users. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		skb->len = length;
		skb->data = data;
		__qeth_fill_buffer_frag(skb, buffer,1,
			(int *)&buf->next_element_to_fill);
		goto out;
	}
	/* ... otherwise split the linear payload at page boundaries */
	element++;
	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;
		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		/* mark the last chunk so the adapter knows the SBAL ends */
		if (!length)
			buffer->element[element].flags =
				SBAL_FLAGS_LAST_FRAG;
		else
			buffer->element[element].flags =
				SBAL_FLAGS_MIDDLE_FRAG;
		data += length_here;
		element++;
	}
	buf->next_element_to_fill = element;
out:
	/*prime buffer now ...*/
	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
	return 1;
}
/*
 * Send one TSO skb on the given output queue.
 *
 * Prepares the TSO headers, takes the queue lock (spinning on
 * atomic_compare_and_swap), forces the queue to non-packing mode,
 * fills exactly one SBAL and flushes it.
 *
 * Returns 0 on success (and also on the "card busy" drop paths — see
 * notes below), -ENOMEM when header preparation failed, -EINVAL when
 * the skb does not fit into one SBAL.
 */
int
qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
		    struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
	int flush_cnt = 0;
	struct qeth_hdr_tso *hdr;
	struct qeth_qdio_out_buffer *buffer;
	int start_index;

	QETH_DBF_TEXT(trace, 3, "tsosend");

	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
		return -ENOMEM;
	/*check if skb fits in one SBAL ...*/
	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
		return -EINVAL;
	/*lock queue, force switching to non-packing and send it ...*/
	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
				       QETH_OUT_Q_LOCKED,
				       &queue->state));
	start_index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*check if card is too busy ...*/
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
		card->stats.tx_dropped++;
		goto out;
	}
	/*let's force to non-packing and get a new SBAL*/
	flush_cnt += qeth_tso_get_queue_buffer(queue);
	buffer = &queue->bufs[queue->next_buf_to_fill];
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		card->stats.tx_dropped++;
		goto out;
	}
	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
					 QDIO_MAX_BUFFERS_PER_Q;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (flush_cnt)
		qeth_flush_buffers(queue, 0, start_index, flush_cnt);
	/* NOTE(review): on the tx_dropped paths above we still fall
	 * through here, so tx_packets/tx_bytes are incremented and 0
	 * is returned even for a dropped skb, and the skb is not
	 * freed — confirm whether the caller handles this. */
	/*do some statistics */
	card->stats.tx_packets++;
	card->stats.tx_bytes += skb->len;
	return 0;
}
/* /*
* linux/drivers/s390/net/qeth_tso.h ($Revision: 1.5 $) * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.7 $)
* *
* Header file for qeth TCP Segmentation Offload support. * Header file for qeth TCP Segmentation Offload support.
* *
...@@ -7,97 +7,148 @@ ...@@ -7,97 +7,148 @@
* *
* Author(s): Frank Pavlic <pavlic@de.ibm.com> * Author(s): Frank Pavlic <pavlic@de.ibm.com>
* *
* $Revision: 1.5 $ $Date: 2005/04/01 21:40:41 $ * $Revision: 1.7 $ $Date: 2005/05/04 20:19:18 $
* *
*/ */
#ifndef __QETH_TSO_H__ #ifndef __QETH_TSO_H__
#define __QETH_TSO_H__ #define __QETH_TSO_H__
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
extern int
qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
struct qeth_qdio_out_q *, int , int);
struct qeth_hdr_ext_tso { static inline struct qeth_hdr_tso *
__u16 hdr_tot_len; qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
__u8 imb_hdr_no; {
__u8 reserved; QETH_DBF_TEXT(trace, 5, "tsoprsk");
__u8 hdr_type; return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso));
__u8 hdr_version; }
__u16 hdr_len;
__u32 payload_len; /**
__u16 mss; * fill header for a TSO packet
__u16 dg_hdr_len; */
__u8 padding[16]; static inline void
} __attribute__ ((packed)); qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
struct qeth_hdr_tso *hdr;
struct tcphdr *tcph;
struct iphdr *iph;
struct qeth_hdr_tso { QETH_DBF_TEXT(trace, 5, "tsofhdr");
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext; hdr = (struct qeth_hdr_tso *) skb->data;
} __attribute__ ((packed)); iph = skb->nh.iph;
tcph = skb->h.th;
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
hdr->ext.hdr_type = 1;
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->tso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
}
/**
* change some header values as requested by hardware
*/
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
iph = skb->nh.iph;
ip6h = skb->nh.ipv6h;
tcph = skb->h.th;
tcph->check = 0;
if (skb->protocol == ETH_P_IPV6) {
ip6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0, IPPROTO_TCP, 0);
return;
}
/*OSA want us to set these values ...*/
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
iph->tot_len = 0;
iph->check = 0;
}
/*some helper functions*/
static inline int static inline int
qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb) qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{ {
int elements_needed = 0; struct qeth_hdr_tso *hdr;
if (skb_shinfo(skb)->nr_frags > 0) QETH_DBF_TEXT(trace, 5, "tsoprep");
elements_needed = (skb_shinfo(skb)->nr_frags + 1);
if (elements_needed == 0 ) hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) if (hdr == NULL) {
+ skb->len) >> PAGE_SHIFT); QETH_DBF_TEXT(trace, 4, "tsoperr");
if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){ return -ENOMEM;
PRINT_ERR("qeth_do_send_packet: invalid size of " }
"IP packet. Discarded."); memset(hdr, 0, sizeof(struct qeth_hdr_tso));
return 0; /*fill first 32 bytes of qdio header as used
} *FIXME: TSO has two struct members
return elements_needed; * with different names but same size
* */
qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
qeth_tso_fill_header(card, skb);
qeth_tso_set_tcpip_header(card, skb);
return 0;
} }
static inline void static inline void
__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer, __qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
int is_tso, int *next_element_to_fill) int is_tso, int *next_element_to_fill)
{ {
int length = skb->len;
struct skb_frag_struct *frag; struct skb_frag_struct *frag;
int fragno; int fragno;
unsigned long addr; unsigned long addr;
int element; int element, cnt, dlen;
int first_lap = 1;
fragno = skb_shinfo(skb)->nr_frags;
fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */ element = *next_element_to_fill;
element = *next_element_to_fill + fragno; dlen = 0;
while (length > 0) {
if (fragno > 0) { if (is_tso)
frag = &skb_shinfo(skb)->frags[fragno - 1]; buffer->element[element].flags =
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + SBAL_FLAGS_MIDDLE_FRAG;
frag->page_offset; else
buffer->element[element].addr = (char *)addr; buffer->element[element].flags =
buffer->element[element].length = frag->size; SBAL_FLAGS_FIRST_FRAG;
length -= frag->size; if ( (dlen = (skb->len - skb->data_len)) ) {
if (first_lap) buffer->element[element].addr = skb->data;
buffer->element[element].flags = buffer->element[element].length = dlen;
SBAL_FLAGS_LAST_FRAG; element++;
else
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
} else {
buffer->element[element].addr = skb->data;
buffer->element[element].length = length;
length = 0;
if (is_tso)
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_FIRST_FRAG;
}
element--;
fragno--;
first_lap = 0;
} }
*next_element_to_fill += skb_shinfo(skb)->nr_frags + 1; for (cnt = 0; cnt < fragno; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
frag->page_offset;
buffer->element[element].addr = (char *)addr;
buffer->element[element].length = frag->size;
if (cnt < (fragno - 1))
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
element++;
}
*next_element_to_fill = element;
} }
#endif /* __QETH_TSO_H__ */ #endif /* __QETH_TSO_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment