Commit ea2ab693 authored by Ian Campbell, committed by David S. Miller

net: convert core to skb paged frag APIs

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: "Michał Mirosław" <mirq-linux@rere.qmqm.pl>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 15133fbb
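
For orientation before the hunks: the conversion replaces direct manipulation of a frag's page field with the paged-frag accessor helpers that the call sites below now use. A minimal sketch of the pattern follows, assuming an skb with at least one fragment; example_show_frag_accessors() is a made-up illustration, not code from this patch:

#include <linux/skbuff.h>

/*
 * Illustrative only: shows the accessor calls the converted sites use.
 * Assumes skb_shinfo(skb)->nr_frags >= 1; this wrapper is hypothetical.
 */
static void example_show_frag_accessors(struct sk_buff *skb, struct page *newpage)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	/* Before: frag->page                After: skb_frag_page(frag) */
	struct page *page = skb_frag_page(frag);

	/* Before: page_address(frag->page) + frag->page_offset
	 * After:  skb_frag_address(frag)   (lowmem frags only)
	 */
	void *vaddr = skb_frag_address(frag);

	/* Before: get_page(frag->page) / put_page(frag->page)
	 * After:  skb_frag_ref(skb, i) / skb_frag_unref(skb, i),
	 *         or __skb_frag_ref(frag) / __skb_frag_unref(frag)
	 */
	skb_frag_ref(skb, 0);
	skb_frag_unref(skb, 0);

	/* Before: frag->page = newpage;     After: */
	skb_frag_set_page(skb, 0, newpage);

	(void)page;
	(void)vaddr;
}

The ref/unref pair and the set-page call mirror the substitutions visible in the hunks: get_page()/put_page() on frag->page become skb_frag_ref()/skb_frag_unref(), and direct frag->page assignment becomes skb_frag_set_page() or __skb_fill_page_desc().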
@@ -1898,12 +1898,12 @@ static inline int skb_add_data(struct sk_buff *skb,
 }
 
 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   struct page *page, int off)
+				   const struct page *page, int off)
 {
 	if (i) {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
 
-		return page == frag->page &&
+		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
......
@@ -332,7 +332,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			int err;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
+			struct page *page = skb_frag_page(frag);
 
 			if (copy > len)
 				copy = len;
@@ -418,7 +418,7 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
 			int err;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
+			struct page *page = skb_frag_page(frag);
 
 			if (copy > len)
 				copy = len;
@@ -508,7 +508,7 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 			int err;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
+			struct page *page = skb_frag_page(frag);
 
 			if (copy > len)
 				copy = len;
@@ -594,7 +594,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			int err = 0;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
+			struct page *page = skb_frag_page(frag);
 
 			if (copy > len)
 				copy = len;
......
@@ -1949,9 +1949,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 #ifdef CONFIG_HIGHMEM
 	int i;
 	if (!(dev->features & NETIF_F_HIGHDMA)) {
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			if (PageHighMem(skb_frag_page(frag)))
 				return 1;
+		}
	}
 
	if (PCI_DMA_BUS_IS_PHYS) {
@@ -1960,7 +1962,8 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 		if (!pdev)
 			return 0;
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
 				return 1;
 		}
@@ -3474,7 +3477,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		skb_shinfo(skb)->frags[0].size -= grow;
 
 		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
-			put_page(skb_shinfo(skb)->frags[0].page);
+			skb_frag_unref(skb, 0);
 			memmove(skb_shinfo(skb)->frags,
 				skb_shinfo(skb)->frags + 1,
 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -3538,10 +3541,9 @@ void skb_gro_reset_offset(struct sk_buff *skb)
 	NAPI_GRO_CB(skb)->frag0_len = 0;
 
 	if (skb->mac_header == skb->tail &&
-	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
 		NAPI_GRO_CB(skb)->frag0 =
-			page_address(skb_shinfo(skb)->frags[0].page) +
-			skb_shinfo(skb)->frags[0].page_offset;
+			skb_frag_address(&skb_shinfo(skb)->frags[0]);
 		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
	}
 }
......
@@ -7,7 +7,7 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
 	local_bh_disable();
 #endif
 
-	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+	return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
 }
 
 static inline void kunmap_skb_frag(void *vaddr)
......
@@ -2602,8 +2602,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 			if (!pkt_dev->page)
 				break;
 		}
-		skb_shinfo(skb)->frags[i].page = pkt_dev->page;
-		get_page(pkt_dev->page);
+		skb_frag_set_page(skb, i, pkt_dev->page);
 		skb_shinfo(skb)->frags[i].page_offset = 0;
 		/*last fragment, fill rest of data*/
 		if (i == (frags - 1))
......
@@ -326,7 +326,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 		}
 
 		/*
@@ -809,7 +809,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-			get_page(skb_shinfo(n)->frags[i].page);
+			skb_frag_ref(skb, i);
 		}
 		skb_shinfo(n)->nr_frags = i;
	}
@@ -901,7 +901,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		get_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_ref(skb, i);
 
 	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);
@@ -1181,7 +1181,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 	skb_shinfo(skb)->nr_frags = i;
 
 	for (; i < nfrags; i++)
-		put_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_unref(skb, i);
 
 	if (skb_has_frag_list(skb))
 		skb_drop_fraglist(skb);
@@ -1350,7 +1350,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_unref(skb, i);
 			eat -= skb_shinfo(skb)->frags[i].size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -1609,7 +1609,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
-		if (__splice_segment(f->page, f->page_offset, f->size,
+		if (__splice_segment(skb_frag_page(f),
+				     f->page_offset, f->size,
 				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
	}
@@ -2154,7 +2155,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 				 *    where splitting is expensive.
 				 * 2. Split is accurately. We make this.
 				 */
-			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_ref(skb, i);
 			skb_shinfo(skb1)->frags[0].page_offset += len - pos;
 			skb_shinfo(skb1)->frags[0].size -= len - pos;
 			skb_shinfo(skb)->frags[i].size = len - pos;
@@ -2229,7 +2230,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	 * commit all, so that we don't have to undo partial changes
 	 */
 	if (!to ||
-	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+			      fragfrom->page_offset)) {
 		merge = -1;
 	} else {
 		merge = to - 1;
@@ -2276,7 +2278,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 			to++;
 
 		} else {
-			get_page(fragfrom->page);
+			__skb_frag_ref(fragfrom);
 			fragto->page = fragfrom->page;
 			fragto->page_offset = fragfrom->page_offset;
 			fragto->size = todo;
@@ -2298,7 +2300,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
 		fragto->size += fragfrom->size;
-		put_page(fragfrom->page);
+		__skb_frag_unref(fragfrom);
	}
 
	/* Reposition in the original skb */
@@ -2543,8 +2545,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		left = PAGE_SIZE - frag->page_offset;
 		copy = (length > left)? left : length;
 
-		ret = getfrag(from, (page_address(frag->page) +
-			    frag->page_offset + frag->size),
+		ret = getfrag(from, skb_frag_address(frag) + frag->size,
 			    offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
@@ -2696,7 +2697,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
+			__skb_frag_ref(frag);
 			size = frag->size;
 
 			if (pos < offset) {
@@ -2919,7 +2920,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 			if (copy > len)
 				copy = len;
-			sg_set_page(&sg[elt], frag->page, copy,
+			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
 				    frag->page_offset+offset-start);
 			elt++;
 			if (!(len -= copy))
......
@@ -1533,7 +1533,6 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 			skb_shinfo(skb)->nr_frags = npages;
 			for (i = 0; i < npages; i++) {
 				struct page *page;
-				skb_frag_t *frag;
 
 				page = alloc_pages(sk->sk_allocation, 0);
 				if (!page) {
@@ -1543,12 +1542,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 					goto failure;
 				}
 
-				frag = &skb_shinfo(skb)->frags[i];
-				frag->page = page;
-				frag->page_offset = 0;
-				frag->size = (data_len >= PAGE_SIZE ?
-					      PAGE_SIZE :
-					      data_len);
+				__skb_fill_page_desc(skb, i,
+						page, 0,
+						(data_len >= PAGE_SIZE ?
+						 PAGE_SIZE :
+						 data_len));
 				data_len -= PAGE_SIZE;
			}
......
@@ -78,7 +78,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 		copy = end - offset;
 		if (copy > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
+			struct page *page = skb_frag_page(frag);
 
 			if (copy > len)
 				copy = len;
......