Commit 37264b31 authored by Ulisses Alonso Camaró, committed by James Morris

[AF_PACKET]: Fix packet_set_ring memleak and remove num frame limit.
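The mmapped RX ring previously kept a kmalloc'ed table of per-frame header
pointers (po->iovec), sized by tp_frame_nr, alongside the page vector. This
change drops that table: struct packet_opt now records the ring geometry
(frames_per_block, frame_size, frame_max), and the new packet_lookup_frame()
computes a frame's address from the page vector on demand. That removes the
frame-count limit tied to the iovec allocation and the packet_set_ring leak
named in the subject. packet_set_ring() also now returns -EBUSY if a ring is
already attached instead of building a second one.

The arithmetic behind packet_lookup_frame() is small enough to show on its
own. The following is a minimal userspace sketch of that lookup; the struct,
names and block addresses are simplified stand-ins, not the kernel's own
definitions:

#include <stdio.h>

/* Simplified stand-in for the ring fields added to struct packet_opt. */
struct ring {
	unsigned long *pg_vec;          /* base address of each block */
	unsigned int frames_per_block;  /* tp_block_size / tp_frame_size */
	unsigned int frame_size;        /* tp_frame_size */
};

/* Same arithmetic as packet_lookup_frame(): pick the block the frame
 * falls in, then add the frame's offset within that block. */
static unsigned long lookup_frame(const struct ring *r, unsigned int position)
{
	unsigned int pg_vec_pos = position / r->frames_per_block;
	unsigned int frame_offset = position % r->frames_per_block;

	return r->pg_vec[pg_vec_pos] + (unsigned long)frame_offset * r->frame_size;
}

int main(void)
{
	unsigned long blocks[2] = { 0x1000, 0x3000 };  /* fake block bases */
	struct ring r = { blocks, 4, 256 };            /* 4 frames of 256 bytes per block */

	/* Frame 5 lives in block 1 at offset 1 * 256, i.e. 0x3100 here. */
	printf("frame 5 at %#lx\n", lookup_frame(&r, 5));
	return 0;
}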

parent 1d5b953d
net/packet/af_packet.c

@@ -34,6 +34,8 @@
  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  *	Cyrus Durgin		:	Fixed kerneld for kmod.
  *	Michal Ostrowski	:	Module initialization cleanup.
+ *	Ulises Alonso		:	Frame number limit removal and
+ *					packet_set_ring memory leak.
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -168,30 +170,47 @@ static void packet_flush_mclist(struct sock *sk);
 struct packet_opt
 {
+	struct tpacket_stats	stats;
+#ifdef CONFIG_PACKET_MMAP
+	unsigned long		*pg_vec;
+	unsigned int		head;
+	unsigned int		frames_per_block;
+	unsigned int		frame_size;
+	unsigned int		frame_max;
+	int			copy_thresh;
+#endif
 	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
 	char			running;	/* prot_hook is attached*/
 	int			ifindex;	/* bound device		*/
 	unsigned short		num;
-	struct tpacket_stats	stats;
 #ifdef CONFIG_PACKET_MULTICAST
 	struct packet_mclist	*mclist;
 #endif
 #ifdef CONFIG_PACKET_MMAP
 	atomic_t		mapped;
-	unsigned long		*pg_vec;
 	unsigned int		pg_vec_order;
 	unsigned int		pg_vec_pages;
 	unsigned int		pg_vec_len;
-	struct tpacket_hdr	**iovec;
-	unsigned int		frame_size;
-	unsigned int		iovmax;
-	unsigned int		head;
-	int			copy_thresh;
 #endif
 };
 
+#ifdef CONFIG_PACKET_MMAP
+static inline unsigned long packet_lookup_frame(struct packet_opt *po, unsigned int position)
+{
+	unsigned int pg_vec_pos, frame_offset;
+	unsigned long frame;
+
+	pg_vec_pos = position / po->frames_per_block;
+	frame_offset = position % po->frames_per_block;
+
+	frame = (unsigned long) (po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
+
+	return frame;
+}
+#endif
+
 #define pkt_sk(__sk) ((struct packet_opt *)(__sk)->sk_protinfo)
 
 void packet_sock_destruct(struct sock *sk)
@@ -586,11 +605,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 		snaplen = skb->len-skb->data_len;
 
 	spin_lock(&sk->sk_receive_queue.lock);
-	h = po->iovec[po->head];
+	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
 	if (h->tp_status)
 		goto ring_is_full;
-	po->head = po->head != po->iovmax ? po->head+1 : 0;
+	po->head = po->head != po->frame_max ? po->head+1 : 0;
 	po->stats.tp_packets++;
 	if (copy_skb) {
 		status |= TP_STATUS_COPY;
@@ -1485,10 +1504,13 @@ unsigned int packet_poll(struct file * file, struct socket *sock, poll_table *wa
 	unsigned int mask = datagram_poll(file, sock, wait);
 
 	spin_lock_bh(&sk->sk_receive_queue.lock);
-	if (po->iovec) {
-		unsigned last = po->head ? po->head-1 : po->iovmax;
+	if (po->pg_vec) {
+		unsigned last = po->head ? po->head-1 : po->frame_max;
+		struct tpacket_hdr *h;
 
-		if (po->iovec[last]->tp_status)
+		h = (struct tpacket_hdr *)packet_lookup_frame(po, last);
+
+		if (h->tp_status)
 			mask |= POLLIN | POLLRDNORM;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1548,16 +1570,18 @@ static void free_pg_vec(unsigned long *pg_vec, unsigned order, unsigned len)
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
 {
 	unsigned long *pg_vec = NULL;
-	struct tpacket_hdr **io_vec = NULL;
 	struct packet_opt *po = pkt_sk(sk);
 	int was_running, num, order = 0;
 	int err = 0;
 
 	if (req->tp_block_nr) {
 		int i, l;
-		int frames_per_block;
 
 		/* Sanity tests and some calculations */
+
+		if (po->pg_vec)
+			return -EBUSY;
+
 		if ((int)req->tp_block_size <= 0)
 			return -EINVAL;
 		if (req->tp_block_size&(PAGE_SIZE-1))
@@ -1566,10 +1590,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 			return -EINVAL;
 		if (req->tp_frame_size&(TPACKET_ALIGNMENT-1))
 			return -EINVAL;
-		frames_per_block = req->tp_block_size/req->tp_frame_size;
-		if (frames_per_block <= 0)
+
+		po->frames_per_block = req->tp_block_size/req->tp_frame_size;
+		if (po->frames_per_block <= 0)
 			return -EINVAL;
-		if (frames_per_block*req->tp_block_nr != req->tp_frame_nr)
+		if (po->frames_per_block*req->tp_block_nr != req->tp_frame_nr)
 			return -EINVAL;
 
 		/* OK! */
@@ -1596,20 +1621,16 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 		}
 		/* Page vector is allocated */
 
-		/* Draw frames */
-		io_vec = kmalloc(req->tp_frame_nr*sizeof(struct tpacket_hdr*), GFP_KERNEL);
-		if (io_vec == NULL)
-			goto out_free_pgvec;
-		memset(io_vec, 0, req->tp_frame_nr*sizeof(struct tpacket_hdr*));
-
 		l = 0;
 		for (i=0; i<req->tp_block_nr; i++) {
 			unsigned long ptr = pg_vec[i];
+			struct tpacket_hdr *header;
 			int k;
 
-			for (k=0; k<frames_per_block; k++, l++) {
-				io_vec[l] = (struct tpacket_hdr*)ptr;
-				io_vec[l]->tp_status = TP_STATUS_KERNEL;
+			for (k=0; k<po->frames_per_block; k++) {
+
+				header = (struct tpacket_hdr*)ptr;
+				header->tp_status = TP_STATUS_KERNEL;
 				ptr += req->tp_frame_size;
 			}
 		}
@@ -1642,8 +1663,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	pg_vec = XC(po->pg_vec, pg_vec);
-	io_vec = XC(po->iovec, io_vec);
-	po->iovmax = req->tp_frame_nr-1;
+	po->frame_max = req->tp_frame_nr-1;
 	po->head = 0;
 	po->frame_size = req->tp_frame_size;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1652,7 +1672,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 	req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
 
 	po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
-	po->prot_hook.func = po->iovec ? tpacket_rcv : packet_rcv;
+	po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
 	skb_queue_purge(&sk->sk_receive_queue);
 #undef XC
 	if (atomic_read(&po->mapped))
@@ -1670,9 +1690,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
 	release_sock(sk);
 
-	if (io_vec)
-		kfree(io_vec);
-
 out_free_pgvec:
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
...
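For context on the sanity checks above (block size a multiple of PAGE_SIZE,
frame size a multiple of TPACKET_ALIGNMENT, and frames_per_block * tp_block_nr
equal to tp_frame_nr), here is a minimal userspace sketch of a PACKET_RX_RING
request whose geometry satisfies them. It is not part of this patch; the sizes
are arbitrary illustrative values and error handling is reduced to perror():

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	struct tpacket_req req;
	void *ring;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 4096;	/* multiple of PAGE_SIZE on most systems */
	req.tp_block_nr   = 64;
	req.tp_frame_size = 2048;	/* multiple of TPACKET_ALIGNMENT */
	/* frames_per_block * tp_block_nr must equal tp_frame_nr */
	req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		perror("setsockopt(PACKET_RX_RING)");
		return 1;
	}

	/* Map the whole ring: tp_block_nr blocks of tp_block_size bytes. */
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("ring mapped with %u frames\n", req.tp_frame_nr);
	return 0;
}

With this patch applied, tp_frame_nr is bounded only by the page-vector
allocation itself, not by a separate kmalloc of per-frame pointers.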