Commit d312c8f8 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NETFILTER]: nf_conntrack_pptp: fix NAT setup of expected GRE connections
  [NETFILTER]: nf_nat_pptp: fix expectation removal
  [NETFILTER]: nf_nat: fix ICMP translation with statically linked conntrack
  [TCP]: Restore SKB socket owner setting in tcp_transmit_skb().
  [AF_PACKET]: Check device down state before hard header callbacks.
  [DECNET]: Handle a failure in neigh_parms_alloc (take 2)
  [BNX2]: Fix 2nd port's MAC address.
  [TCP]: Fix sorting of SACK blocks.
  [AF_PACKET]: Fix BPF handling.
  [IPV4]: Fix the fib trie iterator to work with a single entry routing tables
parents 08eacc31 7399072a
......@@ -57,8 +57,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.5.3"
-#define DRV_MODULE_RELDATE "January 8, 2007"
+#define DRV_MODULE_VERSION "1.5.4"
+#define DRV_MODULE_RELDATE "January 24, 2007"
#define RUN_AT(x) (jiffies + (x))
......@@ -5845,9 +5845,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
-BNX2_SHM_HDR_SIGNATURE_SIG)
-bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
-else
+BNX2_SHM_HDR_SIGNATURE_SIG) {
+u32 off = PCI_FUNC(pdev->devfn) << 2;
+bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
+} else
bp->shmem_base = HOST_VIEW_SHMEM_BASE;
/* Get the permanent MAC address. First we need to make sure the
......
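Note on the change above: each BNX2 PCI function owns one 32-bit word in the shared-memory header table, so the driver has to add PCI_FUNC(pdev->devfn) << 2 to BNX2_SHM_HDR_ADDR_0 before reading its shmem base; otherwise the second port reads port 0's base and reports port 0's MAC address. A minimal user-space sketch of just the offset arithmetic (the constant value and the helper name are made up for the example, not taken from bnx2.h):

#include <stdio.h>
#include <stdint.h>

#define SHM_HDR_ADDR_0 0x00000050u	/* assumed placeholder value */

/* Function N reads the Nth 32-bit word after SHM_HDR_ADDR_0. */
static uint32_t shm_hdr_addr_word(unsigned int pci_func)
{
	return SHM_HDR_ADDR_0 + (pci_func << 2);
}

int main(void)
{
	for (unsigned int func = 0; func < 2; func++)
		printf("PCI function %u reads its shmem base pointer at 0x%08x\n",
		       func, shm_hdr_addr_word(func));
	return 0;
}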
......@@ -38,5 +38,5 @@ extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
extern int inet6_csk_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok);
extern int inet6_csk_xmit(struct sk_buff *skb, int ipfragok);
#endif /* _INET6_CONNECTION_SOCK_H */
......@@ -37,8 +37,7 @@ struct tcp_congestion_ops;
* (i.e. things that depend on the address family)
*/
struct inet_connection_sock_af_ops {
-int (*queue_xmit)(struct sk_buff *skb, struct sock *sk,
-int ipfragok);
+int (*queue_xmit)(struct sk_buff *skb, int ipfragok);
void (*send_check)(struct sock *sk, int len,
struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
......
......@@ -97,7 +97,7 @@ extern int ip_mc_output(struct sk_buff *skb);
extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
-extern int ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok);
+extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok);
extern void ip_init(void);
extern int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
......
......@@ -124,7 +124,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
+err = icsk->icsk_af_ops->queue_xmit(skb, 0);
return net_xmit_eval(err);
}
return -ENOBUFS;
......@@ -396,7 +396,7 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
code);
if (skb != NULL) {
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
+err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
return net_xmit_eval(err);
}
}
......
......@@ -1145,16 +1145,23 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
init_timer(&dn_db->timer);
dn_db->uptime = jiffies;
+dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
+if (!dn_db->neigh_parms) {
+dev->dn_ptr = NULL;
+kfree(dn_db);
+return NULL;
+}
if (dn_db->parms.up) {
if (dn_db->parms.up(dev) < 0) {
+neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
dev->dn_ptr = NULL;
kfree(dn_db);
return NULL;
}
}
-dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
dn_dev_sysctl_register(dev, &dn_db->parms);
dn_dev_set_timer(dev);
......
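The reordering above follows the usual acquire-then-roll-back pattern: allocate neigh_parms first and bail out if that fails, and if the later parms.up() callback fails, release the parms before freeing dn_db. A generic standalone sketch of that ordering (the names here are placeholders, not the DECnet API):

#include <stdio.h>
#include <stdlib.h>

struct priv { int *parms; };

/* Placeholder for the device "up" callback; returns <0 on failure. */
static int bring_up(void) { return -1; }	/* force the failure path */

static struct priv *priv_create(void)
{
	struct priv *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	p->parms = malloc(sizeof(*p->parms));	/* 1. acquire the dependency first */
	if (!p->parms) {
		free(p);			/* 2. nothing else to undo yet */
		return NULL;
	}

	if (bring_up() < 0) {
		free(p->parms);			/* 3. roll back in reverse order */
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	printf("create %s\n", priv_create() ? "succeeded" : "failed cleanly");
	return 0;
}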
......@@ -1989,6 +1989,10 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
unsigned cindex = iter->index;
struct tnode *p;
+/* A single entry routing table */
+if (!tn)
+return NULL;
pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
iter->tnode, iter->index, iter->depth);
rescan:
......@@ -2037,11 +2041,18 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
if(!iter)
return NULL;
-if (n && IS_TNODE(n)) {
-iter->tnode = (struct tnode *) n;
-iter->trie = t;
-iter->index = 0;
-iter->depth = 1;
+if (n) {
+if (IS_TNODE(n)) {
+iter->tnode = (struct tnode *) n;
+iter->trie = t;
+iter->index = 0;
+iter->depth = 1;
+} else {
+iter->tnode = NULL;
+iter->trie = t;
+iter->index = 0;
+iter->depth = 0;
+}
return n;
}
return NULL;
......
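The iterator fix comes down to this: when the routing table holds a single entry, the trie root is a leaf rather than a tnode, so fib_trie_get_first() must still return it (with depth 0 and no tnode to descend into) and fib_trie_get_next() must then stop instead of touching a NULL tnode. A rough self-contained sketch of that control flow, with simplified stand-ins for the kernel's node types:

#include <stdio.h>
#include <stddef.h>

struct node { int is_tnode; const char *name; };	/* simplified stand-in */

struct trie_iter {
	struct node *tnode;	/* internal node being walked, or NULL */
	int depth;		/* 0 when the root itself is a lone leaf */
};

/* Mirrors the fixed get_first(): hand back even a lone leaf root. */
static struct node *get_first(struct trie_iter *it, struct node *root)
{
	if (!root)
		return NULL;
	if (root->is_tnode) {
		it->tnode = root;
		it->depth = 1;
	} else {
		it->tnode = NULL;	/* single-entry table: nothing to descend */
		it->depth = 0;
	}
	return root;
}

/* Mirrors the fixed get_next(): bail out when there is no tnode. */
static struct node *get_next(struct trie_iter *it)
{
	if (!it->tnode)
		return NULL;	/* the lone leaf was already returned */
	/* ... a real iterator would rescan the current tnode here ... */
	return NULL;
}

int main(void)
{
	struct node leaf = { 0, "default route" };
	struct trie_iter it;

	for (struct node *n = get_first(&it, &leaf); n; n = get_next(&it))
		printf("visit %s (depth %d)\n", n->name, it.depth);
	return 0;
}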
......@@ -281,8 +281,9 @@ int ip_output(struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-int ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok)
+int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
+struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt = inet->opt;
struct rtable *rt;
......
......@@ -4,6 +4,14 @@
# objects for the standalone - connection tracking / NAT
ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
+# objects for l3 independent conntrack
+nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
+ifeq ($(CONFIG_PROC_FS),y)
+nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
+endif
+endif
ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
ifneq ($(CONFIG_NF_NAT),)
......@@ -20,6 +28,8 @@ ip_nat_h323-objs := ip_nat_helper_h323.o
# connection tracking
obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
+obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
obj-$(CONFIG_IP_NF_NAT) += ip_nat.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
......@@ -106,13 +116,3 @@ obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
-# objects for l3 independent conntrack
-nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
-ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
-ifeq ($(CONFIG_PROC_FS),y)
-nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
-endif
-endif
-# l3 independent conntrack
-obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
......@@ -72,9 +72,9 @@ static void pptp_nat_expected(struct nf_conn *ct,
DEBUGP("we are PAC->PNS\n");
/* build tuple for PNS->PAC */
t.src.l3num = AF_INET;
-t.src.u3.ip = master->tuplehash[exp->dir].tuple.src.u3.ip;
+t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
t.src.u.gre.key = nat_pptp_info->pns_call_id;
-t.dst.u3.ip = master->tuplehash[exp->dir].tuple.dst.u3.ip;
+t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
t.dst.u.gre.key = nat_pptp_info->pac_call_id;
t.dst.protonum = IPPROTO_GRE;
}
......
......@@ -1011,10 +1011,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
for (j = 0; j < i; j++){
if (after(ntohl(sp[j].start_seq),
ntohl(sp[j+1].start_seq))){
-sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
-sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
-sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
-sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
+struct tcp_sack_block_wire tmp;
+tmp = sp[j];
+sp[j] = sp[j+1];
+sp[j+1] = tmp;
}
}
......
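The corrected loop is a plain bubble sort that swaps whole SACK blocks through a temporary, instead of rebuilding them from recv_sack_cache, which no longer mirrors sp[] at that point. A standalone sketch of the same swap pattern with a simplified block type (plain > here instead of the kernel's wrap-safe after() comparison, and host byte order so no ntohl()/htonl() is needed):

#include <stdio.h>
#include <stdint.h>

struct sack_block { uint32_t start_seq, end_seq; };	/* simplified stand-in */

static void sort_sack_blocks(struct sack_block *sp, int used)
{
	for (int i = used - 1; i > 0; i--) {
		for (int j = 0; j < i; j++) {
			if (sp[j].start_seq > sp[j + 1].start_seq) {
				/* Swap the whole block so start/end stay paired. */
				struct sack_block tmp = sp[j];

				sp[j] = sp[j + 1];
				sp[j + 1] = tmp;
			}
		}
	}
}

int main(void)
{
	struct sack_block sp[] = { { 300, 400 }, { 100, 200 }, { 500, 600 } };

	sort_sack_blocks(sp, 3);
	for (int i = 0; i < 3; i++)
		printf("block %d: %u-%u\n", i, sp[i].start_seq, sp[i].end_seq);
	return 0;
}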
......@@ -467,6 +467,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th = (struct tcphdr *) skb_push(skb, tcp_header_size);
skb->h.th = th;
+skb_set_owner_w(skb, sk);
/* Build TCP header and checksum it. */
th->source = inet->sport;
......@@ -540,7 +541,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
TCP_INC_STATS(TCP_MIB_OUTSEGS);
-err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
+err = icsk->icsk_af_ops->queue_xmit(skb, 0);
if (likely(err <= 0))
return err;
......
......@@ -139,8 +139,9 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
-int inet6_csk_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok)
+int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
{
+struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi fl;
......
......@@ -113,7 +113,7 @@ static void pptp_expectfn(struct nf_conn *ct,
rcu_read_lock();
nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn);
-if (nf_nat_pptp_expectfn && ct->status & IPS_NAT_MASK)
+if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK)
nf_nat_pptp_expectfn(ct, exp);
else {
struct nf_conntrack_tuple inv_t;
......
......@@ -359,6 +359,10 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
if (dev == NULL)
goto out_unlock;
+err = -ENETDOWN;
+if (!(dev->flags & IFF_UP))
+goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
......@@ -407,10 +411,6 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
if (err)
goto out_free;
-err = -ENETDOWN;
-if (!(dev->flags & IFF_UP))
-goto out_free;
/*
* Now send it
*/
......@@ -428,24 +428,18 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
}
#endif
-static inline int run_filter(struct sk_buff *skb, struct sock *sk,
-unsigned *snaplen)
+static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+unsigned int res)
{
struct sk_filter *filter;
-int err = 0;
rcu_read_lock_bh();
filter = rcu_dereference(sk->sk_filter);
-if (filter != NULL) {
-err = sk_run_filter(skb, filter->insns, filter->len);
-if (!err)
-err = -EPERM;
-else if (*snaplen > err)
-*snaplen = err;
-}
+if (filter != NULL)
+res = sk_run_filter(skb, filter->insns, filter->len);
rcu_read_unlock_bh();
-return err;
+return res;
}
/*
......@@ -467,7 +461,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
struct packet_sock *po;
u8 * skb_head = skb->data;
int skb_len = skb->len;
-unsigned snaplen;
+unsigned int snaplen, res;
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
......@@ -495,8 +489,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
snaplen = skb->len;
-if (run_filter(skb, sk, &snaplen) < 0)
+res = run_filter(skb, sk, snaplen);
+if (!res)
goto drop_n_restore;
+if (snaplen > res)
+snaplen = res;
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned)sk->sk_rcvbuf)
......@@ -568,7 +565,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
struct tpacket_hdr *h;
u8 * skb_head = skb->data;
int skb_len = skb->len;
-unsigned snaplen;
+unsigned int snaplen, res;
unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
unsigned short macoff, netoff;
struct sk_buff *copy_skb = NULL;
......@@ -592,8 +589,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
snaplen = skb->len;
-if (run_filter(skb, sk, &snaplen) < 0)
+res = run_filter(skb, sk, snaplen);
+if (!res)
goto drop_n_restore;
+if (snaplen > res)
+snaplen = res;
if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
......@@ -738,6 +738,10 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len;
+err = -ENETDOWN;
+if (!(dev->flags & IFF_UP))
+goto out_unlock;
err = -EMSGSIZE;
if (len > dev->mtu+reserve)
goto out_unlock;
......@@ -770,10 +774,6 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->dev = dev;
skb->priority = sk->sk_priority;
-err = -ENETDOWN;
-if (!(dev->flags & IFF_UP))
-goto out_free;
/*
* Now send it
*/
......
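After this change run_filter() has a simpler contract: it returns 0 to drop the packet and otherwise the length to snap it to, so callers no longer pass snaplen by pointer or check for a negative error. A small user-space sketch of that calling convention (the filter is a stub returning a fixed snap length, not real BPF):

#include <stdio.h>

/* Stub standing in for the socket filter: 0 means drop, any other
 * value is the number of bytes the caller should keep. */
static unsigned int stub_filter(unsigned int pkt_len)
{
	return pkt_len > 68 ? 68 : pkt_len;	/* e.g. "snap to 68 bytes" */
}

int main(void)
{
	unsigned int pkt_len = 1500;
	unsigned int snaplen = pkt_len;
	unsigned int res = stub_filter(pkt_len);

	if (!res) {			/* 0 now simply means "drop" */
		printf("drop\n");
		return 0;
	}
	if (snaplen > res)		/* truncation handled by the caller */
		snaplen = res;
	printf("deliver %u of %u bytes\n", snaplen, pkt_len);
	return 0;
}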
......@@ -804,7 +804,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
-return ip_queue_xmit(skb, skb->sk, ipfragok);
+return ip_queue_xmit(skb, ipfragok);
}
static struct sctp_af sctp_ipv4_specific;
......