Commit 992d6a32 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into evo.osdl.org:/home/torvalds/v2.6/linux
parents bd8f3917 0daefd17
DaveM:
If you agree with it, I will send two small patches to modify
the kernel's configure help.
Ulisses
--------------------------------------------------------------------------------
+ ABSTRACT
--------------------------------------------------------------------------------
@@ -405,8 +397,3 @@ then poll for frames.
 Jesse Brandeburg, for fixing my grammathical/spelling errors
->>> EOF
-
-To unsubscribe from this list: send the line "unsubscribe linux-net" in
-the body of a message to majordomo@vger.kernel.org
-More majordomo info at http://vger.kernel.org/majordomo-info.html
\ No newline at end of file
@@ -27,13 +27,28 @@ static void update(struct crypto_tfm *tfm,
                    struct scatterlist *sg, unsigned int nsg)
 {
         unsigned int i;
 
         for (i = 0; i < nsg; i++) {
-                char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
-                tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
-                                                      p, sg[i].length);
-                crypto_kunmap(p, 0);
-                crypto_yield(tfm);
+
+                struct page *pg = sg[i].page;
+                unsigned int offset = sg[i].offset;
+                unsigned int l = sg[i].length;
+
+                do {
+                        unsigned int bytes_from_page = min(l, ((unsigned int)
+                                                               (PAGE_SIZE)) -
+                                                               offset);
+                        char *p = crypto_kmap(pg, 0) + offset;
+
+                        tfm->__crt_alg->cra_digest.dia_update
+                                        (crypto_tfm_ctx(tfm), p,
+                                         bytes_from_page);
+                        crypto_kunmap(p, 0);
+                        crypto_yield(tfm);
+                        offset = 0;
+                        pg++;
+                        l -= bytes_from_page;
+                } while (l > 0);
         }
 }
...
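The new update() loop above no longer assumes a scatterlist entry fits inside one page: it walks the entry page by page, consuming at most PAGE_SIZE - offset bytes per step. A minimal user-space sketch of the same chunking arithmetic (PAGE_SIZE here, walk_pages() and process_chunk() are illustrative stand-ins, not kernel APIs):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for the per-chunk work (dia_update in the real code). */
static void process_chunk(unsigned int page, unsigned int offset,
                          unsigned int len)
{
        printf("page %u: offset %u, %u bytes\n", page, offset, len);
}

/* Walk a region that starts at 'offset' inside 'page' and spans 'len'
 * bytes, splitting it into per-page chunks exactly as the digest
 * update loop does. */
static void walk_pages(unsigned int page, unsigned int offset,
                       unsigned int len)
{
        do {
                unsigned int chunk = len < PAGE_SIZE - offset
                                   ? len : PAGE_SIZE - offset;

                process_chunk(page, offset, chunk);
                offset = 0;     /* later pages start at their beginning */
                page++;
                len -= chunk;
        } while (len > 0);
}

int main(void)
{
        /* 10000 bytes starting 3000 bytes into page 7: spans four pages. */
        walk_pages(7, 3000, 10000);
        return 0;
}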
@@ -192,6 +192,8 @@ struct inet6_skb_parm
         __u16                   dst1;
 };
 
+#define IP6CB(skb)      ((struct inet6_skb_parm*)((skb)->cb))
+
 struct ipv6_pinfo {
         struct in6_addr         saddr;
         struct in6_addr         rcv_saddr;
...
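The new IP6CB() macro gives the IPv6 code one typed accessor for the per-packet state kept in the opaque skb->cb scratch area; later hunks in this merge convert the open-coded casts to it. A small user-space sketch of the same idiom (the struct and macro names here are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Opaque per-packet scratch space, like sk_buff::cb. */
struct fake_skb {
        char cb[48];
};

/* Protocol-private view of that scratch space. */
struct fake_parm {
        int iif;        /* incoming interface index */
        int hop;
};

/* Typed accessor, in the spirit of IP6CB(skb). */
#define FAKE_CB(skb) ((struct fake_parm *)((skb)->cb))

int main(void)
{
        struct fake_skb skb;

        memset(&skb, 0, sizeof(skb));
        FAKE_CB(&skb)->iif = 3;                         /* writer: receive path */
        printf("iif = %d\n", FAKE_CB(&skb)->iif);       /* reader */
        return 0;
}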
@@ -89,6 +89,7 @@ struct dst_ops
         int                     (*gc)(void);
         struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
         void                    (*destroy)(struct dst_entry *);
+        void                    (*ifdown)(struct dst_entry *, int how);
         struct dst_entry *      (*negative_advice)(struct dst_entry *);
         void                    (*link_failure)(struct sk_buff *);
         void                    (*update_pmtu)(struct dst_entry *dst, u32 mtu);
...
@@ -356,7 +356,6 @@ extern int ip6_dst_lookup(struct sock *sk,
  */
 
 extern int                      ip6_output(struct sk_buff **pskb);
-extern int                      ip6_output2(struct sk_buff **pskb);
 extern int                      ip6_forward(struct sk_buff *skb);
 extern int                      ip6_input(struct sk_buff *skb);
 extern int                      ip6_mc_input(struct sk_buff *skb);
...
@@ -398,6 +398,21 @@ static inline int sock_flag(struct sock *sk, enum sock_flags flag)
         return test_bit(flag, &sk->sk_flags);
 }
 
+static inline void sk_acceptq_removed(struct sock *sk)
+{
+        sk->sk_ack_backlog--;
+}
+
+static inline void sk_acceptq_added(struct sock *sk)
+{
+        sk->sk_ack_backlog++;
+}
+
+static inline int sk_acceptq_is_full(struct sock *sk)
+{
+        return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+}
+
 /* The per-socket spinlock must be held here. */
 #define sk_add_backlog(__sk, __skb) \
 do {    if (!(__sk)->sk_backlog.tail) { \
@@ -410,6 +425,20 @@ do { if (!(__sk)->sk_backlog.tail) { \
         (__skb)->next = NULL; \
 } while(0)
 
+#define sk_wait_event(__sk, __timeo, __condition)               \
+({      int rc;                                                 \
+        release_sock(__sk);                                     \
+        rc = __condition;                                       \
+        if (!rc) {                                              \
+                *(__timeo) = schedule_timeout(*(__timeo));      \
+                rc = __condition;                               \
+        }                                                       \
+        lock_sock(__sk);                                        \
+        rc;                                                     \
+})
+
+extern int sk_wait_data(struct sock *sk, long *timeo);
+
 /* IP protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
@@ -898,6 +927,11 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
         atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 }
 
+extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+                           unsigned long expires);
+
+extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+
 static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
         int err = 0;
@@ -1035,6 +1069,20 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
                 sk->sk_stamp = *stamp;
 }
 
+/**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk - socket to eat this skb from
+ * @skb - socket buffer to eat
+ *
+ * This routine must be called with interrupts disabled or with the socket
+ * locked so that the sk_buff queue operation is ok.
+ */
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+        __skb_unlink(skb, &sk->sk_receive_queue);
+        __kfree_skb(skb);
+}
+
 extern atomic_t netstamp_needed;
 extern void sock_enable_timestamp(struct sock *sk);
 extern void sock_disable_timestamp(struct sock *sk);
...
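sk_wait_event() above is built on a GCC statement expression: drop the socket lock, test the condition, sleep only if it is still false, test once more after waking, retake the lock, and evaluate to the final result. A user-space sketch of that shape, with placeholder lock and sleep functions standing in for lock_sock()/release_sock() and schedule_timeout() (statement expressions are a GCC/Clang extension, just as in the real macro):

#include <stdio.h>

static int lock_held = 1;
static int have_data = 0;

static void release_lock(void) { lock_held = 0; }
static void take_lock(void)    { lock_held = 1; }

/* Placeholder for schedule_timeout(): pretend data arrived while asleep. */
static long fake_sleep(long timeo)
{
        have_data = 1;
        return timeo / 2;       /* "time left" after waking */
}

/* Same structure as sk_wait_event(): a statement expression that yields
 * the condition's value after an optional sleep-and-recheck. */
#define wait_event_sketch(__timeo, __condition)                 \
({      int rc;                                                 \
        release_lock();                                         \
        rc = __condition;                                       \
        if (!rc) {                                              \
                *(__timeo) = fake_sleep(*(__timeo));            \
                rc = __condition;                               \
        }                                                       \
        take_lock();                                            \
        rc;                                                     \
})

int main(void)
{
        long timeo = 100;
        int rc = wait_event_sketch(&timeo, have_data != 0);

        printf("condition=%d, time left=%ld, lock held=%d\n",
               rc, timeo, lock_held);
        return 0;
}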
@@ -989,9 +989,7 @@ static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
                 tp->pending = 0;
 
 #ifdef TCP_CLEAR_TIMERS
-                if (timer_pending(&tp->retransmit_timer) &&
-                    del_timer(&tp->retransmit_timer))
-                        __sock_put(sk);
+                sk_stop_timer(sk, &tp->retransmit_timer);
 #endif
                 break;
         case TCP_TIME_DACK:
@@ -999,9 +997,7 @@ static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
                 tp->ack.pending = 0;
 
 #ifdef TCP_CLEAR_TIMERS
-                if (timer_pending(&tp->delack_timer) &&
-                    del_timer(&tp->delack_timer))
-                        __sock_put(sk);
+                sk_stop_timer(sk, &tp->delack_timer);
 #endif
                 break;
         default:
@@ -1030,15 +1026,13 @@ static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long
         case TCP_TIME_PROBE0:
                 tp->pending = what;
                 tp->timeout = jiffies+when;
-                if (!mod_timer(&tp->retransmit_timer, tp->timeout))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
                 break;
 
         case TCP_TIME_DACK:
                 tp->ack.pending |= TCP_ACK_TIMER;
                 tp->ack.timeout = jiffies+when;
-                if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
                 break;
 
         default:
@@ -1800,28 +1794,13 @@ static inline int tcp_full_space( struct sock *sk)
         return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_removed(struct sock *sk)
-{
-        sk->sk_ack_backlog--;
-}
-
-static inline void tcp_acceptq_added(struct sock *sk)
-{
-        sk->sk_ack_backlog++;
-}
-
-static inline int tcp_acceptq_is_full(struct sock *sk)
-{
-        return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
-}
-
 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
                                      struct sock *child)
 {
         struct tcp_opt *tp = tcp_sk(sk);
 
         req->sk = child;
-        tcp_acceptq_added(sk);
+        sk_acceptq_added(sk);
 
         if (!tp->accept_queue_tail) {
                 tp->accept_queue = req;
...
@@ -95,17 +95,13 @@ static void l2cap_sock_timeout(unsigned long arg)
 
 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
 {
         BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-
-        if (!mod_timer(&sk->sk_timer, jiffies + timeout))
-                sock_hold(sk);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
 }
 
 static void l2cap_sock_clear_timer(struct sock *sk)
 {
         BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-        if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void l2cap_sock_init_timer(struct sock *sk)
...
@@ -91,17 +91,13 @@ static void sco_sock_timeout(unsigned long arg)
 
 static void sco_sock_set_timer(struct sock *sk, long timeout)
 {
         BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
-
-        if (!mod_timer(&sk->sk_timer, jiffies + timeout))
-                sock_hold(sk);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
 }
 
 static void sco_sock_clear_timer(struct sock *sk)
 {
         BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-        if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void sco_sock_init_timer(struct sock *sk)
...
@@ -230,8 +230,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
                         if (event!=NETDEV_DOWN &&
                             dst->output == dst_discard_out) {
                                 dst->dev = &loopback_dev;
-                                dev_put(dev);
                                 dev_hold(&loopback_dev);
+                                dev_put(dev);
                                 dst->output = dst_discard_out;
                                 if (dst->neighbour && dst->neighbour->dev == dev) {
                                         dst->neighbour->dev = &loopback_dev;
@@ -242,6 +242,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
                                 dst->input = dst_discard_in;
                                 dst->output = dst_discard_out;
                         }
+                        if (dst->ops->ifdown)
+                                dst->ops->ifdown(dst, event != NETDEV_DOWN);
                 }
         }
         spin_unlock_bh(&dst_lock);
...
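The hunk above adds an optional ifdown() hook to struct dst_ops and invokes it only for protocols that supply one. A tiny sketch of that NULL-checked callback idiom (the struct and function names here are illustrative, not the kernel's):

#include <stdio.h>

struct my_ops {
        void (*ifdown)(int how);        /* optional hook; may be NULL */
};

static void my_ifdown(int how)
{
        printf("ifdown called, how=%d\n", how);
}

static void device_went_down(const struct my_ops *ops, int unregister)
{
        /* Call the hook only if the owner registered one. */
        if (ops->ifdown)
                ops->ifdown(unregister);
}

int main(void)
{
        struct my_ops with_hook = { .ifdown = my_ifdown };
        struct my_ops without_hook = { .ifdown = NULL };

        device_went_down(&with_hook, 1);        /* prints */
        device_went_down(&without_hook, 1);     /* safely does nothing */
        return 0;
}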
@@ -917,6 +917,31 @@ void __release_sock(struct sock *sk)
         } while((skb = sk->sk_backlog.head) != NULL);
 }
 
+/**
+ * sk_wait_data - wait for data to arrive at sk_receive_queue
+ * sk - sock to wait on
+ * timeo - for how long
+ *
+ * Now socket state including sk->sk_err is changed only under lock,
+ * hence we may omit checks after joining wait queue.
+ * We check receive queue before schedule() only as optimization;
+ * it is very likely that release_sock() added new data.
+ */
+int sk_wait_data(struct sock *sk, long *timeo)
+{
+        int rc;
+        DEFINE_WAIT(wait);
+
+        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+        rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+        finish_wait(sk->sk_sleep, &wait);
+        return rc;
+}
+
+EXPORT_SYMBOL(sk_wait_data);
+
 /*
  * Set of default routines for initialising struct proto_ops when
  * the protocol does not support a particular function. In certain
@@ -1099,6 +1124,23 @@ void sk_send_sigurg(struct sock *sk)
                 sk_wake_async(sk, 3, POLL_PRI);
 }
 
+void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+                    unsigned long expires)
+{
+        if (!mod_timer(timer, expires))
+                sock_hold(sk);
+}
+
+EXPORT_SYMBOL(sk_reset_timer);
+
+void sk_stop_timer(struct sock *sk, struct timer_list* timer)
+{
+        if (timer_pending(timer) && del_timer(timer))
+                __sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_stop_timer);
+
 void sock_init_data(struct socket *sock, struct sock *sk)
 {
         skb_queue_head_init(&sk->sk_receive_queue);
...
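sk_reset_timer() and sk_stop_timer() tie timer arming to socket reference counting: arming a timer that was not already pending takes a reference, and cancelling a timer that was pending drops one, so a live timer always pins the socket. A user-space sketch of that invariant, with a plain integer refcount and a boolean standing in for the real timer state:

#include <stdio.h>

struct fake_sock {
        int refcnt;
        int timer_pending;      /* stands in for the timer wheel state */
};

/* Arm the timer; take a reference only if it was not already pending
 * (mirrors: if (!mod_timer(timer, expires)) sock_hold(sk);). */
static void sk_reset_timer_sketch(struct fake_sock *sk)
{
        int was_pending = sk->timer_pending;

        sk->timer_pending = 1;
        if (!was_pending)
                sk->refcnt++;
}

/* Cancel the timer; drop the reference only if a pending timer was
 * actually removed (mirrors: if (timer_pending() && del_timer()) __sock_put()). */
static void sk_stop_timer_sketch(struct fake_sock *sk)
{
        if (sk->timer_pending) {
                sk->timer_pending = 0;
                sk->refcnt--;
        }
}

int main(void)
{
        struct fake_sock sk = { .refcnt = 1, .timer_pending = 0 };

        sk_reset_timer_sketch(&sk);     /* arm: refcnt 1 -> 2 */
        sk_reset_timer_sketch(&sk);     /* re-arm: still pending, stays 2 */
        sk_stop_timer_sketch(&sk);      /* cancel: refcnt 2 -> 1 */
        sk_stop_timer_sketch(&sk);      /* nothing pending, stays 1 */
        printf("refcnt=%d pending=%d\n", sk.refcnt, sk.timer_pending);
        return 0;
}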
@@ -179,11 +179,10 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
                 return 0;
         }
 
-        /* Look for ifname matches; this should unroll nicely. */
-        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
-                ret |= (((const unsigned long *)indev)[i]
-                        ^ ((const unsigned long *)arpinfo->iniface)[i])
-                        & ((const unsigned long *)arpinfo->iniface_mask)[i];
+        /* Look for ifname matches. */
+        for (i = 0, ret = 0; i < IFNAMSIZ; i++) {
+                ret |= (indev[i] ^ arpinfo->iniface[i])
+                        & arpinfo->iniface_mask[i];
         }
 
         if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
...
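The rewritten loop above compares interface names byte by byte under a mask, OR-ing every masked difference into ret; the name matches when ret ends up zero. A user-space sketch of the same comparison (the IFNAMSIZ value and the example names are just for illustration):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* Returns 0 when 'indev' matches 'iface' on every byte selected by 'mask'. */
static unsigned char ifname_compare(const char *indev, const char *iface,
                                    const unsigned char *mask)
{
        unsigned char ret = 0;
        int i;

        for (i = 0; i < IFNAMSIZ; i++)
                ret |= (indev[i] ^ iface[i]) & mask[i];
        return ret;
}

int main(void)
{
        char indev[IFNAMSIZ] = "eth0";
        char rule[IFNAMSIZ]  = "eth0";
        unsigned char mask[IFNAMSIZ];

        /* Exact match: every byte significant. */
        memset(mask, 0xff, sizeof(mask));
        printf("exact:  %s\n",
               ifname_compare(indev, rule, mask) ? "no match" : "match");

        /* Wildcard-style match on the first three bytes only ("eth..."). */
        memset(mask, 0, sizeof(mask));
        memset(mask, 0xff, 3);
        strcpy(rule, "eth7");
        printf("prefix: %s\n",
               ifname_compare(indev, rule, mask) ? "no match" : "match");
        return 0;
}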
@@ -138,6 +138,7 @@ static struct timer_list rt_secret_timer;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static void              ipv4_dst_destroy(struct dst_entry *dst);
+static void              ipv4_dst_ifdown(struct dst_entry *dst, int how);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void              ipv4_link_failure(struct sk_buff *skb);
 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -150,6 +151,7 @@ static struct dst_ops ipv4_dst_ops = {
         .gc =                   rt_garbage_collect,
         .check =                ipv4_dst_check,
         .destroy =              ipv4_dst_destroy,
+        .ifdown =               ipv4_dst_ifdown,
         .negative_advice =      ipv4_negative_advice,
         .link_failure =         ipv4_link_failure,
         .update_pmtu =          ip_rt_update_pmtu,
@@ -1336,6 +1338,16 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
         }
 }
 
+static void ipv4_dst_ifdown(struct dst_entry *dst, int how)
+{
+        struct rtable *rt = (struct rtable *) dst;
+        struct in_device *idev = rt->idev;
+
+        if (idev) {
+                rt->idev = NULL;
+                in_dev_put(idev);
+        }
+}
+
 static void ipv4_link_failure(struct sk_buff *skb)
 {
         struct rtable *rt;
...
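ipv4_dst_ifdown() takes a local copy of the in_device pointer, clears the field, and only then drops the reference, so the route never points at an object whose reference it has already given up. A user-space sketch of that clear-then-put ordering (plain integer refcount and invented type names, not the kernel's atomics):

#include <stdio.h>
#include <stdlib.h>

struct idev {
        int refcnt;
};

struct route {
        struct idev *idev;      /* reference-counted back-pointer */
};

static void idev_put(struct idev *d)
{
        if (--d->refcnt == 0) {
                printf("in_device freed\n");
                free(d);
        }
}

/* Mirror of ipv4_dst_ifdown(): grab a local copy, clear the field,
 * then drop the reference the route was holding. */
static void route_ifdown(struct route *rt)
{
        struct idev *idev = rt->idev;

        if (idev) {
                rt->idev = NULL;        /* no longer reachable via rt */
                idev_put(idev);
        }
}

int main(void)
{
        struct idev *d = malloc(sizeof(*d));
        struct route rt;

        d->refcnt = 1;          /* the route's reference */
        rt.idev = d;

        route_ifdown(&rt);      /* drops the last reference, frees */
        route_ifdown(&rt);      /* idempotent: idev already NULL */
        return 0;
}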
@@ -648,7 +648,7 @@ static void tcp_listen_stop (struct sock *sk)
                 local_bh_enable();
                 sock_put(child);
 
-                tcp_acceptq_removed(sk);
+                sk_acceptq_removed(sk);
                 tcp_openreq_fastfree(req);
         }
         BUG_TRAP(!sk->sk_ack_backlog);
@@ -1296,18 +1296,6 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
         return -EAGAIN;
 }
 
-/*
- *      Release a skb if it is no longer needed. This routine
- *      must be called with interrupts disabled or with the
- *      socket locked so that the sk_buff queue operation is ok.
- */
-static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
-{
-        __skb_unlink(skb, &sk->sk_receive_queue);
-        __kfree_skb(skb);
-}
-
 /* Clean up the receive buffer for full frames taken by the user,
  * then send an ACK if necessary. COPIED is the number of bytes
  * tcp_recvmsg has given to the user so far, it speeds up the
@@ -1368,31 +1356,6 @@ static void cleanup_rbuf(struct sock *sk, int copied)
                 tcp_send_ack(sk);
 }
 
-/* Now socket state including sk->sk_err is changed only under lock,
- * hence we may omit checks after joining wait queue.
- * We check receive queue before schedule() only as optimization;
- * it is very likely that release_sock() added new data.
- */
-static long tcp_data_wait(struct sock *sk, long timeo)
-{
-        DEFINE_WAIT(wait);
-
-        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-        release_sock(sk);
-
-        if (skb_queue_empty(&sk->sk_receive_queue))
-                timeo = schedule_timeout(timeo);
-
-        lock_sock(sk);
-        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-        finish_wait(sk->sk_sleep, &wait);
-        return timeo;
-}
-
 static void tcp_prequeue_process(struct sock *sk)
 {
         struct sk_buff *skb;
@@ -1473,11 +1436,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                 break;
                 }
                 if (skb->h.th->fin) {
-                        tcp_eat_skb(sk, skb);
+                        sk_eat_skb(sk, skb);
                         ++seq;
                         break;
                 }
-                tcp_eat_skb(sk, skb);
+                sk_eat_skb(sk, skb);
                 if (!desc->count)
                         break;
         }
@@ -1672,9 +1635,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         /* Do not sleep, just process backlog. */
                         release_sock(sk);
                         lock_sock(sk);
-                } else {
-                        timeo = tcp_data_wait(sk, timeo);
-                }
+                } else
+                        sk_wait_data(sk, &timeo);
 
                 if (user_recv) {
                         int chunk;
@@ -1758,14 +1720,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 if (skb->h.th->fin)
                         goto found_fin_ok;
                 if (!(flags & MSG_PEEK))
-                        tcp_eat_skb(sk, skb);
+                        sk_eat_skb(sk, skb);
                 continue;
 
         found_fin_ok:
                 /* Process the FIN. */
                 ++*seq;
                 if (!(flags & MSG_PEEK))
-                        tcp_eat_skb(sk, skb);
+                        sk_eat_skb(sk, skb);
                 break;
         } while (len > 0);
@@ -2263,7 +2225,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
         tp->accept_queue_tail = NULL;
 
         newsk = req->sk;
-        tcp_acceptq_removed(sk);
+        sk_acceptq_removed(sk);
         tcp_openreq_fastfree(req);
         BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
         release_sock(sk);
...
@@ -463,6 +463,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
         tp->rcvq_space.space = space;
 
         if (sysctl_tcp_moderate_rcvbuf) {
+                int new_clamp = space;
+
                 /* Receive space grows, normalize in order to
                  * take into account packet headers and sk_buff
                  * structure overhead.
@@ -472,10 +474,16 @@ void tcp_rcv_space_adjust(struct sock *sk)
                         space = 1;
                 rcvmem = (tp->advmss + MAX_TCP_HEADER +
                           16 + sizeof(struct sk_buff));
+                while (tcp_win_from_space(rcvmem) < tp->advmss)
+                        rcvmem += 128;
                 space *= rcvmem;
                 space = min(space, sysctl_tcp_rmem[2]);
-                if (space > sk->sk_rcvbuf)
+                if (space > sk->sk_rcvbuf) {
                         sk->sk_rcvbuf = space;
+
+                        /* Make the window clamp follow along. */
+                        tp->window_clamp = new_clamp;
+                }
         }
 }
...
@@ -1442,7 +1442,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
          * clogging syn queue with openreqs with exponentially increasing
          * timeout.
          */
-        if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                 goto drop;
 
         req = tcp_openreq_alloc();
@@ -1567,7 +1567,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         struct tcp_opt *newtp;
         struct sock *newsk;
 
-        if (tcp_acceptq_is_full(sk))
+        if (sk_acceptq_is_full(sk))
                 goto exit_overflow;
 
         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
...
@@ -1389,8 +1389,7 @@ void tcp_send_delayed_ack(struct sock *sk)
         }
         tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
         tp->ack.timeout = timeout;
-        if (!mod_timer(&tp->delack_timer, timeout))
-                sock_hold(sk);
+        sk_reset_timer(sk, &tp->delack_timer, timeout);
 }
 
 /* This routine sends an ack and also updates the window. */
...
@@ -68,18 +68,13 @@ void tcp_clear_xmit_timers(struct sock *sk)
         struct tcp_opt *tp = tcp_sk(sk);
 
         tp->pending = 0;
-        if (timer_pending(&tp->retransmit_timer) &&
-            del_timer(&tp->retransmit_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &tp->retransmit_timer);
 
         tp->ack.pending = 0;
         tp->ack.blocked = 0;
-        if (timer_pending(&tp->delack_timer) &&
-            del_timer(&tp->delack_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &tp->delack_timer);
 
-        if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void tcp_write_err(struct sock *sk)
@@ -218,8 +213,7 @@ static void tcp_delack_timer(unsigned long data)
                 /* Try again later. */
                 tp->ack.blocked = 1;
                 NET_INC_STATS_BH(DelayedACKLocked);
-                if (!mod_timer(&tp->delack_timer, jiffies + TCP_DELACK_MIN))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
                 goto out_unlock;
         }
@@ -229,8 +223,7 @@ static void tcp_delack_timer(unsigned long data)
                 goto out;
 
         if (time_after(tp->ack.timeout, jiffies)) {
-                if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
                 goto out;
         }
         tp->ack.pending &= ~TCP_ACK_TIMER;
@@ -429,8 +422,7 @@ static void tcp_write_timer(unsigned long data)
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk)) {
                 /* Try again later */
-                if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20));
                 goto out_unlock;
         }
@@ -438,8 +430,7 @@ static void tcp_write_timer(unsigned long data)
                 goto out;
 
         if (time_after(tp->timeout, jiffies)) {
-                if (!mod_timer(&tp->retransmit_timer, tp->timeout))
-                        sock_hold(sk);
+                sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
                 goto out;
         }
@@ -557,14 +548,12 @@ static void tcp_synack_timer(struct sock *sk)
 
 void tcp_delete_keepalive_timer (struct sock *sk)
 {
-        if (timer_pending(&sk->sk_timer) && del_timer (&sk->sk_timer))
-                __sock_put(sk);
+        sk_stop_timer(sk, &sk->sk_timer);
 }
 
 void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
 {
-        if (!mod_timer(&sk->sk_timer, jiffies + len))
-                sock_hold(sk);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 }
 
 void tcp_set_keepalive(struct sock *sk, int val)
...
@@ -363,7 +363,7 @@ void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
         struct xfrm_state *x;
 
-        if (type != ICMPV6_DEST_UNREACH ||
+        if (type != ICMPV6_DEST_UNREACH &&
             type != ICMPV6_PKT_TOOBIG)
                 return;
...
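The ah6_err() change above (and the identical esp6_err() change further down) fixes an inverted guard: for two distinct constants, type != A || type != B is true for every value of type, so the handler always returned before doing any work; with && it returns only when the type is neither of the two it handles. A tiny user-space check of that truth table (the constant values are arbitrary stand-ins):

#include <stdio.h>

#define DEST_UNREACH 1
#define PKT_TOOBIG   2

int main(void)
{
        int type;

        for (type = 0; type <= 3; type++) {
                int buggy = (type != DEST_UNREACH || type != PKT_TOOBIG);
                int fixed = (type != DEST_UNREACH && type != PKT_TOOBIG);

                /* 'buggy' is 1 for every type, so the handler would always
                 * bail out; 'fixed' is 1 only for the uninteresting types. */
                printf("type=%d  buggy-guard=%d  fixed-guard=%d\n",
                       type, buggy, fixed);
        }
        return 0;
}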
@@ -145,10 +145,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                               (struct in6_addr *)(skb->nh.raw + serr->addr_offset));
                 if (np->sndflow)
                         sin->sin6_flowinfo = *(u32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK;
-                if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-                        struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
-                        sin->sin6_scope_id = opt->iif;
-                }
+                if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                        sin->sin6_scope_id = IP6CB(skb)->iif;
         } else {
                 ipv6_addr_set(&sin->sin6_addr, 0, 0,
                               htonl(0xffff),
@@ -167,10 +165,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                         ipv6_addr_copy(&sin->sin6_addr, &skb->nh.ipv6h->saddr);
                         if (np->rxopt.all)
                                 datagram_recv_ctl(sk, msg, skb);
-                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-                                struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
-                                sin->sin6_scope_id = opt->iif;
-                        }
+                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                                sin->sin6_scope_id = IP6CB(skb)->iif;
                 } else {
                         struct inet_opt *inet = inet_sk(sk);
@@ -211,7 +207,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 {
         struct ipv6_pinfo *np = inet6_sk(sk);
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
+        struct inet6_skb_parm *opt = IP6CB(skb);
 
         if (np->rxopt.bits.rxinfo) {
                 struct in6_pktinfo src_info;
...
@@ -324,7 +324,7 @@ void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset);
         struct xfrm_state *x;
 
-        if (type != ICMPV6_DEST_UNREACH ||
+        if (type != ICMPV6_DEST_UNREACH &&
             type != ICMPV6_PKT_TOOBIG)
                 return;
...
@@ -155,7 +155,7 @@ static struct tlvtype_proc tlvprocdestopt_lst[] = {
 static int ipv6_destopt_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
 {
         struct sk_buff *skb = *skbp;
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
+        struct inet6_skb_parm *opt = IP6CB(skb);
 
         if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
             !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
@@ -217,7 +217,7 @@ void __init ipv6_nodata_init(void)
 static int ipv6_rthdr_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
 {
         struct sk_buff *skb = *skbp;
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
+        struct inet6_skb_parm *opt = IP6CB(skb);
         struct in6_addr *addr;
         struct in6_addr daddr;
         int n, i;
@@ -288,7 +288,7 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
                         return -1;
                 }
                 *skbp = skb = skb2;
-                opt = (struct inet6_skb_parm *)skb2->cb;
+                opt = IP6CB(skb2);
                 hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
         }
@@ -418,7 +418,7 @@ ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
 static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
 {
         if (skb->nh.raw[optoff+1] == 2) {
-                ((struct inet6_skb_parm*)skb->cb)->ra = optoff;
+                IP6CB(skb)->ra = optoff;
                 return 1;
         }
         LIMIT_NETDEBUG(
@@ -482,7 +482,7 @@ static struct tlvtype_proc tlvprochopopt_lst[] = {
 int ipv6_parse_hopopts(struct sk_buff *skb, int nhoff)
 {
-        ((struct inet6_skb_parm*)skb->cb)->hop = sizeof(struct ipv6hdr);
+        IP6CB(skb)->hop = sizeof(struct ipv6hdr);
         if (ip6_parse_tlv(tlvprochopopt_lst, skb))
                 return sizeof(struct ipv6hdr);
         return -1;
...
@@ -74,7 +74,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
         /* Store incoming device index. When the packet will
            be queued, we cannot refer to skb->dev anymore.
          */
-        ((struct inet6_skb_parm *)skb->cb)->iif = dev->ifindex;
+        IP6CB(skb)->iif = dev->ifindex;
 
         if (skb->len < sizeof(struct ipv6hdr))
                 goto err;
...
@@ -107,7 +107,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
 }
 
-int ip6_output2(struct sk_buff **pskb)
+static int ip6_output2(struct sk_buff **pskb)
 {
         struct sk_buff *skb = *pskb;
         struct dst_entry *dst = skb->dst;
@@ -349,7 +349,7 @@ int ip6_forward(struct sk_buff *skb)
 {
         struct dst_entry *dst = skb->dst;
         struct ipv6hdr *hdr = skb->nh.ipv6h;
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm*)skb->cb;
+        struct inet6_skb_parm *opt = IP6CB(skb);
 
         if (ipv6_devconf.forwarding == 0)
                 goto error;
...
@@ -395,7 +395,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 
         ndisc_flow_init(&fl, NDISC_NEIGHBOUR_ADVERTISEMENT, src_addr, daddr);
 
-        dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output2);
+        dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output);
         if (!dst)
                 return;
 
@@ -486,7 +486,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 
         ndisc_flow_init(&fl, NDISC_NEIGHBOUR_SOLICITATION, saddr, daddr);
 
-        dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output2);
+        dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output);
         if (!dst)
                 return;
 
@@ -562,7 +562,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
 
         ndisc_flow_init(&fl, NDISC_ROUTER_SOLICITATION, saddr, daddr);
 
-        dst = ndisc_dst_alloc(dev, NULL, daddr, ip6_output2);
+        dst = ndisc_dst_alloc(dev, NULL, daddr, ip6_output);
         if (!dst)
                 return;
...
@@ -409,10 +409,8 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
                 sin6->sin6_flowinfo = 0;
                 sin6->sin6_scope_id = 0;
-                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-                        struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
-                        sin6->sin6_scope_id = opt->iif;
-                }
+                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                        sin6->sin6_scope_id = IP6CB(skb)->iif;
         }
 
         sock_recv_timestamp(msg, sk, skb);
...
@@ -84,6 +84,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void              ip6_dst_destroy(struct dst_entry *);
+static void              ip6_dst_ifdown(struct dst_entry *, int how);
 static int               ip6_dst_gc(void);
 
 static int               ip6_pkt_discard(struct sk_buff *skb);
@@ -98,6 +99,7 @@ static struct dst_ops ip6_dst_ops = {
         .gc_thresh      =       1024,
         .check          =       ip6_dst_check,
         .destroy        =       ip6_dst_destroy,
+        .ifdown         =       ip6_dst_ifdown,
         .negative_advice =      ip6_negative_advice,
         .link_failure   =       ip6_link_failure,
         .update_pmtu    =       ip6_rt_update_pmtu,
@@ -143,9 +145,17 @@ static __inline__ struct rt6_info *ip6_dst_alloc(void)
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
         struct rt6_info *rt = (struct rt6_info *)dst;
-        if (rt->rt6i_idev != NULL)
-                in6_dev_put(rt->rt6i_idev);
+        struct inet6_dev *idev = rt->rt6i_idev;
+
+        if (idev != NULL) {
+                rt->rt6i_idev = NULL;
+                in6_dev_put(idev);
+        }
+}
+
+static void ip6_dst_ifdown(struct dst_entry *dst, int how)
+{
+        ip6_dst_destroy(dst);
 }
 
 /*
@@ -573,6 +583,8 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 /* Protected by rt6_lock.  */
 static struct dst_entry *ndisc_dst_gc_list;
+static int ipv6_get_mtu(struct net_device *dev);
+static inline unsigned int ipv6_advmss(unsigned int mtu);
 
 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
                                   struct neighbour *neigh,
@@ -598,6 +610,8 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
         rt->rt6i_metric = 0;
         atomic_set(&rt->u.dst.__refcnt, 1);
         rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
+        rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+        rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
         rt->u.dst.output = output;
 
         write_lock_bh(&rt6_lock);
...
@@ -536,8 +536,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 
 static __inline__ int tcp_v6_iif(struct sk_buff *skb)
 {
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
-        return opt->iif;
+        return IP6CB(skb)->iif;
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -879,7 +878,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
             np->rxopt.bits.srcrt == 2 &&
             req->af.v6_req.pktopts) {
                 struct sk_buff *pktopts = req->af.v6_req.pktopts;
-                struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)pktopts->cb;
+                struct inet6_skb_parm *rxopt = IP6CB(pktopts);
                 if (rxopt->srcrt)
                         opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
         }
@@ -932,7 +931,7 @@ static struct or_calltable or_ipv6 = {
 static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
 {
         struct ipv6_pinfo *np = inet6_sk(sk);
-        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
+        struct inet6_skb_parm *opt = IP6CB(skb);
 
         if (np->rxopt.all) {
                 if ((opt->hop && np->rxopt.bits.hopopts) ||
@@ -1183,7 +1182,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                 goto drop;
         }
 
-        if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                 goto drop;
 
         req = tcp_openreq_alloc();
@@ -1300,12 +1299,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 opt = np->opt;
 
-        if (tcp_acceptq_is_full(sk))
+        if (sk_acceptq_is_full(sk))
                 goto out_overflow;
 
         if (np->rxopt.bits.srcrt == 2 &&
             opt == NULL && req->af.v6_req.pktopts) {
-                struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)req->af.v6_req.pktopts->cb;
+                struct inet6_skb_parm *rxopt = IP6CB(req->af.v6_req.pktopts);
                 if (rxopt->srcrt)
                         opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(req->af.v6_req.pktopts->nh.raw+rxopt->srcrt));
         }
...
@@ -432,10 +432,8 @@ static int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                         if (np->rxopt.all)
                                 datagram_recv_ctl(sk, msg, skb);
-                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-                                struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
-                                sin6->sin6_scope_id = opt->iif;
-                        }
+                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                                sin6->sin6_scope_id = IP6CB(skb)->iif;
                 }
         }
         err = copied;
...