Commit 3525bea4 authored by Benjamin LaHaise, committed by David S. Miller

[AIO]: First stage of AIO infrastructure for networking.

- Change the socket lock's users count into an owner pointer (struct sock_iocb *).
- Add sock_owned_by_user().
parent 90e464a4
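Every hunk below makes the same mechanical substitution: tests of the integer sk->lock.users become calls to the new sock_owned_by_user() predicate. As a hedged sketch of the resulting softirq-side idiom, modeled on the tcp_v4_rcv() hunk further down (example_rcv is a hypothetical function, not part of this diff):

#include <net/sock.h>

/* Hypothetical receive hook: handle the packet in softirq context
 * unless a user context owns the socket, in which case queue it on
 * the backlog for the owner to replay in release_sock(). */
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
        int ret = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))            /* was: !sk->lock.users */
                ret = sk->backlog_rcv(sk, skb);
        else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);
        return ret;
}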
@@ -70,15 +70,16 @@
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
*/
+ struct sock_iocb;
typedef struct {
spinlock_t slock;
- unsigned int users;
+ struct sock_iocb *owner;
wait_queue_head_t wq;
} socket_lock_t;
#define sock_lock_init(__sk) \
do { spin_lock_init(&((__sk)->lock.slock)); \
- (__sk)->lock.users = 0; \
+ (__sk)->lock.owner = NULL; \
init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0)
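Judging from the hunks above and below, the owner field keeps the old boolean meaning while leaving room for the asynchronous stages to follow: NULL means unlocked, the sentinel (void *)1 marks a synchronous lock_sock() caller, and a genuine struct sock_iocb pointer is presumably reserved for an async owner. A hypothetical helper distinguishing the cases (an assumption about later AIO stages, not code from this patch):

#include <net/sock.h>

/* Assumed encoding of sk->lock.owner after this patch:
 *   NULL      - not owned by any user context
 *   (void *)1 - owned by a synchronous lock_sock() caller
 *   other     - a struct sock_iocb * installed by the async path
 */
static inline int example_owner_is_iocb(struct sock *sk)
{
        return sk->lock.owner != NULL &&
               sk->lock.owner != (struct sock_iocb *)1;
}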
@@ -306,14 +307,16 @@ static __inline__ void sock_prot_dec_use(struct proto *prot)
* Since ~2.3.5 it is also exclusive sleep lock serializing
* accesses from user process context.
*/
+ extern int __async_lock_sock(struct sock_iocb *, struct sock *, struct list_head *);
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
+ #define sock_owned_by_user(sk) (NULL != (sk)->lock.owner)
#define lock_sock(__sk) \
do { might_sleep(); \
spin_lock_bh(&((__sk)->lock.slock)); \
- if ((__sk)->lock.users != 0) \
+ if ((__sk)->lock.owner != NULL) \
__lock_sock(__sk); \
- (__sk)->lock.users = 1; \
+ (__sk)->lock.owner = (void *)1; \
spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)
@@ -321,7 +324,7 @@ do { might_sleep(); \
do { spin_lock_bh(&((__sk)->lock.slock)); \
if ((__sk)->backlog.tail != NULL) \
__release_sock(__sk); \
- (__sk)->lock.users = 0; \
+ (__sk)->lock.owner = NULL; \
if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)
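Together the two macros preserve the existing synchronous contract; only the bookkeeping type changes. A minimal process-context usage sketch (standard usage, unchanged by this patch; example_user_context is illustrative):

#include <net/sock.h>

static void example_user_context(struct sock *sk)
{
        lock_sock(sk);          /* sets lock.owner (formerly lock.users = 1) */
        /* ... modify socket state; softirqs add packets to sk->backlog ... */
        release_sock(sk);       /* replays the backlog, clears owner, wakes waiters */
}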
@@ -1348,7 +1348,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (tp->ucopy.memory > sk->rcvbuf) {
struct sk_buff *skb1;
- if (sk->lock.users) BUG();
+ if (sock_owned_by_user(sk)) BUG();
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->backlog_rcv(sk, skb1);
@@ -861,7 +861,7 @@ void __lock_sock(struct sock *sk)
spin_unlock_bh(&sk->lock.slock);
schedule();
spin_lock_bh(&sk->lock.slock);
- if(!sk->lock.users)
+ if(!sock_owned_by_user(sk))
break;
}
current->state = TASK_RUNNING;
@@ -800,8 +800,8 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
printk(KERN_DEBUG "NSP: 0x%02x 0x%02x 0x%04x 0x%04x %d\n",
(int)cb->rt_flags, (int)cb->nsp_flags,
(int)cb->src_port, (int)cb->dst_port,
- (int)sk->lock.users);
- if (sk->lock.users == 0)
+ (int)sock_owned_by_user(sk));
+ if (!sock_owned_by_user(sk))
ret = dn_nsp_backlog_rcv(sk, skb);
else
sk_add_backlog(sk, skb);
@@ -57,7 +57,7 @@ static void dn_slow_timer(unsigned long arg)
sock_hold(sk);
bh_lock_sock(sk);
- if (sk->lock.users != 0) {
+ if (sock_owned_by_user(sk)) {
sk->timer.expires = jiffies + HZ / 10;
add_timer(&sk->timer);
goto out;
@@ -115,7 +115,7 @@ static void dn_fast_timer(unsigned long arg)
struct dn_scp *scp = DN_SK(sk);
bh_lock_sock(sk);
- if (sk->lock.users != 0) {
+ if (sock_owned_by_user(sk)) {
scp->delack_timer.expires = jiffies + HZ / 20;
add_timer(&scp->delack_timer);
goto out;
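The DECnet timers above, like the TCP timers later in the diff, share one back-off pattern: if a user context owns the socket, re-arm the timer and retry rather than race the owner. A condensed, hypothetical version of that pattern (example_timer and its re-arm interval are illustrative):

#include <linux/timer.h>
#include <net/sock.h>

static void example_timer(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        sock_hold(sk);
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {           /* was: sk->lock.users != 0 */
                sk->timer.expires = jiffies + HZ / 10; /* back off briefly */
                add_timer(&sk->timer);
                goto out;
        }
        /* ... timer work runs with the socket quiescent ... */
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}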
@@ -623,7 +623,7 @@ static void tcp_listen_stop (struct sock *sk)
local_bh_disable();
bh_lock_sock(child);
- BUG_TRAP(!child->lock.users);
+ BUG_TRAP(!sock_owned_by_user(child));
sock_hold(child);
tcp_disconnect(child, O_NONBLOCK);
@@ -2019,7 +2019,7 @@ void tcp_close(struct sock *sk, long timeout)
*/
local_bh_disable();
bh_lock_sock(sk);
- BUG_TRAP(!sk->lock.users);
+ BUG_TRAP(!sock_owned_by_user(sk));
sock_hold(sk);
sock_orphan(sk);
@@ -2570,7 +2570,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
/* Ok. In sequence. In window. */
if (tp->ucopy.task == current &&
tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
- sk->lock.users && !tp->urg_data) {
+ sock_owned_by_user(sk) && !tp->urg_data) {
int chunk = min_t(unsigned int, skb->len,
tp->ucopy.len);
@@ -3190,7 +3190,7 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
{
int result;
- if (sk->lock.users) {
+ if (sock_owned_by_user(sk)) {
local_bh_enable();
result = __tcp_checksum_complete(skb);
local_bh_disable();
@@ -3324,7 +3324,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tp->ucopy.task == current &&
tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len &&
- sk->lock.users) {
+ sock_owned_by_user(sk)) {
__set_current_state(TASK_RUNNING);
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
@@ -3864,7 +3864,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tmo = tcp_fin_time(tp);
if (tmo > TCP_TIMEWAIT_LEN) {
tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
- } else if (th->fin || sk->lock.users) {
+ } else if (th->fin || sock_owned_by_user(sk)) {
/* Bad case. We could lose such FIN otherwise.
* It is not a big problem, but it looks confusing
* and not so rare event. We still can lose it now,
@@ -1003,7 +1003,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
- if (sk->lock.users)
+ if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->state == TCP_CLOSE)
@@ -1022,7 +1022,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
/* This is deprecated, but if someone generated it,
* we have no reasons to ignore it.
*/
- if (!sk->lock.users)
+ if (!sock_owned_by_user(sk))
tcp_enter_cwr(tp);
goto out;
case ICMP_PARAMETERPROB:
@@ -1033,7 +1033,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
- if (!sk->lock.users)
+ if (!sock_owned_by_user(sk))
do_pmtu_discovery(sk, iph, info);
goto out;
}
@@ -1050,7 +1050,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
switch (sk->state) {
struct open_request *req, **prev;
case TCP_LISTEN:
- if (sk->lock.users)
+ if (sock_owned_by_user(sk))
goto out;
req = tcp_v4_search_req(tp, &prev, th->dest,
@@ -1081,7 +1081,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
case TCP_SYN_RECV: /* Cannot happen.
It can f.e. if SYNs crossed.
*/
- if (!sk->lock.users) {
+ if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
sk->err = err;
@@ -1111,7 +1111,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
*/
inet = inet_sk(sk);
- if (!sk->lock.users && inet->recverr) {
+ if (!sock_owned_by_user(sk) && inet->recverr) {
sk->err = err;
sk->error_report(sk);
} else { /* Only an error on timeout */
@@ -1778,7 +1778,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
bh_lock_sock(sk);
ret = 0;
- if (!sk->lock.users) {
+ if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
} else
@@ -989,7 +989,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->state;
- if (child->lock.users == 0) {
+ if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
/* Wakeup parent, send SIGIO */
@@ -213,7 +213,7 @@ static void tcp_delack_timer(unsigned long data)
struct tcp_opt *tp = tcp_sk(sk);
bh_lock_sock(sk);
- if (sk->lock.users) {
+ if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
NET_INC_STATS_BH(DelayedACKLocked);
@@ -421,7 +421,7 @@ static void tcp_write_timer(unsigned long data)
int event;
bh_lock_sock(sk);
- if (sk->lock.users) {
+ if (sock_owned_by_user(sk)) {
/* Try again later */
if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
sock_hold(sk);
@@ -581,7 +581,7 @@ static void tcp_keepalive_timer (unsigned long data)
/* Only process if socket is not in use. */
bh_lock_sock(sk);
- if (sk->lock.users) {
+ if (sock_owned_by_user(sk)) {
/* Try again later. */
tcp_reset_keepalive_timer (sk, HZ/20);
goto out;
@@ -731,7 +731,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
bh_lock_sock(sk);
- if (sk->lock.users)
+ if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->state == TCP_CLOSE)
@@ -749,7 +749,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
- if (sk->lock.users)
+ if (sock_owned_by_user(sk))
goto out;
if ((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
goto out;
@@ -792,7 +792,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
switch (sk->state) {
struct open_request *req, **prev;
case TCP_LISTEN:
- if (sk->lock.users)
+ if (sock_owned_by_user(sk))
goto out;
req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
@@ -816,7 +816,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen.
It can, it SYNs are crossed. --ANK */
- if (sk->lock.users == 0) {
+ if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
sk->err = err;
sk->error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
@@ -828,7 +828,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
}
- if (sk->lock.users == 0 && np->recverr) {
+ if (!sock_owned_by_user(sk) && np->recverr) {
sk->err = err;
sk->error_report(sk);
} else {
@@ -1622,7 +1622,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
bh_lock_sock(sk);
ret = 0;
- if (!sk->lock.users) {
+ if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
} else
@@ -1489,7 +1489,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
__FUNCTION__);
kfree_skb(skb);
} else {
- if (!sk->lock.users)
+ if (!sock_owned_by_user(sk))
llc_conn_state_process(sk, skb);
else {
llc_set_backlog_type(skb, LLC_EVENT);
@@ -140,7 +140,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
} else
skb->sk = sk;
bh_lock_sock(sk);
- if (!sk->lock.users) {
+ if (!sock_owned_by_user(sk)) {
/* rc = */ llc_conn_rcv(sk, skb);
rc = 0;
} else {
@@ -182,7 +182,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
timer_pending(&llc->pf_cycle_timer.timer),
timer_pending(&llc->rej_sent_timer.timer),
timer_pending(&llc->busy_state_timer.timer),
- !!sk->backlog.tail, sk->lock.users);
+ !!sk->backlog.tail, sock_owned_by_user(sk));
out:
return 0;
}
@@ -70,7 +70,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
skb->h.raw = skb->data;
bh_lock_sock(sk);
- if (!sk->lock.users) {
+ if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
sk_add_backlog(sk, skb);
@@ -131,7 +131,7 @@ static void x25_heartbeat_expiry(unsigned long param)
struct sock *sk = (struct sock *)param;
bh_lock_sock(sk);
- if (sk->lock.users) /* can currently only occur in state 3 */
+ if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
goto restart_heartbeat;
switch (x25_sk(sk)->state) {
@@ -193,7 +193,7 @@ static void x25_timer_expiry(unsigned long param)
struct sock *sk = (struct sock *)param;
bh_lock_sock(sk);
- if (sk->lock.users) { /* can currently only occur in state 3 */
+ if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
if (x25_sk(sk)->state == X25_STATE_3)
x25_start_t2timer(sk);
} else