Commit 49b538e7 authored by David S. Miller

Merge tag 'rxrpc-fixes-20181008' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Fix packet reception code

Here is a set of patches that prepares for and fixes problems in rxrpc's
packet reception code.  The serious problems are:

 (A) There's a window between binding the socket and setting the data_ready
     hook in which packets can find their way into the UDP socket's receive
     queues.

 (B) skb_recv_udp() will return an error (and clear the error state) if
     there was an error on the Tx side.  rxrpc doesn't handle this.

 (C) The rxrpc data_ready handler doesn't fully drain the UDP receive
     queue.

 (D) The rxrpc data_ready handler assumes it is called in a non-reentrant
     state.

The second patch fixes (A) - (C); the third patch renders (B) and (C)
non-issues by using the encap_rcv hook instead of data_ready - and the
final patch fixes (D).  That last is the most complex.
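
For reference, the encap_rcv hookup amounts to roughly the following
(condensed from the rxrpc_open_socket() hunk in net/rxrpc/local_object.c
further down; the helper name here is purely illustrative):

    static void rxrpc_hook_encap(struct rxrpc_local *local)
    {
        struct sock *usk = local->socket->sk;

        /* Let the UDP input path find the rxrpc endpoint again. */
        rcu_assign_sk_user_data(usk, local);

        /* Have UDP hand received datagrams straight to rxrpc; the handler
         * returns 0 once it has consumed the skb.
         */
        udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
        udp_sk(usk)->encap_rcv = rxrpc_input_packet;
        udp_encap_enable();
    #if IS_ENABLED(CONFIG_IPV6)
        if (local->srx.transport.family == AF_INET6)
            udpv6_encap_enable();
    #endif
    }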

The preparatory patches are:

 (1) Fix some places that are doing things in the wrong net namespace.

 (2) Stop taking the rcu read lock as it's held by the IP input routine in
     the call chain.

 (3) Only end the Tx phase if *we* rotated the final packet out of the Tx
     buffer.

 (4) Don't assume that the call state won't change after dropping the
     call_state lock.

 (5) Only take receive window and MTU size parameters from an ACK packet if
     it's the latest ACK packet (a sketch of the ordering check follows this
     list).

 (6) Record connection-level abort information correctly.

 (7) Fix a trace line.
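
A minimal sketch of the ordering check behind (5), condensed from the
rxrpc_input_ack() hunk further down (the helper name is illustrative only;
in the real code the check is done inline under call->input_lock):

    static bool rxrpc_ack_is_newest(struct rxrpc_call *call,
                                    struct sk_buff *skb)
    {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        /* ACK serials wrap, so compare with the wrapping helpers rather
         * than a plain '<=': anything not after acks_latest is an
         * out-of-order or duplicate ACK and must not update rwind/MTU.
         */
        if (before_eq(sp->hdr.serial, call->acks_latest))
            return false;

        call->acks_latest_ts = skb->tstamp;
        call->acks_latest = sp->hdr.serial;
        return true;    /* only now trust the trailer's rwind and rxMTU */
    }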

And then there are three main patches - note that these are mixed in with
the preparatory patches somewhat:

 (1) Fix the setup window (A), skb_recv_udp() error check (B) and packet
     drainage (C).

 (2) Switch to using the encap_rcv hook instead of data_ready to cut out the
     effects of the UDP read queues and get the packets delivered directly.

 (3) Add more locking into the various packet input paths to defend against
     re-entrance (D).
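
The shape of the re-entrancy defence in (3), condensed from the
rxrpc_input_data() and rxrpc_input_ack() hunks below (rxrpc_handle_input()
and rxrpc_check_packet() are illustrative stand-ins, not functions added by
the patch; call->input_lock is initialised in rxrpc_alloc_call()):

    static void rxrpc_handle_input(struct rxrpc_call *call, struct sk_buff *skb)
    {
        spin_lock(&call->input_lock);   /* serialise parallel softirq input */

        if (!rxrpc_check_packet(call, skb))     /* stand-in for the various
                                                 * protocol-abort checks */
            goto unlock;

        /* ... advance the Rx window, queue data, process ACK info ... */

    unlock:
        spin_unlock(&call->input_lock);
    }
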
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a4890bd c1e15b49
...@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet, ...@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_fast_assign( TP_fast_assign(
__entry->call = call_id; __entry->call = call_id;
memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
__entry->where = where;
), ),
TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
......
...@@ -40,5 +40,6 @@ struct udphdr { ...@@ -40,5 +40,6 @@ struct udphdr {
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#define UDP_ENCAP_RXRPC 6
#endif /* _UAPI_LINUX_UDP_H */ #endif /* _UAPI_LINUX_UDP_H */
...@@ -302,6 +302,7 @@ struct rxrpc_peer { ...@@ -302,6 +302,7 @@ struct rxrpc_peer {
/* calculated RTT cache */ /* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32 #define RXRPC_RTT_CACHE_SIZE 32
spinlock_t rtt_input_lock; /* RTT lock for input routine */
ktime_t rtt_last_req; /* Time of last RTT request */ ktime_t rtt_last_req; /* Time of last RTT request */
u64 rtt; /* Current RTT estimate (in nS) */ u64 rtt; /* Current RTT estimate (in nS) */
u64 rtt_sum; /* Sum of cache contents */ u64 rtt_sum; /* Sum of cache contents */
...@@ -442,17 +443,17 @@ struct rxrpc_connection { ...@@ -442,17 +443,17 @@ struct rxrpc_connection {
spinlock_t state_lock; /* state-change lock */ spinlock_t state_lock; /* state-change lock */
enum rxrpc_conn_cache_state cache_state; enum rxrpc_conn_cache_state cache_state;
enum rxrpc_conn_proto_state state; /* current state of connection */ enum rxrpc_conn_proto_state state; /* current state of connection */
u32 local_abort; /* local abort code */ u32 abort_code; /* Abort code of connection abort */
u32 remote_abort; /* remote abort code */
int debug_id; /* debug ID for printks */ int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */ atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */ unsigned int hi_serial; /* highest serial number received */
u32 security_nonce; /* response re-use preventer */ u32 security_nonce; /* response re-use preventer */
u16 service_id; /* Service ID, possibly upgraded */ u32 service_id; /* Service ID, possibly upgraded */
u8 size_align; /* data size alignment (for security) */ u8 size_align; /* data size alignment (for security) */
u8 security_size; /* security header size */ u8 security_size; /* security header size */
u8 security_ix; /* security type */ u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
short error; /* Local error code */
}; };
static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
...@@ -635,6 +636,8 @@ struct rxrpc_call { ...@@ -635,6 +636,8 @@ struct rxrpc_call {
bool tx_phase; /* T if transmission phase, F if receive phase */ bool tx_phase; /* T if transmission phase, F if receive phase */
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
spinlock_t input_lock; /* Lock for packet input to this call */
/* receive-phase ACK management */ /* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */ u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */ u16 ackr_skew; /* skew on packet being ACK'd */
...@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); ...@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *); void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
struct rxrpc_sock *, struct rxrpc_sock *,
struct rxrpc_peer *,
struct rxrpc_connection *,
struct sk_buff *); struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *); void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
...@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry; ...@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids; extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void); void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sockaddr_rxrpc *, gfp_t); struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *); void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *); void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *); void rxrpc_put_client_conn(struct rxrpc_connection *);
...@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *); ...@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/* /*
* input.c * input.c
*/ */
void rxrpc_data_ready(struct sock *); int rxrpc_input_packet(struct sock *, struct sk_buff *);
/* /*
* insecure.c * insecure.c
...@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *); ...@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
*/ */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *); const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t); struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *); void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *); void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
......
...@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, ...@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
(peer_tail + 1) & (peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1)); (RXRPC_BACKLOG_MAX - 1));
rxrpc_new_incoming_peer(local, peer); rxrpc_new_incoming_peer(rx, local, peer);
} }
/* Now allocate and set up the connection */ /* Now allocate and set up the connection */
...@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, ...@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
*/ */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_sock *rx, struct rxrpc_sock *rx,
struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_connection *conn;
struct rxrpc_peer *peer;
struct rxrpc_call *call; struct rxrpc_call *call;
_enter(""); _enter("");
...@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
goto out; goto out;
} }
/* The peer, connection and call may all have sprung into existence due
* to a duplicate packet being handled on another CPU in parallel, so
* we have to recheck the routing. However, we're now holding
* rx->incoming_lock, so the values should remain stable.
*/
conn = rxrpc_find_connection_rcu(local, skb, &peer);
call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) { if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
...@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
case RXRPC_CONN_SERVICE: case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock); write_lock(&call->state_lock);
if (rx->discard_new_call) if (call->state < RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_SERVER_RECV_REQUEST; if (rx->discard_new_call)
else call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
call->state = RXRPC_CALL_SERVER_ACCEPTING; else
call->state = RXRPC_CALL_SERVER_ACCEPTING;
}
write_unlock(&call->state_lock); write_unlock(&call->state_lock);
break; break;
case RXRPC_CONN_REMOTELY_ABORTED: case RXRPC_CONN_REMOTELY_ABORTED:
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
conn->remote_abort, -ECONNABORTED); conn->abort_code, conn->error);
break; break;
case RXRPC_CONN_LOCALLY_ABORTED: case RXRPC_CONN_LOCALLY_ABORTED:
rxrpc_abort_call("CON", call, sp->hdr.seq, rxrpc_abort_call("CON", call, sp->hdr.seq,
conn->local_abort, -ECONNABORTED); conn->abort_code, conn->error);
break; break;
default: default:
BUG(); BUG();
......
...@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, ...@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
init_waitqueue_head(&call->waitq); init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock); spin_lock_init(&call->lock);
spin_lock_init(&call->notify_lock); spin_lock_init(&call->notify_lock);
spin_lock_init(&call->input_lock);
rwlock_init(&call->state_lock); rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1); atomic_set(&call->usage, 1);
call->debug_id = debug_id; call->debug_id = debug_id;
...@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, ...@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters, /* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID. * including channel number and call ID.
*/ */
ret = rxrpc_connect_call(call, cp, srx, gfp); ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0) if (ret < 0)
goto error; goto error;
...@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx, ...@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters, /* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID. * including channel number and call ID.
*/ */
ret = rxrpc_connect_call(call, cp, srx, gfp); ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0) if (ret < 0)
goto error; goto error;
......
...@@ -276,7 +276,8 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) ...@@ -276,7 +276,8 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
* If we return with a connection, the call will be on its waiting list. It's * If we return with a connection, the call will be on its waiting list. It's
* left to the caller to assign a channel and wake up the call. * left to the caller to assign a channel and wake up the call.
*/ */
static int rxrpc_get_client_conn(struct rxrpc_call *call, static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp, struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx, struct sockaddr_rxrpc *srx,
gfp_t gfp) gfp_t gfp)
...@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call, ...@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID); _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
if (!cp->peer) if (!cp->peer)
goto error; goto error;
...@@ -683,7 +684,8 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) ...@@ -683,7 +684,8 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
* find a connection for a call * find a connection for a call
* - called in process context with IRQs enabled * - called in process context with IRQs enabled
*/ */
int rxrpc_connect_call(struct rxrpc_call *call, int rxrpc_connect_call(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp, struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx, struct sockaddr_rxrpc *srx,
gfp_t gfp) gfp_t gfp)
...@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call, ...@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet); rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp); ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
if (ret < 0) if (ret < 0)
goto out; goto out;
......
...@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, ...@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
switch (chan->last_type) { switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT: case RXRPC_PACKET_TYPE_ABORT:
_proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
break; break;
case RXRPC_PACKET_TYPE_ACK: case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(chan->call_debug_id, serial, trace_rxrpc_tx_ack(chan->call_debug_id, serial,
...@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, ...@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
* pass a connection-level abort onto all calls on that connection * pass a connection-level abort onto all calls on that connection
*/ */
static void rxrpc_abort_calls(struct rxrpc_connection *conn, static void rxrpc_abort_calls(struct rxrpc_connection *conn,
enum rxrpc_call_completion compl, enum rxrpc_call_completion compl)
u32 abort_code, int error)
{ {
struct rxrpc_call *call; struct rxrpc_call *call;
int i; int i;
_enter("{%d},%x", conn->debug_id, abort_code); _enter("{%d},%x", conn->debug_id, conn->abort_code);
spin_lock(&conn->channel_lock); spin_lock(&conn->channel_lock);
...@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, ...@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
trace_rxrpc_abort(call->debug_id, trace_rxrpc_abort(call->debug_id,
"CON", call->cid, "CON", call->cid,
call->call_id, 0, call->call_id, 0,
abort_code, error); conn->abort_code,
conn->error);
if (rxrpc_set_call_completion(call, compl, if (rxrpc_set_call_completion(call, compl,
abort_code, error)) conn->abort_code,
conn->error))
rxrpc_notify_socket(call); rxrpc_notify_socket(call);
} }
} }
...@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ...@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return 0; return 0;
} }
conn->error = error;
conn->abort_code = abort_code;
conn->state = RXRPC_CONN_LOCALLY_ABORTED; conn->state = RXRPC_CONN_LOCALLY_ABORTED;
spin_unlock_bh(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error); rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
msg.msg_name = &conn->params.peer->srx.transport; msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len; msg.msg_namelen = conn->params.peer->srx.transport_len;
...@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ...@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
whdr._rsvd = 0; whdr._rsvd = 0;
whdr.serviceId = htons(conn->service_id); whdr.serviceId = htons(conn->service_id);
word = htonl(conn->local_abort); word = htonl(conn->abort_code);
iov[0].iov_base = &whdr; iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr); iov[0].iov_len = sizeof(whdr);
...@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ...@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial); serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial); whdr.serial = htonl(serial);
_proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) { if (ret < 0) {
...@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, ...@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
abort_code = ntohl(wtmp); abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
conn->error = -ECONNABORTED;
conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED; conn->state = RXRPC_CONN_REMOTELY_ABORTED;
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
abort_code, -ECONNABORTED);
return -ECONNABORTED; return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE: case RXRPC_PACKET_TYPE_CHALLENGE:
......
...@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
/* /*
* Apply a hard ACK by advancing the Tx window. * Apply a hard ACK by advancing the Tx window.
*/ */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary) struct rxrpc_ack_summary *summary)
{ {
struct sk_buff *skb, *list = NULL; struct sk_buff *skb, *list = NULL;
bool rot_last = false;
int ix; int ix;
u8 annotation; u8 annotation;
...@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ...@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = list; skb->next = list;
list = skb; list = skb;
if (annotation & RXRPC_TX_ANNO_LAST) if (annotation & RXRPC_TX_ANNO_LAST) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags); set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
summary->nr_rot_new_acks++; summary->nr_rot_new_acks++;
} }
spin_unlock(&call->lock); spin_unlock(&call->lock);
trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ? trace_rxrpc_transmit(call, (rot_last ?
rxrpc_transmit_rotate_last : rxrpc_transmit_rotate_last :
rxrpc_transmit_rotate)); rxrpc_transmit_rotate));
wake_up(&call->waitq); wake_up(&call->waitq);
...@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ...@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = NULL; skb->next = NULL;
rxrpc_free_skb(skb, rxrpc_skb_tx_freed); rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
} }
return rot_last;
} }
/* /*
...@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ...@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
const char *abort_why) const char *abort_why)
{ {
unsigned int state;
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
write_lock(&call->state_lock); write_lock(&call->state_lock);
switch (call->state) { state = call->state;
switch (state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST: case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY: case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (reply_begun) if (reply_begun)
call->state = RXRPC_CALL_CLIENT_RECV_REPLY; call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
else else
call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
break; break;
case RXRPC_CALL_SERVER_AWAIT_ACK: case RXRPC_CALL_SERVER_AWAIT_ACK:
__rxrpc_call_completed(call); __rxrpc_call_completed(call);
rxrpc_notify_socket(call); rxrpc_notify_socket(call);
state = call->state;
break; break;
default: default:
...@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, ...@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
} }
write_unlock(&call->state_lock); write_unlock(&call->state_lock);
if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) { if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
} else { else
trace_rxrpc_transmit(call, rxrpc_transmit_end); trace_rxrpc_transmit(call, rxrpc_transmit_end);
}
_leave(" = ok"); _leave(" = ok");
return true; return true;
...@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) ...@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
} }
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
rxrpc_rotate_tx_window(call, top, &summary);
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
rxrpc_proto_abort("TXL", call, top); if (!rxrpc_rotate_tx_window(call, top, &summary)) {
return false; rxrpc_proto_abort("TXL", call, top);
return false;
}
} }
if (!rxrpc_end_tx_phase(call, true, "ETD")) if (!rxrpc_end_tx_phase(call, true, "ETD"))
return false; return false;
...@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
} }
} }
spin_lock(&call->input_lock);
/* Received data implicitly ACKs all of the request packets we sent /* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client. * when we're acting as a client.
*/ */
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call)) !rxrpc_receiving_reply(call))
return; goto unlock;
call->ackr_prev_seq = seq; call->ackr_prev_seq = seq;
...@@ -488,12 +497,16 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -488,12 +497,16 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
if (flags & RXRPC_LAST_PACKET) { if (flags & RXRPC_LAST_PACKET) {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
seq != call->rx_top) seq != call->rx_top) {
return rxrpc_proto_abort("LSN", call, seq); rxrpc_proto_abort("LSN", call, seq);
goto unlock;
}
} else { } else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
after_eq(seq, call->rx_top)) after_eq(seq, call->rx_top)) {
return rxrpc_proto_abort("LSA", call, seq); rxrpc_proto_abort("LSA", call, seq);
goto unlock;
}
} }
trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
...@@ -560,8 +573,10 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -560,8 +573,10 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
skip: skip:
offset += len; offset += len;
if (flags & RXRPC_JUMBO_PACKET) { if (flags & RXRPC_JUMBO_PACKET) {
if (skb_copy_bits(skb, offset, &flags, 1) < 0) if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
return rxrpc_proto_abort("XJF", call, seq); rxrpc_proto_abort("XJF", call, seq);
goto unlock;
}
offset += sizeof(struct rxrpc_jumbo_header); offset += sizeof(struct rxrpc_jumbo_header);
seq++; seq++;
serial++; serial++;
...@@ -601,6 +616,9 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -601,6 +616,9 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
trace_rxrpc_notify_socket(call->debug_id, serial); trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call); rxrpc_notify_socket(call);
} }
unlock:
spin_unlock(&call->input_lock);
_leave(" [queued]"); _leave(" [queued]");
} }
...@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, ...@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
ping_time = call->ping_time; ping_time = call->ping_time;
smp_rmb(); smp_rmb();
ping_serial = call->ping_serial; ping_serial = READ_ONCE(call->ping_serial);
if (orig_serial == call->acks_lost_ping) if (orig_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call); rxrpc_input_check_for_lost_ack(call);
if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || if (before(orig_serial, ping_serial) ||
before(orig_serial, ping_serial)) !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
return; return;
clear_bit(RXRPC_CALL_PINGING, &call->flags);
if (after(orig_serial, ping_serial)) if (after(orig_serial, ping_serial))
return; return;
...@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack); rxrpc_propose_ack_respond_to_ack);
} }
/* Discard any out-of-order or duplicate ACKs. */
if (before_eq(sp->hdr.serial, call->acks_latest))
return;
buf.info.rxMTU = 0;
ioffset = offset + nr_acks + 3; ioffset = offset + nr_acks + 3;
if (skb->len >= ioffset + sizeof(buf.info)) { if (skb->len >= ioffset + sizeof(buf.info) &&
if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
return rxrpc_proto_abort("XAI", call, 0); return rxrpc_proto_abort("XAI", call, 0);
spin_lock(&call->input_lock);
/* Discard any out-of-order or duplicate ACKs. */
if (before_eq(sp->hdr.serial, call->acks_latest))
goto out;
call->acks_latest_ts = skb->tstamp;
call->acks_latest = sp->hdr.serial;
/* Parse rwind and mtu sizes if provided. */
if (buf.info.rxMTU)
rxrpc_input_ackinfo(call, skb, &buf.info); rxrpc_input_ackinfo(call, skb, &buf.info);
}
if (first_soft_ack == 0) if (first_soft_ack == 0) {
return rxrpc_proto_abort("AK0", call, 0); rxrpc_proto_abort("AK0", call, 0);
goto out;
}
/* Ignore ACKs unless we are or have just been transmitting. */ /* Ignore ACKs unless we are or have just been transmitting. */
switch (READ_ONCE(call->state)) { switch (READ_ONCE(call->state)) {
...@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
case RXRPC_CALL_SERVER_AWAIT_ACK: case RXRPC_CALL_SERVER_AWAIT_ACK:
break; break;
default: default:
return; goto out;
}
/* Discard any out-of-order or duplicate ACKs. */
if (before_eq(sp->hdr.serial, call->acks_latest)) {
_debug("discard ACK %d <= %d",
sp->hdr.serial, call->acks_latest);
return;
} }
call->acks_latest_ts = skb->tstamp;
call->acks_latest = sp->hdr.serial;
if (before(hard_ack, call->tx_hard_ack) || if (before(hard_ack, call->tx_hard_ack) ||
after(hard_ack, call->tx_top)) after(hard_ack, call->tx_top)) {
return rxrpc_proto_abort("AKW", call, 0); rxrpc_proto_abort("AKW", call, 0);
if (nr_acks > call->tx_top - hard_ack) goto out;
return rxrpc_proto_abort("AKN", call, 0); }
if (nr_acks > call->tx_top - hard_ack) {
rxrpc_proto_abort("AKN", call, 0);
goto out;
}
if (after(hard_ack, call->tx_hard_ack)) if (after(hard_ack, call->tx_hard_ack)) {
rxrpc_rotate_tx_window(call, hard_ack, &summary); if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
rxrpc_end_tx_phase(call, false, "ETA");
goto out;
}
}
if (nr_acks > 0) { if (nr_acks > 0) {
if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
return rxrpc_proto_abort("XSA", call, 0); rxrpc_proto_abort("XSA", call, 0);
goto out;
}
rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
&summary); &summary);
} }
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
rxrpc_end_tx_phase(call, false, "ETA");
return;
}
if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST && RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack && summary.nr_acks == call->tx_top - hard_ack &&
...@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
false, true, false, true,
rxrpc_propose_ack_ping_for_lost_reply); rxrpc_propose_ack_ping_for_lost_reply);
return rxrpc_congestion_management(call, skb, &summary, acked_serial); rxrpc_congestion_management(call, skb, &summary, acked_serial);
out:
spin_unlock(&call->input_lock);
} }
/* /*
...@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) ...@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
_proto("Rx ACKALL %%%u", sp->hdr.serial); _proto("Rx ACKALL %%%u", sp->hdr.serial);
rxrpc_rotate_tx_window(call, call->tx_top, &summary); spin_lock(&call->input_lock);
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, "ETL"); rxrpc_end_tx_phase(call, false, "ETL");
spin_unlock(&call->input_lock);
} }
/* /*
...@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, ...@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
} }
/* /*
* Handle a new call on a channel implicitly completing the preceding call on * Handle a new service call on a channel implicitly completing the preceding
* that channel. * call on that channel. This does not apply to client conns.
* *
* TODO: If callNumber > call_id + 1, renegotiate security. * TODO: If callNumber > call_id + 1, renegotiate security.
*/ */
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
struct rxrpc_connection *conn,
struct rxrpc_call *call) struct rxrpc_call *call)
{ {
switch (READ_ONCE(call->state)) { switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK: case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call); rxrpc_call_completed(call);
break; /* Fall through */
case RXRPC_CALL_COMPLETE: case RXRPC_CALL_COMPLETE:
break; break;
default: default:
...@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, ...@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
set_bit(RXRPC_CALL_EV_ABORT, &call->events); set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call); rxrpc_queue_call(call);
} }
trace_rxrpc_improper_term(call);
break; break;
} }
trace_rxrpc_improper_term(call); spin_lock(&rx->incoming_lock);
__rxrpc_disconnect_call(conn, call); __rxrpc_disconnect_call(conn, call);
spin_unlock(&rx->incoming_lock);
rxrpc_notify_socket(call); rxrpc_notify_socket(call);
} }
...@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) ...@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* The socket is locked by the caller and this prevents the socket from being * The socket is locked by the caller and this prevents the socket from being
* shut down and the local endpoint from going away, thus sk_user_data will not * shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns. * be cleared until this function returns.
*
* Called with the RCU read lock held from the IP layer via UDP.
*/ */
void rxrpc_data_ready(struct sock *udp_sk) int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{ {
struct rxrpc_connection *conn; struct rxrpc_connection *conn;
struct rxrpc_channel *chan; struct rxrpc_channel *chan;
...@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk)
struct rxrpc_local *local = udp_sk->sk_user_data; struct rxrpc_local *local = udp_sk->sk_user_data;
struct rxrpc_peer *peer = NULL; struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL; struct rxrpc_sock *rx = NULL;
struct sk_buff *skb;
unsigned int channel; unsigned int channel;
int ret, skew = 0; int skew = 0;
_enter("%p", udp_sk); _enter("%p", udp_sk);
ASSERT(!irqs_disabled());
skb = skb_recv_udp(udp_sk, 0, 1, &ret);
if (!skb) {
if (ret == -EAGAIN)
return;
_debug("UDP socket error %d", ret);
return;
}
if (skb->tstamp == 0) if (skb->tstamp == 0)
skb->tstamp = ktime_get_real(); skb->tstamp = ktime_get_real();
rxrpc_new_skb(skb, rxrpc_skb_rx_received); rxrpc_new_skb(skb, rxrpc_skb_rx_received);
_net("recv skb %p", skb); skb_pull(skb, sizeof(struct udphdr));
/* we'll probably need to checksum it (didn't call sock_recvmsg) */
if (skb_checksum_complete(skb)) {
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
_leave(" [CSUM failed]");
return;
}
__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
/* The UDP protocol already released all skb resources; /* The UDP protocol already released all skb resources;
* we are free to add our own data there. * we are free to add our own data there.
...@@ -1176,11 +1195,13 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1176,11 +1195,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
static int lose; static int lose;
if ((lose++ & 7) == 7) { if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp); trace_rxrpc_rx_lose(sp);
rxrpc_lose_skb(skb, rxrpc_skb_rx_lost); rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
return; return 0;
} }
} }
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
trace_rxrpc_rx_packet(sp); trace_rxrpc_rx_packet(sp);
switch (sp->hdr.type) { switch (sp->hdr.type) {
...@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (sp->hdr.serviceId == 0) if (sp->hdr.serviceId == 0)
goto bad_message; goto bad_message;
rcu_read_lock();
if (rxrpc_to_server(sp)) { if (rxrpc_to_server(sp)) {
/* Weed out packets to services we're not offering. Packets /* Weed out packets to services we're not offering. Packets
* that would begin a call are explicitly rejected and the rest * that would begin a call are explicitly rejected and the rest
...@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
sp->hdr.seq == 1) sp->hdr.seq == 1)
goto unsupported_service; goto unsupported_service;
goto discard_unlock; goto discard;
} }
} }
...@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto wrong_security; goto wrong_security;
if (sp->hdr.serviceId != conn->service_id) { if (sp->hdr.serviceId != conn->service_id) {
if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || int old_id;
conn->service_id != conn->params.service_id)
if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
goto reupgrade;
old_id = cmpxchg(&conn->service_id, conn->params.service_id,
sp->hdr.serviceId);
if (old_id != conn->params.service_id &&
old_id != sp->hdr.serviceId)
goto reupgrade; goto reupgrade;
conn->service_id = sp->hdr.serviceId;
} }
if (sp->hdr.callNumber == 0) { if (sp->hdr.callNumber == 0) {
/* Connection-level packet */ /* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id); _debug("CONN %p {%d}", conn, conn->debug_id);
rxrpc_post_packet_to_conn(conn, skb); rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock; goto out;
} }
/* Note the serial number skew here */ /* Note the serial number skew here */
...@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
/* Ignore really old calls */ /* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call) if (sp->hdr.callNumber < chan->last_call)
goto discard_unlock; goto discard;
if (sp->hdr.callNumber == chan->last_call) { if (sp->hdr.callNumber == chan->last_call) {
if (chan->call || if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
goto discard_unlock; goto discard;
/* For the previous service call, if completed /* For the previous service call, if completed
* successfully, we discard all further packets. * successfully, we discard all further packets.
*/ */
if (rxrpc_conn_is_service(conn) && if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK) chan->last_type == RXRPC_PACKET_TYPE_ACK)
goto discard_unlock; goto discard;
/* But otherwise we need to retransmit the final packet /* But otherwise we need to retransmit the final packet
* from data cached in the connection record. * from data cached in the connection record.
...@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
sp->hdr.serial, sp->hdr.serial,
sp->hdr.flags, 0); sp->hdr.flags, 0);
rxrpc_post_packet_to_conn(conn, skb); rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock; goto out;
} }
call = rcu_dereference(chan->call); call = rcu_dereference(chan->call);
if (sp->hdr.callNumber > chan->call_id) { if (sp->hdr.callNumber > chan->call_id) {
if (rxrpc_to_client(sp)) { if (rxrpc_to_client(sp))
rcu_read_unlock();
goto reject_packet; goto reject_packet;
}
if (call) if (call)
rxrpc_input_implicit_end_call(conn, call); rxrpc_input_implicit_end_call(rx, conn, call);
call = NULL; call = NULL;
} }
...@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (!call || atomic_read(&call->usage) == 0) { if (!call || atomic_read(&call->usage) == 0) {
if (rxrpc_to_client(sp) || if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA) sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
goto bad_message_unlock; goto bad_message;
if (sp->hdr.seq != 1) if (sp->hdr.seq != 1)
goto discard_unlock; goto discard;
call = rxrpc_new_incoming_call(local, rx, peer, conn, skb); call = rxrpc_new_incoming_call(local, rx, skb);
if (!call) { if (!call)
rcu_read_unlock();
goto reject_packet; goto reject_packet;
}
rxrpc_send_ping(call, skb, skew); rxrpc_send_ping(call, skb, skew);
mutex_unlock(&call->user_mutex); mutex_unlock(&call->user_mutex);
} }
rxrpc_input_call_packet(call, skb, skew); rxrpc_input_call_packet(call, skb, skew);
goto discard_unlock; goto discard;
discard_unlock:
rcu_read_unlock();
discard: discard:
rxrpc_free_skb(skb, rxrpc_skb_rx_freed); rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out: out:
trace_rxrpc_rx_done(0, 0); trace_rxrpc_rx_done(0, 0);
return; return 0;
out_unlock:
rcu_read_unlock();
goto out;
wrong_security: wrong_security:
rcu_read_unlock();
trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG); RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY; skb->priority = RXKADINCONSISTENCY;
goto post_abort; goto post_abort;
unsupported_service: unsupported_service:
rcu_read_unlock();
trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_INVALID_OPERATION, EOPNOTSUPP); RX_INVALID_OPERATION, EOPNOTSUPP);
skb->priority = RX_INVALID_OPERATION; skb->priority = RX_INVALID_OPERATION;
goto post_abort; goto post_abort;
reupgrade: reupgrade:
rcu_read_unlock();
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG); RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error; goto protocol_error;
bad_message_unlock:
rcu_read_unlock();
bad_message: bad_message:
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG); RX_PROTOCOL_ERROR, EBADMSG);
...@@ -1397,4 +1407,5 @@ void rxrpc_data_ready(struct sock *udp_sk) ...@@ -1397,4 +1407,5 @@ void rxrpc_data_ready(struct sock *udp_sk)
trace_rxrpc_rx_done(skb->mark, skb->priority); trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb); rxrpc_reject_packet(local, skb);
_leave(" [badmsg]"); _leave(" [badmsg]");
return 0;
} }
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/hashtable.h> #include <linux/hashtable.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/udp.h>
#include <net/af_rxrpc.h> #include <net/af_rxrpc.h>
#include "ar-internal.h" #include "ar-internal.h"
...@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, ...@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
*/ */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{ {
struct sock *sock; struct sock *usk;
int ret, opt; int ret, opt;
_enter("%p{%d,%d}", _enter("%p{%d,%d}",
...@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) ...@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
return ret; return ret;
} }
/* set the socket up */
usk = local->socket->sk;
inet_sk(usk)->mc_loop = 0;
/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
inet_inc_convert_csum(usk);
rcu_assign_sk_user_data(usk, local);
udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
udp_sk(usk)->encap_rcv = rxrpc_input_packet;
udp_sk(usk)->encap_destroy = NULL;
udp_sk(usk)->gro_receive = NULL;
udp_sk(usk)->gro_complete = NULL;
udp_encap_enable();
#if IS_ENABLED(CONFIG_IPV6)
if (local->srx.transport.family == AF_INET6)
udpv6_encap_enable();
#endif
usk->sk_error_report = rxrpc_error_report;
/* if a local address was supplied then bind it */ /* if a local address was supplied then bind it */
if (local->srx.transport_len > sizeof(sa_family_t)) { if (local->srx.transport_len > sizeof(sa_family_t)) {
_debug("bind"); _debug("bind");
...@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) ...@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
BUG(); BUG();
} }
/* set the socket up */
sock = local->socket->sk;
sock->sk_user_data = local;
sock->sk_data_ready = rxrpc_data_ready;
sock->sk_error_report = rxrpc_error_report;
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
......
...@@ -301,6 +301,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, ...@@ -301,6 +301,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
if (rtt < 0) if (rtt < 0)
return; return;
spin_lock(&peer->rtt_input_lock);
/* Replace the oldest datum in the RTT buffer */ /* Replace the oldest datum in the RTT buffer */
sum -= peer->rtt_cache[cursor]; sum -= peer->rtt_cache[cursor];
sum += rtt; sum += rtt;
...@@ -312,6 +314,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, ...@@ -312,6 +314,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_usage = usage; peer->rtt_usage = usage;
} }
spin_unlock(&peer->rtt_input_lock);
/* Now recalculate the average */ /* Now recalculate the average */
if (usage == RXRPC_RTT_CACHE_SIZE) { if (usage == RXRPC_RTT_CACHE_SIZE) {
avg = sum / RXRPC_RTT_CACHE_SIZE; avg = sum / RXRPC_RTT_CACHE_SIZE;
...@@ -320,6 +324,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, ...@@ -320,6 +324,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
do_div(avg, usage); do_div(avg, usage);
} }
/* Don't need to update this under lock */
peer->rtt = avg; peer->rtt = avg;
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg); usage, avg);
......
...@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local, ...@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is * assess the MTU size for the network interface through which this peer is
* reached * reached
*/ */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
struct rxrpc_peer *peer)
{ {
struct net *net = sock_net(&rx->sk);
struct dst_entry *dst; struct dst_entry *dst;
struct rtable *rt; struct rtable *rt;
struct flowi fl; struct flowi fl;
...@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) ...@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
switch (peer->srx.transport.family) { switch (peer->srx.transport.family) {
case AF_INET: case AF_INET:
rt = ip_route_output_ports( rt = ip_route_output_ports(
&init_net, fl4, NULL, net, fl4, NULL,
peer->srx.transport.sin.sin_addr.s_addr, 0, peer->srx.transport.sin.sin_addr.s_addr, 0,
htons(7000), htons(7001), IPPROTO_UDP, 0, 0); htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
...@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) ...@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
sizeof(struct in6_addr)); sizeof(struct in6_addr));
fl6->fl6_dport = htons(7001); fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000); fl6->fl6_sport = htons(7000);
dst = ip6_route_output(&init_net, NULL, fl6); dst = ip6_route_output(net, NULL, fl6);
if (dst->error) { if (dst->error) {
_leave(" [route err %d]", dst->error); _leave(" [route err %d]", dst->error);
return; return;
...@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) ...@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
peer->service_conns = RB_ROOT; peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock); seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock); spin_lock_init(&peer->lock);
spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id); peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
if (RXRPC_TX_SMSS > 2190) if (RXRPC_TX_SMSS > 2190)
...@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) ...@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
/* /*
* Initialise peer record. * Initialise peer record.
*/ */
static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
unsigned long hash_key)
{ {
peer->hash_key = hash_key; peer->hash_key = hash_key;
rxrpc_assess_MTU_size(peer); rxrpc_assess_MTU_size(rx, peer);
peer->mtu = peer->if_mtu; peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real(); peer->rtt_last_req = ktime_get_real();
...@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) ...@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
/* /*
* Set up a new peer. * Set up a new peer.
*/ */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, struct sockaddr_rxrpc *srx,
unsigned long hash_key, unsigned long hash_key,
gfp_t gfp) gfp_t gfp)
...@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, ...@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp); peer = rxrpc_alloc_peer(local, gfp);
if (peer) { if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx)); memcpy(&peer->srx, srx, sizeof(*srx));
rxrpc_init_peer(peer, hash_key); rxrpc_init_peer(rx, peer, hash_key);
} }
_leave(" = %p", peer); _leave(" = %p", peer);
...@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, ...@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
* since we've already done a search in the list from the non-reentrant context * since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers. * (the data_ready handler) that is the only place we can add new peers.
*/ */
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
struct rxrpc_peer *peer)
{ {
struct rxrpc_net *rxnet = local->rxnet; struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key; unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx); hash_key = rxrpc_peer_hash_key(local, &peer->srx);
peer->local = local; peer->local = local;
rxrpc_init_peer(peer, hash_key); rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock); spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
...@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) ...@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
/* /*
* obtain a remote transport endpoint for the specified address * obtain a remote transport endpoint for the specified address
*/ */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp) struct sockaddr_rxrpc *srx, gfp_t gfp)
{ {
struct rxrpc_peer *peer, *candidate; struct rxrpc_peer *peer, *candidate;
...@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, ...@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
/* The peer is not yet present in hash - create a candidate /* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search. * for a new record and then redo the search.
*/ */
candidate = rxrpc_create_peer(local, srx, hash_key, gfp); candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
if (!candidate) { if (!candidate) {
_leave(" = NULL [nomem]"); _leave(" = NULL [nomem]");
return NULL; return NULL;
......