Commit 0099dc58 authored by David Howells

rxrpc: Make service call handling more robust

Make the following changes to improve the robustness of the code that sets
up a new service call:

 (1) Cache the rxrpc_sock struct obtained in rxrpc_data_ready() to do the
     service ID check, and pass the socket along to
     rxrpc_new_incoming_call().  This means that I can remove the check
     from rxrpc_new_incoming_call() without needing to worry about the
     socket attached to the local endpoint being replaced - which would
     invalidate the check.

 (2) Cache the rxrpc_peer struct so that the peer search need only be done
     once.  The cached peer is passed to rxrpc_new_incoming_call(), which
     saves repeating the search there.

     This also reduces the possibility of rxrpc_publish_service_conn()
     BUG()'ing due to the detection of a duplicate connection, despite the
     initial search done by rxrpc_find_connection_rcu() having turned up
     nothing.

     This BUG() shouldn't ever get hit since rxrpc_data_ready() *should* be
     non-reentrant and the result of the initial search should still hold
     true, but it has proven possible to hit.

     I *think* this may be due to __rxrpc_lookup_peer_rcu() cutting short
     the iteration over the hash table if it finds a matching peer with a
     zero usage count, but I don't know for sure since it's only ever been
     hit once that I know of.

     Another possibility is that a bug in rxrpc_data_ready() that checked
     the wrong byte in the header for the RXRPC_CLIENT_INITIATED flag
     might've let through a packet that caused a spurious and invalid call
     to be set up.  That is addressed in another patch.

 (3) Fix __rxrpc_lookup_peer_rcu() to skip peer records that have a zero
     usage count rather than stopping and returning "not found", just in
     case there's another peer record behind it in the same bucket (see
     the sketch after this list).

 (4) Don't search the peer records in rxrpc_alloc_incoming_call(); rather,
     either use the peer cached in (2) or, if one wasn't found,
     preemptively install a new one.
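
To illustrate the lookup problem fixed by (3) - and one way a dying record
could hide a live one from the search, which is the suspected trigger for
the BUG() discussed in (2) - here is a minimal, self-contained sketch in
plain C.  The struct peer, lookup_old() and lookup_new() names here are
hypothetical stand-ins for the kernel's hashed, RCU-protected structures;
only the skip-versus-stop logic mirrors the patch:

	#include <stdio.h>

	struct peer {
		int key;		/* stands in for the hashed address */
		int usage;		/* 0 => dying, awaiting removal */
		struct peer *next;	/* next record in the hash bucket */
	};

	/* Old behaviour: stop at the first key match, even a dying one. */
	static struct peer *lookup_old(struct peer *bucket, int key)
	{
		for (struct peer *p = bucket; p; p = p->next)
			if (p->key == key)
				return p->usage > 0 ? p : NULL;
		return NULL;
	}

	/* New behaviour: keep walking past dying records. */
	static struct peer *lookup_new(struct peer *bucket, int key)
	{
		for (struct peer *p = bucket; p; p = p->next)
			if (p->key == key && p->usage > 0)
				return p;
		return NULL;
	}

	int main(void)
	{
		struct peer live  = { .key = 1, .usage = 1, .next = NULL };
		struct peer dying = { .key = 1, .usage = 0, .next = &live };

		/* The dying record shadows the live one from lookup_old(). */
		printf("old: %s\n", lookup_old(&dying, 1) ? "found" : "not found");
		printf("new: %s\n", lookup_new(&dying, 1) ? "found" : "not found");
		return 0;
	}

With the old rule the search gives up at the dying record and reports "not
found"; the caller then creates and publishes a second record for the same
key, which the later duplicate check trips over.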

Fixes: 8496af50 ("rxrpc: Use RCU to access a peer's service connection tree")
Signed-off-by: David Howells <dhowells@redhat.com>
parent 403fc2a1
@@ -722,6 +722,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+					   struct rxrpc_sock *,
+					   struct rxrpc_peer *,
 					   struct rxrpc_connection *,
 					   struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -913,7 +915,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-						   struct sk_buff *);
+						   struct sk_buff *,
+						   struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1049,8 +1052,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
 				     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-					      struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
...
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 						    struct rxrpc_local *local,
+						    struct rxrpc_peer *peer,
 						    struct rxrpc_connection *conn,
 						    struct sk_buff *skb)
 {
 	struct rxrpc_backlog *b = rx->backlog;
-	struct rxrpc_peer *peer, *xpeer;
 	struct rxrpc_call *call;
 	unsigned short call_head, conn_head, peer_head;
 	unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 		return NULL;
 
 	if (!conn) {
-		/* No connection.  We're going to need a peer to start off
-		 * with.  If one doesn't yet exist, use a spare from the
-		 * preallocation set.  We dump the address into the spare in
-		 * anticipation - and to save on stack space.
-		 */
-		xpeer = b->peer_backlog[peer_tail];
-		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-			return NULL;
-
-		peer = rxrpc_lookup_incoming_peer(local, xpeer);
-		if (peer == xpeer) {
+		if (peer && !rxrpc_get_peer_maybe(peer))
+			peer = NULL;
+		if (!peer) {
+			peer = b->peer_backlog[peer_tail];
+			if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+				return NULL;
 			b->peer_backlog[peer_tail] = NULL;
 			smp_store_release(&b->peer_backlog_tail,
 					  (peer_tail + 1) &
 					  (RXRPC_BACKLOG_MAX - 1));
+
+			rxrpc_new_incoming_peer(local, peer);
 		}
 
 		/* Now allocate and set up the connection */
@@ -335,30 +332,16 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+					   struct rxrpc_sock *rx,
+					   struct rxrpc_peer *peer,
 					   struct rxrpc_connection *conn,
 					   struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	struct rxrpc_sock *rx;
 	struct rxrpc_call *call;
-	u16 service_id = sp->hdr.serviceId;
 
 	_enter("");
 
-	/* Get the socket providing the service */
-	rx = rcu_dereference(local->service);
-	if (rx && (service_id == rx->srx.srx_service ||
-		   service_id == rx->second_service))
-		goto found_service;
-
-	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-			  RX_INVALID_OPERATION, EOPNOTSUPP);
-	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-	skb->priority = RX_INVALID_OPERATION;
-	_leave(" = NULL [service]");
-	return NULL;
-
-found_service:
 	spin_lock(&rx->incoming_lock);
 	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
 	    rx->sk.sk_state == RXRPC_CLOSE) {
@@ -371,7 +354,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 		goto out;
 	}
 
-	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
 	if (!call) {
 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
 		_leave(" = NULL [busy]");
...
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-						   struct sk_buff *skb)
+						   struct sk_buff *skb,
+						   struct rxrpc_peer **_peer)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_conn_proto k;
@@ -104,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 	peer = rxrpc_lookup_peer_rcu(local, &srx);
 	if (!peer)
 		goto not_found;
+	*_peer = peer;
 	conn = rxrpc_find_service_conn_rcu(peer, skb);
 	if (!conn || atomic_read(&conn->usage) == 0)
 		goto not_found;
...
@@ -1128,7 +1128,8 @@ void rxrpc_data_ready(struct sock *udp_sk)
 	struct rxrpc_call *call = NULL;
 	struct rxrpc_skb_priv *sp;
 	struct rxrpc_local *local = udp_sk->sk_user_data;
-	struct rxrpc_sock *rx;
+	struct rxrpc_peer *peer = NULL;
+	struct rxrpc_sock *rx = NULL;
 	struct sk_buff *skb;
 	unsigned int channel;
 	int ret, skew = 0;
@@ -1250,7 +1251,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 		}
 	}
 
-	conn = rxrpc_find_connection_rcu(local, skb);
+	conn = rxrpc_find_connection_rcu(local, skb, &peer);
 	if (conn) {
 		if (sp->hdr.securityIndex != conn->security_ix)
 			goto wrong_security;
@@ -1339,7 +1340,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 			goto bad_message_unlock;
 		if (sp->hdr.seq != 1)
 			goto discard_unlock;
-		call = rxrpc_new_incoming_call(local, conn, skb);
+		call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
 		if (!call) {
 			rcu_read_unlock();
 			goto reject_packet;
...
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
 	struct rxrpc_net *rxnet = local->rxnet;
 
 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-			if (atomic_read(&peer->usage) == 0)
-				return NULL;
+		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+		    atomic_read(&peer->usage) > 0)
 			return peer;
-		}
 	}
 
 	return NULL;
@@ -299,34 +297,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer.  The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer.  There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-					      struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-	struct rxrpc_peer *peer;
 	struct rxrpc_net *rxnet = local->rxnet;
 	unsigned long hash_key;
 
-	hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-	prealloc->local = local;
-	rxrpc_init_peer(prealloc, hash_key);
+	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+	peer->local = local;
+	rxrpc_init_peer(peer, hash_key);
 
 	spin_lock(&rxnet->peer_hash_lock);
-
-	/* Need to check that we aren't racing with someone else */
-	peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-	if (peer && !rxrpc_get_peer_maybe(peer))
-		peer = NULL;
-	if (!peer) {
-		peer = prealloc;
-		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-		list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-	}
-
+	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	spin_unlock(&rxnet->peer_hash_lock);
-	return peer;
 }
 
 /*
...