Commit d91dc434 authored by David S. Miller

Merge tag 'rxrpc-fixes-20201005' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Miscellaneous fixes

Here are some miscellaneous rxrpc fixes:

 (1) Fix the xdr encoding of the contents read from an rxrpc key.

 (2) Fix a BUG() for an unsupported encoding type.

 (3) Fix missing _bh lock annotations.

 (4) Fix acceptance handling for an incoming call where the incoming call
     is encrypted.

 (5) The server token keyring isn't network namespaced - it belongs to the
     server, so there's no need.  Namespacing it means that request_key()
     fails to find it.

 (6) Fix a leak of the server keyring.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 86bccd03 38b1dc47
...@@ -51,11 +51,11 @@ enum rxrpc_cmsg_type { ...@@ -51,11 +51,11 @@ enum rxrpc_cmsg_type {
RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */
RXRPC__SUPPORTED RXRPC__SUPPORTED
}; };
......
...@@ -518,7 +518,6 @@ enum rxrpc_call_state { ...@@ -518,7 +518,6 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */ RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
...@@ -714,8 +713,8 @@ struct rxrpc_ack_summary { ...@@ -714,8 +713,8 @@ struct rxrpc_ack_summary {
enum rxrpc_command { enum rxrpc_command {
RXRPC_CMD_SEND_DATA, /* send data message */ RXRPC_CMD_SEND_DATA, /* send data message */
RXRPC_CMD_SEND_ABORT, /* request abort generation */ RXRPC_CMD_SEND_ABORT, /* request abort generation */
RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */
}; };
struct rxrpc_call_params { struct rxrpc_call_params {
...@@ -755,9 +754,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, ...@@ -755,9 +754,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
struct rxrpc_sock *, struct rxrpc_sock *,
struct sk_buff *); struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *); void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);
/* /*
* call_event.c * call_event.c
......
...@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, ...@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
unsigned int debug_id) unsigned int debug_id)
{ {
const void *here = __builtin_return_address(0); const void *here = __builtin_return_address(0);
struct rxrpc_call *call; struct rxrpc_call *call, *xcall;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
struct rb_node *parent, **pp;
int max, tmp; int max, tmp;
unsigned int size = RXRPC_BACKLOG_MAX; unsigned int size = RXRPC_BACKLOG_MAX;
unsigned int head, tail, call_head, call_tail; unsigned int head, tail, call_head, call_tail;
...@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, ...@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
} }
/* Now it gets complicated, because calls get registered with the /* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user. * socket here, with a user ID preassigned by the user.
*/ */
call = rxrpc_alloc_call(rx, gfp, debug_id); call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call) if (!call)
...@@ -107,9 +108,6 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, ...@@ -107,9 +108,6 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
here, (const void *)user_call_ID); here, (const void *)user_call_ID);
write_lock(&rx->call_lock); write_lock(&rx->call_lock);
if (user_attach_call) {
struct rxrpc_call *xcall;
struct rb_node *parent, **pp;
/* Check the user ID isn't already in use */ /* Check the user ID isn't already in use */
pp = &rx->calls.rb_node; pp = &rx->calls.rb_node;
...@@ -127,13 +125,15 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, ...@@ -127,13 +125,15 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
call->user_call_ID = user_call_ID; call->user_call_ID = user_call_ID;
call->notify_rx = notify_rx; call->notify_rx = notify_rx;
if (user_attach_call) {
rxrpc_get_call(call, rxrpc_call_got_kernel); rxrpc_get_call(call, rxrpc_call_got_kernel);
user_attach_call(call, user_call_ID); user_attach_call(call, user_call_ID);
}
rxrpc_get_call(call, rxrpc_call_got_userid); rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp); rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls); rb_insert_color(&call->sock_node, &rx->calls);
set_bit(RXRPC_CALL_HAS_USERID, &call->flags); set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
}
list_add(&call->sock_link, &rx->sock_calls); list_add(&call->sock_link, &rx->sock_calls);
...@@ -157,11 +157,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, ...@@ -157,11 +157,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
} }
/* /*
* Preallocate sufficient service connections, calls and peers to cover the * Allocate the preallocation buffers for incoming service calls. These must
* entire backlog of a socket. When a new call comes in, if we don't have * be charged manually.
* sufficient of each available, the call gets rejected as busy or ignored.
*
* The backlog is replenished when a connection is accepted or rejected.
*/ */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{ {
...@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) ...@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
rx->backlog = b; rx->backlog = b;
} }
if (rx->discard_new_call)
return 0;
while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
atomic_inc_return(&rxrpc_debug_id)) == 0)
;
return 0; return 0;
} }
...@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, ...@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
rxrpc_see_call(call); rxrpc_see_call(call);
call->conn = conn; call->conn = conn;
call->security = conn->security; call->security = conn->security;
call->security_ix = conn->security_ix;
call->peer = rxrpc_get_peer(conn->params.peer); call->peer = rxrpc_get_peer(conn->params.peer);
call->cong_cwnd = call->peer->cong_cwnd; call->cong_cwnd = call->peer->cong_cwnd;
return call; return call;
...@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
if (rx->notify_new_call) if (rx->notify_new_call)
rx->notify_new_call(&rx->sk, call, call->user_call_ID); rx->notify_new_call(&rx->sk, call, call->user_call_ID);
else
sk_acceptq_added(&rx->sk);
spin_lock(&conn->state_lock); spin_lock(&conn->state_lock);
switch (conn->state) { switch (conn->state) {
...@@ -415,12 +404,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -415,12 +404,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
case RXRPC_CONN_SERVICE: case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock); write_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) { if (call->state < RXRPC_CALL_COMPLETE)
if (rx->discard_new_call)
call->state = RXRPC_CALL_SERVER_RECV_REQUEST; call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
else
call->state = RXRPC_CALL_SERVER_ACCEPTING;
}
write_unlock(&call->state_lock); write_unlock(&call->state_lock);
break; break;
...@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
rxrpc_send_ping(call, skb); rxrpc_send_ping(call, skb);
if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
rxrpc_notify_socket(call);
/* We have to discard the prealloc queue's ref here and rely on a /* We have to discard the prealloc queue's ref here and rely on a
* combination of the RCU read lock and refs held either by the socket * combination of the RCU read lock and refs held either by the socket
* (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
...@@ -460,187 +442,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, ...@@ -460,187 +442,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
} }
/* /*
* handle acceptance of a call by userspace * Charge up socket with preallocated calls, attaching user call IDs.
* - assign the user call ID to the call at the front of the queue
* - called with the socket locked.
*/
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
unsigned long user_call_ID,
rxrpc_notify_rx_t notify_rx)
__releases(&rx->sk.sk_lock.slock)
__acquires(call->user_mutex)
{
struct rxrpc_call *call;
struct rb_node *parent, **pp;
int ret;
_enter(",%lx", user_call_ID);
ASSERT(!irqs_disabled());
write_lock(&rx->call_lock);
if (list_empty(&rx->to_be_accepted)) {
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
kleave(" = -ENODATA [empty]");
return ERR_PTR(-ENODATA);
}
/* check the user ID isn't already in use */
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
call = rb_entry(parent, struct rxrpc_call, sock_node);
if (user_call_ID < call->user_call_ID)
pp = &(*pp)->rb_left;
else if (user_call_ID > call->user_call_ID)
pp = &(*pp)->rb_right;
else
goto id_in_use;
}
/* Dequeue the first call and check it's still valid. We gain
* responsibility for the queue's reference.
*/
call = list_entry(rx->to_be_accepted.next,
struct rxrpc_call, accept_link);
write_unlock(&rx->call_lock);
/* We need to gain the mutex from the interrupt handler without
* upsetting lockdep, so we have to release it there and take it here.
* We are, however, still holding the socket lock, so other accepts
* must wait for us and no one can add the user ID behind our backs.
*/
if (mutex_lock_interruptible(&call->user_mutex) < 0) {
release_sock(&rx->sk);
kleave(" = -ERESTARTSYS");
return ERR_PTR(-ERESTARTSYS);
}
write_lock(&rx->call_lock);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
rxrpc_see_call(call);
/* Find the user ID insertion point. */
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
call = rb_entry(parent, struct rxrpc_call, sock_node);
if (user_call_ID < call->user_call_ID)
pp = &(*pp)->rb_left;
else if (user_call_ID > call->user_call_ID)
pp = &(*pp)->rb_right;
else
BUG();
}
write_lock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
break;
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_release;
default:
BUG();
}
/* formalise the acceptance */
call->notify_rx = notify_rx;
call->user_call_ID = user_call_ID;
rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
BUG();
write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
rxrpc_notify_socket(call);
rxrpc_service_prealloc(rx, GFP_KERNEL);
release_sock(&rx->sk);
_leave(" = %p{%d}", call, call->debug_id);
return call;
out_release:
_debug("release %p", call);
write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put);
goto out;
id_in_use:
ret = -EBADSLT;
write_unlock(&rx->call_lock);
out:
rxrpc_service_prealloc(rx, GFP_KERNEL);
release_sock(&rx->sk);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
/*
* Handle rejection of a call by userspace
* - reject the call at the front of the queue
*/ */
int rxrpc_reject_call(struct rxrpc_sock *rx) int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{ {
struct rxrpc_call *call; struct rxrpc_backlog *b = rx->backlog;
bool abort = false;
int ret;
_enter("");
ASSERT(!irqs_disabled());
write_lock(&rx->call_lock);
if (list_empty(&rx->to_be_accepted)) {
write_unlock(&rx->call_lock);
return -ENODATA;
}
/* Dequeue the first call and check it's still valid. We gain
* responsibility for the queue's reference.
*/
call = list_entry(rx->to_be_accepted.next,
struct rxrpc_call, accept_link);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
rxrpc_see_call(call);
write_lock_bh(&call->state_lock); if (rx->sk.sk_state == RXRPC_CLOSE)
switch (call->state) { return -ESHUTDOWN;
case RXRPC_CALL_SERVER_ACCEPTING:
__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
abort = true;
fallthrough;
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_discard;
default:
BUG();
}
out_discard: return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
write_unlock_bh(&call->state_lock); GFP_KERNEL,
write_unlock(&rx->call_lock); atomic_inc_return(&rxrpc_debug_id));
if (abort) {
rxrpc_send_abort_packet(call);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put);
}
rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %d", ret);
return ret;
} }
/* /*
......
...@@ -23,7 +23,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = { ...@@ -23,7 +23,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
[RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc", [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
[RXRPC_CALL_SERVER_SECURING] = "SvSecure", [RXRPC_CALL_SERVER_SECURING] = "SvSecure",
[RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq", [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq", [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl", [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
...@@ -352,8 +351,6 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx, ...@@ -352,8 +351,6 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
call->call_id = sp->hdr.callNumber; call->call_id = sp->hdr.callNumber;
call->service_id = sp->hdr.serviceId; call->service_id = sp->hdr.serviceId;
call->cid = sp->hdr.cid; call->cid = sp->hdr.cid;
call->state = RXRPC_CALL_SERVER_ACCEPTING;
if (sp->hdr.securityIndex > 0)
call->state = RXRPC_CALL_SERVER_SECURING; call->state = RXRPC_CALL_SERVER_SECURING;
call->cong_tstamp = skb->tstamp; call->cong_tstamp = skb->tstamp;
......
...@@ -269,7 +269,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call) ...@@ -269,7 +269,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
if (call) { if (call) {
write_lock_bh(&call->state_lock); write_lock_bh(&call->state_lock);
if (call->state == RXRPC_CALL_SERVER_SECURING) { if (call->state == RXRPC_CALL_SERVER_SECURING) {
call->state = RXRPC_CALL_SERVER_ACCEPTING; call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
rxrpc_notify_socket(call); rxrpc_notify_socket(call);
} }
write_unlock_bh(&call->state_lock); write_unlock_bh(&call->state_lock);
...@@ -340,18 +340,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, ...@@ -340,18 +340,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return ret; return ret;
spin_lock(&conn->channel_lock); spin_lock(&conn->channel_lock);
spin_lock(&conn->state_lock); spin_lock_bh(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE; conn->state = RXRPC_CONN_SERVICE;
spin_unlock(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
for (loop = 0; loop < RXRPC_MAXCALLS; loop++) for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure( rxrpc_call_is_secure(
rcu_dereference_protected( rcu_dereference_protected(
conn->channels[loop].call, conn->channels[loop].call,
lockdep_is_held(&conn->channel_lock))); lockdep_is_held(&conn->channel_lock)));
} else { } else {
spin_unlock(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
} }
spin_unlock(&conn->channel_lock); spin_unlock(&conn->channel_lock);
......
...@@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen) ...@@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
_enter(""); _enter("");
if (optlen <= 0 || optlen > PAGE_SIZE - 1) if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities)
return -EINVAL; return -EINVAL;
description = memdup_sockptr_nul(optval, optlen); description = memdup_sockptr_nul(optval, optlen);
...@@ -940,7 +940,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen) ...@@ -940,7 +940,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
if (IS_ERR(description)) if (IS_ERR(description))
return PTR_ERR(description); return PTR_ERR(description);
key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL); key = request_key(&key_type_keyring, description, NULL);
if (IS_ERR(key)) { if (IS_ERR(key)) {
kfree(description); kfree(description);
_leave(" = %ld", PTR_ERR(key)); _leave(" = %ld", PTR_ERR(key));
...@@ -1072,7 +1072,7 @@ static long rxrpc_read(const struct key *key, ...@@ -1072,7 +1072,7 @@ static long rxrpc_read(const struct key *key,
switch (token->security_index) { switch (token->security_index) {
case RXRPC_SECURITY_RXKAD: case RXRPC_SECURITY_RXKAD:
toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, toksize += 8 * 4; /* viceid, kvno, key*2, begin,
* end, primary, tktlen */ * end, primary, tktlen */
toksize += RND(token->kad->ticket_len); toksize += RND(token->kad->ticket_len);
break; break;
...@@ -1107,7 +1107,8 @@ static long rxrpc_read(const struct key *key, ...@@ -1107,7 +1107,8 @@ static long rxrpc_read(const struct key *key,
break; break;
default: /* we have a ticket we can't encode */ default: /* we have a ticket we can't encode */
BUG(); pr_err("Unsupported key token type (%u)\n",
token->security_index);
continue; continue;
} }
...@@ -1138,6 +1139,14 @@ static long rxrpc_read(const struct key *key, ...@@ -1138,6 +1139,14 @@ static long rxrpc_read(const struct key *key,
memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
xdr += (_l + 3) >> 2; \ xdr += (_l + 3) >> 2; \
} while(0) } while(0)
#define ENCODE_BYTES(l, s) \
do { \
u32 _l = (l); \
memcpy(xdr, (s), _l); \
if (_l & 3) \
memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
xdr += (_l + 3) >> 2; \
} while(0)
#define ENCODE64(x) \ #define ENCODE64(x) \
do { \ do { \
__be64 y = cpu_to_be64(x); \ __be64 y = cpu_to_be64(x); \
...@@ -1165,7 +1174,7 @@ static long rxrpc_read(const struct key *key, ...@@ -1165,7 +1174,7 @@ static long rxrpc_read(const struct key *key,
case RXRPC_SECURITY_RXKAD: case RXRPC_SECURITY_RXKAD:
ENCODE(token->kad->vice_id); ENCODE(token->kad->vice_id);
ENCODE(token->kad->kvno); ENCODE(token->kad->kvno);
ENCODE_DATA(8, token->kad->session_key); ENCODE_BYTES(8, token->kad->session_key);
ENCODE(token->kad->start); ENCODE(token->kad->start);
ENCODE(token->kad->expiry); ENCODE(token->kad->expiry);
ENCODE(token->kad->primary_flag); ENCODE(token->kad->primary_flag);
...@@ -1215,7 +1224,6 @@ static long rxrpc_read(const struct key *key, ...@@ -1215,7 +1224,6 @@ static long rxrpc_read(const struct key *key,
break; break;
default: default:
BUG();
break; break;
} }
......
...@@ -178,37 +178,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg) ...@@ -178,37 +178,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
return ret; return ret;
} }
/*
* Pass back notification of a new call. The call is added to the
* to-be-accepted list. This means that the next call to be accepted might not
* be the last call seen awaiting acceptance, but unless we leave this on the
* front of the queue and block all other messages until someone gives us a
* user_ID for it, there's not a lot we can do.
*/
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct msghdr *msg, int flags)
{
int tmp = 0, ret;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
if (ret == 0 && !(flags & MSG_PEEK)) {
_debug("to be accepted");
write_lock_bh(&rx->recvmsg_lock);
list_del_init(&call->recvmsg_link);
write_unlock_bh(&rx->recvmsg_lock);
rxrpc_get_call(call, rxrpc_call_got);
write_lock(&rx->call_lock);
list_add_tail(&call->accept_link, &rx->to_be_accepted);
write_unlock(&rx->call_lock);
}
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
return ret;
}
/* /*
* End the packet reception phase. * End the packet reception phase.
*/ */
...@@ -630,9 +599,6 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ...@@ -630,9 +599,6 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
} }
switch (READ_ONCE(call->state)) { switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_ACCEPTING:
ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
break;
case RXRPC_CALL_CLIENT_RECV_REPLY: case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST: case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST: case RXRPC_CALL_SERVER_ACK_REQUEST:
...@@ -728,7 +694,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, ...@@ -728,7 +694,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
call->debug_id, rxrpc_call_states[call->state], call->debug_id, rxrpc_call_states[call->state],
iov_iter_count(iter), want_more); iov_iter_count(iter), want_more);
ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING); ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
mutex_lock(&call->user_mutex); mutex_lock(&call->user_mutex);
......
...@@ -530,10 +530,10 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) ...@@ -530,10 +530,10 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
return -EINVAL; return -EINVAL;
break; break;
case RXRPC_ACCEPT: case RXRPC_CHARGE_ACCEPT:
if (p->command != RXRPC_CMD_SEND_DATA) if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL; return -EINVAL;
p->command = RXRPC_CMD_ACCEPT; p->command = RXRPC_CMD_CHARGE_ACCEPT;
if (len != 0) if (len != 0)
return -EINVAL; return -EINVAL;
break; break;
...@@ -659,16 +659,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -659,16 +659,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (ret < 0) if (ret < 0)
goto error_release_sock; goto error_release_sock;
if (p.command == RXRPC_CMD_ACCEPT) { if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
ret = -EINVAL; ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock; goto error_release_sock;
call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL); ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
/* The socket is now unlocked. */ goto error_release_sock;
if (IS_ERR(call))
return PTR_ERR(call);
ret = 0;
goto out_put_unlock;
} }
call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
...@@ -690,7 +686,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -690,7 +686,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
case RXRPC_CALL_CLIENT_AWAIT_CONN: case RXRPC_CALL_CLIENT_AWAIT_CONN:
case RXRPC_CALL_SERVER_PREALLOC: case RXRPC_CALL_SERVER_PREALLOC:
case RXRPC_CALL_SERVER_SECURING: case RXRPC_CALL_SERVER_SECURING:
case RXRPC_CALL_SERVER_ACCEPTING:
rxrpc_put_call(call, rxrpc_call_put); rxrpc_put_call(call, rxrpc_call_put);
ret = -EBUSY; ret = -EBUSY;
goto error_release_sock; goto error_release_sock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment