Commit 0122c6d5 authored by David S. Miller

Merge tag 'rxrpc-rewrite-20160904-1' of...

Merge tag 'rxrpc-rewrite-20160904-1' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Small fixes

Here's a set of small fix patches:

 (1) Fix some uninitialised variables.

 (2) Set the client call state before making it live by attaching it to the
     conn struct.

 (3) Randomise the epoch and starting client conn ID values, and don't
     change the epoch when the client conn ID rolls round.

 (4) Replace deprecated create_singlethread_workqueue() calls.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5e1e61a3 434e6120
...@@ -461,8 +461,8 @@ static void afs_callback_updater(struct work_struct *work) ...@@ -461,8 +461,8 @@ static void afs_callback_updater(struct work_struct *work)
*/ */
int __init afs_callback_update_init(void) int __init afs_callback_update_init(void)
{ {
afs_callback_update_worker = afs_callback_update_worker = alloc_ordered_workqueue("kafs_callbackd",
create_singlethread_workqueue("kafs_callbackd"); WQ_MEM_RECLAIM);
return afs_callback_update_worker ? 0 : -ENOMEM; return afs_callback_update_worker ? 0 : -ENOMEM;
} }
......
...@@ -36,8 +36,8 @@ static int afs_init_lock_manager(void) ...@@ -36,8 +36,8 @@ static int afs_init_lock_manager(void)
if (!afs_lock_manager) { if (!afs_lock_manager) {
mutex_lock(&afs_lock_manager_mutex); mutex_lock(&afs_lock_manager_mutex);
if (!afs_lock_manager) { if (!afs_lock_manager) {
afs_lock_manager = afs_lock_manager = alloc_workqueue("kafs_lockd",
create_singlethread_workqueue("kafs_lockd"); WQ_MEM_RECLAIM, 0);
if (!afs_lock_manager) if (!afs_lock_manager)
ret = -ENOMEM; ret = -ENOMEM;
} }
......
...@@ -76,7 +76,7 @@ int afs_open_socket(void) ...@@ -76,7 +76,7 @@ int afs_open_socket(void)
_enter(""); _enter("");
ret = -ENOMEM; ret = -ENOMEM;
afs_async_calls = create_singlethread_workqueue("kafsd"); afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
if (!afs_async_calls) if (!afs_async_calls)
goto error_0; goto error_0;
......
...@@ -594,8 +594,8 @@ static void afs_vlocation_reaper(struct work_struct *work) ...@@ -594,8 +594,8 @@ static void afs_vlocation_reaper(struct work_struct *work)
*/ */
int __init afs_vlocation_update_init(void) int __init afs_vlocation_update_init(void)
{ {
afs_vlocation_update_worker = afs_vlocation_update_worker = alloc_workqueue("kafs_vlupdated",
create_singlethread_workqueue("kafs_vlupdated"); WQ_MEM_RECLAIM, 0);
return afs_vlocation_update_worker ? 0 : -ENOMEM; return afs_vlocation_update_worker ? 0 : -ENOMEM;
} }
......
...@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ ...@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
*/ */
struct rxrpc_wire_header { struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */ __be32 epoch; /* client boot timestamp */
#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
__be32 cid; /* connection and channel ID */ __be32 cid; /* connection and channel ID */
#define RXRPC_MAXCALLS 4 /* max active calls per conn */ #define RXRPC_MAXCALLS 4 /* max active calls per conn */
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/net.h> #include <linux/net.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/key-type.h> #include <linux/key-type.h>
...@@ -700,7 +701,13 @@ static int __init af_rxrpc_init(void) ...@@ -700,7 +701,13 @@ static int __init af_rxrpc_init(void)
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb)); BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
rxrpc_epoch = get_seconds(); get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
get_random_bytes(&rxrpc_client_conn_ids.cur,
sizeof(rxrpc_client_conn_ids.cur));
rxrpc_client_conn_ids.cur &= 0x3fffffff;
if (rxrpc_client_conn_ids.cur == 0)
rxrpc_client_conn_ids.cur = 1;
ret = -ENOMEM; ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create( rxrpc_call_jar = kmem_cache_create(
......
...@@ -868,7 +868,6 @@ void rxrpc_process_call(struct work_struct *work) ...@@ -868,7 +868,6 @@ void rxrpc_process_call(struct work_struct *work)
/* deal with events of a final nature */ /* deal with events of a final nature */
if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
enum rxrpc_skb_mark mark; enum rxrpc_skb_mark mark;
int error;
clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events); clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events); clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
...@@ -876,10 +875,10 @@ void rxrpc_process_call(struct work_struct *work) ...@@ -876,10 +875,10 @@ void rxrpc_process_call(struct work_struct *work)
if (call->completion == RXRPC_CALL_NETWORK_ERROR) { if (call->completion == RXRPC_CALL_NETWORK_ERROR) {
mark = RXRPC_SKB_MARK_NET_ERROR; mark = RXRPC_SKB_MARK_NET_ERROR;
_debug("post net error %d", error); _debug("post net error %d", call->error);
} else { } else {
mark = RXRPC_SKB_MARK_LOCAL_ERROR; mark = RXRPC_SKB_MARK_LOCAL_ERROR;
_debug("post net local error %d", error); _debug("post net local error %d", call->error);
} }
if (rxrpc_post_message(call, mark, call->error, true) < 0) if (rxrpc_post_message(call, mark, call->error, true) < 0)
......
...@@ -197,8 +197,6 @@ static int rxrpc_begin_client_call(struct rxrpc_call *call, ...@@ -197,8 +197,6 @@ static int rxrpc_begin_client_call(struct rxrpc_call *call,
if (ret < 0) if (ret < 0)
return ret; return ret;
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
spin_lock(&call->conn->params.peer->lock); spin_lock(&call->conn->params.peer->lock);
hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets); hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
spin_unlock(&call->conn->params.peer->lock); spin_unlock(&call->conn->params.peer->lock);
...@@ -586,7 +584,7 @@ static void rxrpc_dead_call_expired(unsigned long _call) ...@@ -586,7 +584,7 @@ static void rxrpc_dead_call_expired(unsigned long _call)
*/ */
static void rxrpc_mark_call_released(struct rxrpc_call *call) static void rxrpc_mark_call_released(struct rxrpc_call *call)
{ {
bool sched; bool sched = false;
rxrpc_see_call(call); rxrpc_see_call(call);
write_lock(&call->state_lock); write_lock(&call->state_lock);
......
...@@ -108,12 +108,12 @@ static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap, ...@@ -108,12 +108,12 @@ static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
/* /*
* Get a connection ID and epoch for a client connection from the global pool. * Get a connection ID and epoch for a client connection from the global pool.
* The connection struct pointer is then recorded in the idr radix tree. The * The connection struct pointer is then recorded in the idr radix tree. The
* epoch is changed if this wraps. * epoch doesn't change until the client is rebooted (or, at least, unless the
* module is unloaded).
*/ */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
gfp_t gfp) gfp_t gfp)
{ {
u32 epoch;
int id; int id;
_enter(""); _enter("");
...@@ -121,34 +121,18 @@ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, ...@@ -121,34 +121,18 @@ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
idr_preload(gfp); idr_preload(gfp);
spin_lock(&rxrpc_conn_id_lock); spin_lock(&rxrpc_conn_id_lock);
epoch = rxrpc_epoch; id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
/* We could use idr_alloc_cyclic() here, but we really need to know
* when the thing wraps so that we can advance the epoch.
*/
if (rxrpc_client_conn_ids.cur == 0)
rxrpc_client_conn_ids.cur = 1;
id = idr_alloc(&rxrpc_client_conn_ids, conn,
rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
if (id < 0) {
if (id != -ENOSPC)
goto error;
id = idr_alloc(&rxrpc_client_conn_ids, conn,
1, 0x40000000, GFP_NOWAIT); 1, 0x40000000, GFP_NOWAIT);
if (id < 0) if (id < 0)
goto error; goto error;
epoch++;
rxrpc_epoch = epoch;
}
rxrpc_client_conn_ids.cur = id + 1;
spin_unlock(&rxrpc_conn_id_lock); spin_unlock(&rxrpc_conn_id_lock);
idr_preload_end(); idr_preload_end();
conn->proto.epoch = epoch; conn->proto.epoch = rxrpc_epoch;
conn->proto.cid = id << RXRPC_CIDSHIFT; conn->proto.cid = id << RXRPC_CIDSHIFT;
set_bit(RXRPC_CONN_HAS_IDR, &conn->flags); set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
_leave(" [CID %x:%x]", epoch, conn->proto.cid); _leave(" [CID %x]", conn->proto.cid);
return 0; return 0;
error: error:
...@@ -537,6 +521,10 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, ...@@ -537,6 +521,10 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
struct rxrpc_call, chan_wait_link); struct rxrpc_call, chan_wait_link);
u32 call_id = chan->call_counter + 1; u32 call_id = chan->call_counter + 1;
write_lock_bh(&call->state_lock);
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
write_unlock_bh(&call->state_lock);
rxrpc_see_call(call); rxrpc_see_call(call);
list_del_init(&call->chan_wait_link); list_del_init(&call->chan_wait_link);
conn->active_chans |= 1 << channel; conn->active_chans |= 1 << channel;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment