Commit 44a4d551 authored by Lars Ellenberg, committed by Philipp Reisner

drbd: refactor use of first_peer_device()

Reduce the number of calls to first_peer_device(). Instead, call
first_peer_device() just once to assign a local variable peer_device.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 35b5ed5b
...@@ -552,8 +552,10 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection) ...@@ -552,8 +552,10 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
} }
enum drbd_state_rv enum drbd_state_rv
drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{ {
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
const int max_tries = 4; const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR; enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
struct net_conf *nc; struct net_conf *nc;
...@@ -601,7 +603,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -601,7 +603,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
device->state.disk == D_CONSISTENT && mask.pdsk == 0) { device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(device, device->state.pdsk == D_UNKNOWN); D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
if (conn_try_outdate_peer(first_peer_device(device)->connection)) { if (conn_try_outdate_peer(connection)) {
val.disk = D_UP_TO_DATE; val.disk = D_UP_TO_DATE;
mask.disk = D_MASK; mask.disk = D_MASK;
} }
...@@ -611,7 +613,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -611,7 +613,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
if (rv == SS_NOTHING_TO_DO) if (rv == SS_NOTHING_TO_DO)
goto out; goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) { if (!conn_try_outdate_peer(connection) && force) {
drbd_warn(device, "Forced into split brain situation!\n"); drbd_warn(device, "Forced into split brain situation!\n");
mask.pdsk = D_MASK; mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED; val.pdsk = D_OUTDATED;
...@@ -624,7 +626,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -624,7 +626,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
retry at most once more in this case. */ retry at most once more in this case. */
int timeo; int timeo;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(connection->net_conf);
timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
rcu_read_unlock(); rcu_read_unlock();
schedule_timeout_interruptible(timeo); schedule_timeout_interruptible(timeo);
...@@ -661,7 +663,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -661,7 +663,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
} else { } else {
/* Called from drbd_adm_set_role only. /* Called from drbd_adm_set_role only.
* We are still holding the conf_update mutex. */ * We are still holding the conf_update mutex. */
nc = first_peer_device(device)->connection->net_conf; nc = connection->net_conf;
if (nc) if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */ nc->discard_my_data = 0; /* without copy; single bit op is atomic */
...@@ -683,8 +685,8 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -683,8 +685,8 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
if (device->state.conn >= C_WF_REPORT_PARAMS) { if (device->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */ /* if this was forced, we should consider sync */
if (forced) if (forced)
drbd_send_uuids(first_peer_device(device)); drbd_send_uuids(peer_device);
drbd_send_current_state(first_peer_device(device)); drbd_send_current_state(peer_device);
} }
drbd_md_sync(device); drbd_md_sync(device);
...@@ -1433,6 +1435,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1433,6 +1435,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{ {
struct drbd_config_context adm_ctx; struct drbd_config_context adm_ctx;
struct drbd_device *device; struct drbd_device *device;
struct drbd_peer_device *peer_device;
struct drbd_connection *connection;
int err; int err;
enum drbd_ret_code retcode; enum drbd_ret_code retcode;
enum determine_dev_size dd; enum determine_dev_size dd;
...@@ -1455,7 +1459,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1455,7 +1459,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
device = adm_ctx.device; device = adm_ctx.device;
mutex_lock(&adm_ctx.resource->adm_mutex); mutex_lock(&adm_ctx.resource->adm_mutex);
conn_reconfig_start(first_peer_device(device)->connection); peer_device = first_peer_device(device);
connection = peer_device ? peer_device->connection : NULL;
conn_reconfig_start(connection);
/* if you want to reconfigure, please tear down first */ /* if you want to reconfigure, please tear down first */
if (device->state.disk > D_DISKLESS) { if (device->state.disk > D_DISKLESS) {
...@@ -1522,7 +1528,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1522,7 +1528,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(connection->net_conf);
if (nc) { if (nc) {
if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
rcu_read_unlock(); rcu_read_unlock();
...@@ -1642,7 +1648,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1642,7 +1648,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
*/ */
wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device)); wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
/* and for any other previously queued work */ /* and for any other previously queued work */
drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); drbd_flush_workqueue(&connection->sender_work);
rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = rv; /* FIXME: Type mismatch. */ retcode = rv; /* FIXME: Type mismatch. */
...@@ -1838,7 +1844,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1838,7 +1844,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device); put_ldev(device);
conn_reconfig_done(first_peer_device(device)->connection); conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex); mutex_unlock(&adm_ctx.resource->adm_mutex);
drbd_adm_finish(&adm_ctx, info, retcode); drbd_adm_finish(&adm_ctx, info, retcode);
return 0; return 0;
...@@ -1849,7 +1855,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1849,7 +1855,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
drbd_force_state(device, NS(disk, D_DISKLESS)); drbd_force_state(device, NS(disk, D_DISKLESS));
drbd_md_sync(device); drbd_md_sync(device);
fail: fail:
conn_reconfig_done(first_peer_device(device)->connection); conn_reconfig_done(connection);
if (nbc) { if (nbc) {
if (nbc->backing_bdev) if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev, blkdev_put(nbc->backing_bdev,
......
...@@ -2857,8 +2857,10 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, ...@@ -2857,8 +2857,10 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
-1091 requires proto 91 -1091 requires proto 91
-1096 requires proto 96 -1096 requires proto 96
*/ */
static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local) static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local)
{ {
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
u64 self, peer; u64 self, peer;
int i, j; int i, j;
...@@ -2884,7 +2886,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho ...@@ -2884,7 +2886,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
if (first_peer_device(device)->connection->agreed_pro_version < 91) if (connection->agreed_pro_version < 91)
return -1091; return -1091;
if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
...@@ -2907,7 +2909,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho ...@@ -2907,7 +2909,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
if (first_peer_device(device)->connection->agreed_pro_version < 91) if (connection->agreed_pro_version < 91)
return -1091; return -1091;
if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
...@@ -2940,7 +2942,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho ...@@ -2940,7 +2942,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
case 1: /* self_pri && !peer_pri */ return 1; case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1; case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */ case 3: /* self_pri && peer_pri */
dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags); dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
return dc ? -1 : 1; return dc ? -1 : 1;
} }
} }
...@@ -2953,14 +2955,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho ...@@ -2953,14 +2955,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
*rule_nr = 51; *rule_nr = 51;
peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) { if (self == peer) {
if (first_peer_device(device)->connection->agreed_pro_version < 96 ? if (connection->agreed_pro_version < 96 ?
(device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get though. Undo the last start of /* The last P_SYNC_UUID did not get though. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */ resync as sync source modifications of the peer's UUIDs. */
if (first_peer_device(device)->connection->agreed_pro_version < 91) if (connection->agreed_pro_version < 91)
return -1091; return -1091;
device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
...@@ -2990,14 +2992,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho ...@@ -2990,14 +2992,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
*rule_nr = 71; *rule_nr = 71;
self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) { if (self == peer) {
if (first_peer_device(device)->connection->agreed_pro_version < 96 ? if (connection->agreed_pro_version < 96 ?
(device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get though. Undo the last start of /* The last P_SYNC_UUID did not get though. Undo the last start of
resync as sync source modifications of our UUIDs. */ resync as sync source modifications of our UUIDs. */
if (first_peer_device(device)->connection->agreed_pro_version < 91) if (connection->agreed_pro_version < 91)
return -1091; return -1091;
__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
......
...@@ -454,7 +454,9 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request ...@@ -454,7 +454,9 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request
int __req_mod(struct drbd_request *req, enum drbd_req_event what, int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m) struct bio_and_error *m)
{ {
struct drbd_device *device = req->device; struct drbd_device *const device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
struct net_conf *nc; struct net_conf *nc;
int p, rv = 0; int p, rv = 0;
...@@ -477,7 +479,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -477,7 +479,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* and from w_read_retry_remote */ * and from w_read_retry_remote */
D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(connection->net_conf);
p = nc->wire_protocol; p = nc->wire_protocol;
rcu_read_unlock(); rcu_read_unlock();
req->rq_state |= req->rq_state |=
...@@ -549,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -549,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED); mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req; req->w.cb = w_send_read_req;
drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_queue_work(&connection->sender_work,
&req->w); &req->w);
break; break;
...@@ -585,23 +587,23 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -585,23 +587,23 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(device, req->rq_state & RQ_NET_PENDING); D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock; req->w.cb = w_send_dblock;
drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_queue_work(&connection->sender_work,
&req->w); &req->w);
/* close the epoch, in case it outgrew the limit */ /* close the epoch, in case it outgrew the limit */
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(connection->net_conf);
p = nc->max_epoch_size; p = nc->max_epoch_size;
rcu_read_unlock(); rcu_read_unlock();
if (first_peer_device(device)->connection->current_tle_writes >= p) if (connection->current_tle_writes >= p)
start_new_tl_epoch(first_peer_device(device)->connection); start_new_tl_epoch(connection);
break; break;
case QUEUE_FOR_SEND_OOS: case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED); mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync; req->w.cb = w_send_out_of_sync;
drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_queue_work(&connection->sender_work,
&req->w); &req->w);
break; break;
...@@ -714,7 +716,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -714,7 +716,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
get_ldev(device); /* always succeeds in this call path */ get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io; req->w.cb = w_restart_disk_io;
drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_queue_work(&connection->sender_work,
&req->w); &req->w);
break; break;
...@@ -736,7 +738,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -736,7 +738,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) { if (req->w.cb) {
drbd_queue_work(&first_peer_device(device)->connection->sender_work, /* w.cb expected to be w_send_dblock, or w_send_read_req */
drbd_queue_work(&connection->sender_work,
&req->w); &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */ } /* else: FIXME can this happen? */
...@@ -769,7 +772,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -769,7 +772,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break; break;
case QUEUE_AS_DRBD_BARRIER: case QUEUE_AS_DRBD_BARRIER:
start_new_tl_epoch(first_peer_device(device)->connection); start_new_tl_epoch(connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break; break;
}; };
......
...@@ -952,6 +952,8 @@ enum drbd_state_rv ...@@ -952,6 +952,8 @@ enum drbd_state_rv
__drbd_set_state(struct drbd_device *device, union drbd_state ns, __drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done) enum chg_state_flags flags, struct completion *done)
{ {
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
union drbd_state os; union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS; enum drbd_state_rv rv = SS_SUCCESS;
enum sanitize_state_warnings ssw; enum sanitize_state_warnings ssw;
...@@ -978,9 +980,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -978,9 +980,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
this happen...*/ this happen...*/
if (is_valid_state(device, os) == rv) if (is_valid_state(device, os) == rv)
rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); rv = is_valid_soft_transition(os, ns, connection);
} else } else
rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); rv = is_valid_soft_transition(os, ns, connection);
} }
if (rv < SS_SUCCESS) { if (rv < SS_SUCCESS) {
...@@ -997,7 +999,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -997,7 +999,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
sanitize_state(). Only display it here if we where not called from sanitize_state(). Only display it here if we where not called from
_conn_request_state() */ _conn_request_state() */
if (!(flags & CS_DC_SUSP)) if (!(flags & CS_DC_SUSP))
conn_pr_state_change(first_peer_device(device)->connection, os, ns, conn_pr_state_change(connection, os, ns,
(flags & ~CS_DC_MASK) | CS_DC_SUSP); (flags & ~CS_DC_MASK) | CS_DC_SUSP);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
...@@ -1017,19 +1019,19 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -1017,19 +1019,19 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
/* put replicated vs not-replicated requests in seperate epochs */ /* put replicated vs not-replicated requests in seperate epochs */
if (did_remote != should_do_remote) if (did_remote != should_do_remote)
start_new_tl_epoch(first_peer_device(device)->connection); start_new_tl_epoch(connection);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
drbd_print_uuids(device, "attached to UUIDs"); drbd_print_uuids(device, "attached to UUIDs");
/* Wake up role changes, that were delayed because of connection establishing */ /* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
no_peer_wf_report_params(first_peer_device(device)->connection)) no_peer_wf_report_params(connection))
clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags); clear_bit(STATE_SENT, &connection->flags);
wake_up(&device->misc_wait); wake_up(&device->misc_wait);
wake_up(&device->state_wait); wake_up(&device->state_wait);
wake_up(&first_peer_device(device)->connection->ping_wait); wake_up(&connection->ping_wait);
/* Aborted verify run, or we reached the stop sector. /* Aborted verify run, or we reached the stop sector.
* Log the last position, unless end-of-device. */ * Log the last position, unless end-of-device. */
...@@ -1118,21 +1120,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -1118,21 +1120,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
/* Receiver should clean up itself */ /* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver); drbd_thread_stop_nowait(&connection->receiver);
/* Now the receiver finished cleaning up itself, it should die */ /* Now the receiver finished cleaning up itself, it should die */
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver); drbd_thread_stop_nowait(&connection->receiver);
/* Upon network failure, we need to restart the receiver. */ /* Upon network failure, we need to restart the receiver. */
if (os.conn > C_WF_CONNECTION && if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver); drbd_thread_restart_nowait(&connection->receiver);
/* Resume AL writing if we get a connection */ /* Resume AL writing if we get a connection */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
drbd_resume_al(device); drbd_resume_al(device);
first_peer_device(device)->connection->connect_cnt++; connection->connect_cnt++;
} }
/* remember last attach time so request_timer_fn() won't /* remember last attach time so request_timer_fn() won't
...@@ -1150,7 +1152,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -1150,7 +1152,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
ascw->w.cb = w_after_state_ch; ascw->w.cb = w_after_state_ch;
ascw->device = device; ascw->device = device;
ascw->done = done; ascw->done = done;
drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_queue_work(&connection->sender_work,
&ascw->w); &ascw->w);
} else { } else {
drbd_err(device, "Could not kmalloc an ascw\n"); drbd_err(device, "Could not kmalloc an ascw\n");
...@@ -1222,6 +1224,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1222,6 +1224,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags) union drbd_state ns, enum chg_state_flags flags)
{ {
struct drbd_resource *resource = device->resource; struct drbd_resource *resource = device->resource;
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct sib_info sib; struct sib_info sib;
sib.sib_reason = SIB_STATE_CHANGE; sib.sib_reason = SIB_STATE_CHANGE;
...@@ -1245,7 +1249,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1245,7 +1249,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
state change. This function might sleep */ state change. This function might sleep */
if (ns.susp_nod) { if (ns.susp_nod) {
struct drbd_connection *connection = first_peer_device(device)->connection;
enum drbd_req_event what = NOTHING; enum drbd_req_event what = NOTHING;
spin_lock_irq(&device->resource->req_lock); spin_lock_irq(&device->resource->req_lock);
...@@ -1267,8 +1270,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1267,8 +1270,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
} }
if (ns.susp_fen) { if (ns.susp_fen) {
struct drbd_connection *connection = first_peer_device(device)->connection;
spin_lock_irq(&device->resource->req_lock); spin_lock_irq(&device->resource->req_lock);
if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */ /* case2: The connection was established again: */
...@@ -1294,8 +1295,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1294,8 +1295,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
* which is unexpected. */ * which is unexpected. */
if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) { connection->agreed_pro_version >= 96 && get_ldev(device)) {
drbd_gen_and_send_sync_uuid(first_peer_device(device)); drbd_gen_and_send_sync_uuid(peer_device);
put_ldev(device); put_ldev(device);
} }
...@@ -1309,8 +1310,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1309,8 +1310,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
atomic_set(&device->rs_pending_cnt, 0); atomic_set(&device->rs_pending_cnt, 0);
drbd_rs_cancel_all(device); drbd_rs_cancel_all(device);
drbd_send_uuids(first_peer_device(device)); drbd_send_uuids(peer_device);
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
} }
/* No point in queuing send_bitmap if we don't have a connection /* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state * anymore, so check also the _current_ state, not only the new state
...@@ -1335,7 +1336,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1335,7 +1336,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
set_bit(NEW_CUR_UUID, &device->flags); set_bit(NEW_CUR_UUID, &device->flags);
} else { } else {
drbd_uuid_new_current(device); drbd_uuid_new_current(device);
drbd_send_uuids(first_peer_device(device)); drbd_send_uuids(peer_device);
} }
} }
put_ldev(device); put_ldev(device);
...@@ -1346,7 +1347,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1346,7 +1347,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY && if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(device); drbd_uuid_new_current(device);
drbd_send_uuids(first_peer_device(device)); drbd_send_uuids(peer_device);
} }
/* D_DISKLESS Peer becomes secondary */ /* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
...@@ -1373,16 +1374,16 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1373,16 +1374,16 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* Last part of the attaching process ... */ /* Last part of the attaching process ... */
if (ns.conn >= C_CONNECTED && if (ns.conn >= C_CONNECTED &&
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(first_peer_device(device), 0, 0); /* to start sync... */ drbd_send_sizes(peer_device, 0, 0); /* to start sync... */
drbd_send_uuids(first_peer_device(device)); drbd_send_uuids(peer_device);
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
} }
/* We want to pause/continue resync, tell peer. */ /* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED && if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) || ((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp))) (os.user_isp != ns.user_isp)))
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* In case one of the isp bits got set, suspend other devices. */ /* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) && if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
...@@ -1392,10 +1393,10 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1392,10 +1393,10 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* Make sure the peer gets informed about eventual state /* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */ changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD) if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* We are in the progress to start a full sync... */ /* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
...@@ -1449,7 +1450,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1449,7 +1450,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
drbd_disk_str(device->state.disk)); drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED) if (ns.conn >= C_CONNECTED)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
drbd_rs_cancel_all(device); drbd_rs_cancel_all(device);
...@@ -1473,7 +1474,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1473,7 +1474,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
drbd_disk_str(device->state.disk)); drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED) if (ns.conn >= C_CONNECTED)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* corresponding get_ldev in __drbd_set_state /* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */ * this may finally trigger drbd_ldev_destroy. */
put_ldev(device); put_ldev(device);
...@@ -1481,7 +1482,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1481,7 +1482,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* Notify peer that I had a local IO error, and did not detached.. */ /* Notify peer that I had a local IO error, and did not detached.. */
if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED) if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* Disks got bigger while they were detached */ /* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
...@@ -1499,14 +1500,14 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1499,14 +1500,14 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* sync target done with resync. Explicitly notify peer, even though /* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */ * it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* Verify finished, or reached stop sector. Peer did not know about /* Verify finished, or reached stop sector. Peer did not know about
* the stop sector, and we may even have changed the stop sector during * the stop sector, and we may even have changed the stop sector during
* verify to interrupt/stop early. Send the new state. */ * verify to interrupt/stop early. Send the new state. */
if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
&& verify_can_do_stop_sector(device)) && verify_can_do_stop_sector(device))
drbd_send_state(first_peer_device(device), ns); drbd_send_state(peer_device, ns);
/* This triggers bitmap writeout of potentially still unwritten pages /* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk * if the resync finished cleanly, or aborted because of peer disk
......
...@@ -583,8 +583,10 @@ static int drbd_rs_number_requests(struct drbd_device *device) ...@@ -583,8 +583,10 @@ static int drbd_rs_number_requests(struct drbd_device *device)
return number; return number;
} }
static int make_resync_request(struct drbd_device *device, int cancel) static int make_resync_request(struct drbd_device *const device, int cancel)
{ {
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
unsigned long bit; unsigned long bit;
sector_t sector; sector_t sector;
const sector_t capacity = drbd_get_capacity(device->this_bdev); const sector_t capacity = drbd_get_capacity(device->this_bdev);
...@@ -618,15 +620,15 @@ static int make_resync_request(struct drbd_device *device, int cancel) ...@@ -618,15 +620,15 @@ static int make_resync_request(struct drbd_device *device, int cancel)
for (i = 0; i < number; i++) { for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */ /* Stop generating RS requests, when half of the send buffer is filled */
mutex_lock(&first_peer_device(device)->connection->data.mutex); mutex_lock(&connection->data.mutex);
if (first_peer_device(device)->connection->data.socket) { if (connection->data.socket) {
queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued; queued = connection->data.socket->sk->sk_wmem_queued;
sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf; sndbuf = connection->data.socket->sk->sk_sndbuf;
} else { } else {
queued = 1; queued = 1;
sndbuf = 0; sndbuf = 0;
} }
mutex_unlock(&first_peer_device(device)->connection->data.mutex); mutex_unlock(&connection->data.mutex);
if (queued > sndbuf / 2) if (queued > sndbuf / 2)
goto requeue; goto requeue;
...@@ -696,9 +698,9 @@ static int make_resync_request(struct drbd_device *device, int cancel) ...@@ -696,9 +698,9 @@ static int make_resync_request(struct drbd_device *device, int cancel)
/* adjust very last sectors, in case we are oddly sized */ /* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity) if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9; size = (capacity-sector)<<9;
if (first_peer_device(device)->connection->agreed_pro_version >= 89 && if (connection->agreed_pro_version >= 89 &&
first_peer_device(device)->connection->csums_tfm) { connection->csums_tfm) {
switch (read_for_csum(first_peer_device(device), sector, size)) { switch (read_for_csum(peer_device, sector, size)) {
case -EIO: /* Disk failure */ case -EIO: /* Disk failure */
put_ldev(device); put_ldev(device);
return -EIO; return -EIO;
...@@ -717,7 +719,7 @@ static int make_resync_request(struct drbd_device *device, int cancel) ...@@ -717,7 +719,7 @@ static int make_resync_request(struct drbd_device *device, int cancel)
int err; int err;
inc_rs_pending(device); inc_rs_pending(device);
err = drbd_send_drequest(first_peer_device(device), P_RS_DATA_REQUEST, err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST,
sector, size, ID_SYNCER); sector, size, ID_SYNCER);
if (err) { if (err) {
drbd_err(device, "drbd_send_drequest() failed, aborting...\n"); drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
...@@ -1351,7 +1353,8 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) ...@@ -1351,7 +1353,8 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device; struct drbd_device *device = req->device;
struct drbd_connection *connection = first_peer_device(device)->connection; struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1365,7 +1368,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) ...@@ -1365,7 +1368,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
* No more barriers will be sent, until we leave AHEAD mode again. */ * No more barriers will be sent, until we leave AHEAD mode again. */
maybe_send_barrier(connection, req->epoch); maybe_send_barrier(connection, req->epoch);
err = drbd_send_out_of_sync(first_peer_device(device), req); err = drbd_send_out_of_sync(peer_device, req);
req_mod(req, OOS_HANDED_TO_NETWORK); req_mod(req, OOS_HANDED_TO_NETWORK);
return err; return err;
...@@ -1380,7 +1383,8 @@ int w_send_dblock(struct drbd_work *w, int cancel) ...@@ -1380,7 +1383,8 @@ int w_send_dblock(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device; struct drbd_device *device = req->device;
struct drbd_connection *connection = first_peer_device(device)->connection; struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1392,7 +1396,7 @@ int w_send_dblock(struct drbd_work *w, int cancel) ...@@ -1392,7 +1396,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
maybe_send_barrier(connection, req->epoch); maybe_send_barrier(connection, req->epoch);
connection->send.current_epoch_writes++; connection->send.current_epoch_writes++;
err = drbd_send_dblock(first_peer_device(device), req); err = drbd_send_dblock(peer_device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
return err; return err;
...@@ -1407,7 +1411,8 @@ int w_send_read_req(struct drbd_work *w, int cancel) ...@@ -1407,7 +1411,8 @@ int w_send_read_req(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device; struct drbd_device *device = req->device;
struct drbd_connection *connection = first_peer_device(device)->connection; struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1419,7 +1424,7 @@ int w_send_read_req(struct drbd_work *w, int cancel) ...@@ -1419,7 +1424,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
* if there was any yet. */ * if there was any yet. */
maybe_send_barrier(connection, req->epoch); maybe_send_barrier(connection, req->epoch);
err = drbd_send_drequest(first_peer_device(device), P_DATA_REQUEST, req->i.sector, req->i.size, err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req); (unsigned long)req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
...@@ -1633,6 +1638,8 @@ int w_start_resync(struct drbd_work *w, int cancel) ...@@ -1633,6 +1638,8 @@ int w_start_resync(struct drbd_work *w, int cancel)
*/ */
void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
{ {
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
union drbd_state ns; union drbd_state ns;
int r; int r;
...@@ -1651,7 +1658,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1651,7 +1658,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
if (r > 0) { if (r > 0) {
drbd_info(device, "before-resync-target handler returned %d, " drbd_info(device, "before-resync-target handler returned %d, "
"dropping connection.\n", r); "dropping connection.\n", r);
conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
return; return;
} }
} else /* C_SYNC_SOURCE */ { } else /* C_SYNC_SOURCE */ {
...@@ -1664,7 +1671,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1664,7 +1671,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
} else { } else {
drbd_info(device, "before-resync-source handler returned %d, " drbd_info(device, "before-resync-source handler returned %d, "
"dropping connection.\n", r); "dropping connection.\n", r);
conn_request_state(first_peer_device(device)->connection, conn_request_state(connection,
NS(conn, C_DISCONNECTING), CS_HARD); NS(conn, C_DISCONNECTING), CS_HARD);
return; return;
} }
...@@ -1672,7 +1679,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1672,7 +1679,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
} }
} }
if (current == first_peer_device(device)->connection->worker.task) { if (current == connection->worker.task) {
/* The worker should not sleep waiting for state_mutex, /* The worker should not sleep waiting for state_mutex,
that can take long */ that can take long */
if (!mutex_trylock(device->state_mutex)) { if (!mutex_trylock(device->state_mutex)) {
...@@ -1756,12 +1763,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1756,12 +1763,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
* drbd_resync_finished from here in that case. * drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96, * We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */ * and from after_state_ch otherwise. */
if (side == C_SYNC_SOURCE && if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
first_peer_device(device)->connection->agreed_pro_version < 96) drbd_gen_and_send_sync_uuid(peer_device);
drbd_gen_and_send_sync_uuid(first_peer_device(device));
if (first_peer_device(device)->connection->agreed_pro_version < 95 && if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
device->rs_total == 0) {
/* This still has a race (about when exactly the peers /* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync * detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit * on next handshake. In 8.3.9 we fixed this with explicit
...@@ -1777,7 +1782,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1777,7 +1782,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
int timeo; int timeo;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(connection->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock(); rcu_read_unlock();
schedule_timeout_interruptible(timeo); schedule_timeout_interruptible(timeo);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment