Commit 06f10adb authored by Lars Ellenberg, committed by Jens Axboe

drbd: prepare for more than 32 bit flags

 - struct drbd_conf { ... unsigned long flags; ... }
 + struct drbd_conf { ... unsigned long drbd_flags[N]; ... }

And introduce wrapper functions for test/set/clear bit operations
on this member.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 44edfb0d
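The whole change is one pattern applied everywhere: the single unsigned long flags word becomes an array sized by ceiling division, and every open-coded bitop goes through a named wrapper. Below is a minimal userspace sketch of that pattern, with hypothetical demo_* names and C11 atomics standing in for the kernel's set_bit()/clear_bit()/test_bit(). (The kernel bitops take a bit number plus a base pointer and index past the first long themselves, which is why the real wrappers in the diff can simply pass &mdev->drbd_flags[0].)

#include <limits.h>        /* CHAR_BIT */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

enum demo_flag {
	DEMO_CREATE_BARRIER,
	DEMO_SEND_PING,
	/* ... any number of further flags ... */
	DEMO_N_FLAGS,           /* keep last: counts the bits */
};

struct demo_dev {
	/* ceiling division: enough longs for DEMO_N_FLAGS bits */
	_Atomic unsigned long flags[(DEMO_N_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

static void demo_set_flag(struct demo_dev *d, enum demo_flag f)
{
	atomic_fetch_or(&d->flags[f / BITS_PER_LONG], 1UL << (f % BITS_PER_LONG));
}

static void demo_clear_flag(struct demo_dev *d, enum demo_flag f)
{
	atomic_fetch_and(&d->flags[f / BITS_PER_LONG], ~(1UL << (f % BITS_PER_LONG)));
}

static bool demo_test_flag(struct demo_dev *d, enum demo_flag f)
{
	return (atomic_load(&d->flags[f / BITS_PER_LONG]) >> (f % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct demo_dev d = { .flags = { 0 } };

	demo_set_flag(&d, DEMO_SEND_PING);
	printf("SEND_PING=%d CREATE_BARRIER=%d\n",
	       demo_test_flag(&d, DEMO_SEND_PING),
	       demo_test_flag(&d, DEMO_CREATE_BARRIER));
	demo_clear_flag(&d, DEMO_SEND_PING);
	return 0;
}

Once all call sites go through the wrappers, adding a 33rd (or 65th) flag only grows the array; no caller changes.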
@@ -90,7 +90,7 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
dt = MAX_SCHEDULE_TIMEOUT;
dt = wait_event_timeout(mdev->misc_wait,
-*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+*done || drbd_test_flag(mdev, FORCE_DETACH), dt);
if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
@@ -108,7 +108,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
-if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
......
@@ -808,7 +808,7 @@ enum {
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
/* global flag bits */
-enum {
+enum drbd_flag {
CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */
@@ -858,6 +858,9 @@ enum {
* and potentially deadlock on, this drbd worker.
*/
DISCONNECT_SENT, /* Currently the last bit in this 32bit word */
+
+/* keep last */
+DRBD_N_FLAGS,
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -970,8 +973,7 @@ struct fifo_buffer {
};
struct drbd_conf {
-/* things that are stored as / read from meta data on disk */
-unsigned long flags;
+unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG -1)/BITS_PER_LONG];
/* configured by drbdsetup */
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
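A note on the sizing expression: (DRBD_N_FLAGS + BITS_PER_LONG - 1)/BITS_PER_LONG is ceiling division, so with 64-bit longs anything up to 64 flags still occupies exactly one unsigned long and the struct layout is unchanged today, while flag number 65 (or 33 on a 32-bit build) silently grows the array. A hypothetical standalone check of the arithmetic:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
/* same ceiling division as drbd_flags[] above */
#define LONGS_FOR(n)  (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	assert(LONGS_FOR(1) == 1);
	assert(LONGS_FOR(BITS_PER_LONG) == 1);      /* exactly full: no growth */
	assert(LONGS_FOR(BITS_PER_LONG + 1) == 2);  /* one bit over: second long */
	printf("33 flags need %zu long(s) on this machine\n", LONGS_FOR(33));
	return 0;
}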
@@ -1143,6 +1145,31 @@ struct drbd_conf {
unsigned int local_max_bio_size;
};
+
+static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+clear_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+return test_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+return test_and_set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+return test_and_clear_bit(f, &mdev->drbd_flags[0]);
+}
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
struct drbd_conf *mdev;
@@ -1812,12 +1839,12 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
-!test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+!drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
}
static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
-clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
wake_up(&mdev->misc_wait);
}
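drbd_state_lock() above is a tiny mutex built from one flag bit: sleep until the test-and-set wins the bit, release by clearing it and waking the wait queue; the new drbd_test_and_set_flag()/drbd_clear_flag() wrappers preserve that unchanged. A userspace model of the same idiom, assuming pthreads in place of wait_event()/wake_up() and a plain flag under a mutex in place of the atomic bitop:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool cluster_st_change;  /* models the CLUSTER_ST_CHANGE bit */

static void state_lock(void)
{
	pthread_mutex_lock(&lk);
	while (cluster_st_change)   /* lose the test-and-set: sleep */
		pthread_cond_wait(&cv, &lk);
	cluster_st_change = true;   /* win it: we hold the "lock" */
	pthread_mutex_unlock(&lk);
}

static void state_unlock(void)
{
	pthread_mutex_lock(&lk);
	cluster_st_change = false;   /* clear_bit() */
	pthread_cond_broadcast(&cv); /* wake_up(&mdev->misc_wait) */
	pthread_mutex_unlock(&lk);
}

int main(void)
{
	state_lock();
	/* ... cluster state change would run here ... */
	state_unlock();
	return 0;
}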
@@ -1874,9 +1901,9 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
/* NOTE fall through to detach case if forcedetach set */
case EP_DETACH:
case EP_CALL_HELPER:
-set_bit(WAS_IO_ERROR, &mdev->flags);
+drbd_set_flag(mdev, WAS_IO_ERROR);
if (forcedetach == DRBD_FORCE_DETACH)
-set_bit(FORCE_DETACH, &mdev->flags);
+drbd_set_flag(mdev, FORCE_DETACH);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
@@ -2037,13 +2064,13 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
static inline void wake_asender(struct drbd_conf *mdev)
{
-if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+if (drbd_test_flag(mdev, SIGNAL_ASENDER))
force_sig(DRBD_SIG, mdev->asender.task);
}
static inline void request_ping(struct drbd_conf *mdev)
{
-set_bit(SEND_PING, &mdev->flags);
+drbd_set_flag(mdev, SEND_PING);
wake_asender(mdev);
}
@@ -2374,7 +2401,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
if (is_susp(mdev->state))
return false;
-if (test_bit(SUSPEND_IO, &mdev->flags))
+if (drbd_test_flag(mdev, SUSPEND_IO))
return false;
/* to avoid potential deadlock or bitmap corruption,
@@ -2389,7 +2416,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
* and we are within the spinlock anyways, we have this workaround. */
if (atomic_read(&mdev->ap_bio_cnt) > mxb)
return false;
-if (test_bit(BITMAP_IO, &mdev->flags))
+if (drbd_test_flag(mdev, BITMAP_IO))
return false;
return true;
}
@@ -2427,8 +2454,8 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
D_ASSERT(ap_bio >= 0);
-if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
+if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
@@ -2477,7 +2504,7 @@ static inline void drbd_update_congested(struct drbd_conf *mdev)
{
struct sock *sk = mdev->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
-set_bit(NET_CONGESTED, &mdev->flags);
+drbd_set_flag(mdev, NET_CONGESTED);
}
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
@@ -2494,14 +2521,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
-if (test_bit(MD_NO_FUA, &mdev->flags))
+if (drbd_test_flag(mdev, MD_NO_FUA))
return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
-set_bit(MD_NO_FUA, &mdev->flags);
+drbd_set_flag(mdev, MD_NO_FUA);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
#endif
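The drbd_md_flush() hunk above is a self-disabling path: the first failed flush sets MD_NO_FUA and every later call short-circuits, which is also why drbd_nl_disk_conf() below explicitly resets the bit when a disk is (re)attached. A minimal userspace sketch of that idiom, with a hypothetical issue_flush() standing in for blkdev_issue_flush():

#include <stdbool.h>
#include <stdio.h>

static bool md_no_fua;          /* models the MD_NO_FUA flag */

static int issue_flush(void)    /* stand-in for blkdev_issue_flush() */
{
	return -95;             /* pretend the device rejects flushes (-EOPNOTSUPP) */
}

static void md_flush(void)
{
	int r;

	if (md_no_fua)          /* flushes already known not to work */
		return;
	r = issue_flush();
	if (r) {
		md_no_fua = true;  /* sticky until explicitly cleared */
		fprintf(stderr, "flush failed (%d), disabling md-flushes\n", r);
	}
}

int main(void)
{
	md_flush();             /* fails once, disables itself */
	md_flush();             /* silent no-op from now on */
	return 0;
}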
This diff is collapsed.
@@ -148,7 +148,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
int ret;
if (current == mdev->worker.task)
-set_bit(CALLBACK_PENDING, &mdev->flags);
+drbd_set_flag(mdev, CALLBACK_PENDING);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
@@ -193,7 +193,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
(ret >> 8) & 0xff, ret);
if (current == mdev->worker.task)
-clear_bit(CALLBACK_PENDING, &mdev->flags);
+drbd_clear_flag(mdev, CALLBACK_PENDING);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -295,7 +295,7 @@ static int _try_outdate_peer_async(void *data)
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
-if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
+if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -583,7 +583,7 @@ char *ppsize(char *buf, unsigned long long size)
*/
void drbd_suspend_io(struct drbd_conf *mdev)
{
-set_bit(SUSPEND_IO, &mdev->flags);
+drbd_set_flag(mdev, SUSPEND_IO);
if (is_susp(mdev->state))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
@@ -591,7 +591,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)
void drbd_resume_io(struct drbd_conf *mdev)
{
-clear_bit(SUSPEND_IO, &mdev->flags);
+drbd_clear_flag(mdev, SUSPEND_IO);
wake_up(&mdev->misc_wait);
}
@@ -881,8 +881,8 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
*/
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
-wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
-wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
+wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
+wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
drbd_thread_start(&mdev->worker);
drbd_flush_workqueue(mdev);
}
@@ -896,10 +896,10 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
if (mdev->state.disk == D_DISKLESS &&
mdev->state.conn == C_STANDALONE &&
mdev->state.role == R_SECONDARY) {
-set_bit(DEVICE_DYING, &mdev->flags);
+drbd_set_flag(mdev, DEVICE_DYING);
drbd_thread_stop_nowait(&mdev->worker);
} else
-clear_bit(CONFIG_PENDING, &mdev->flags);
+drbd_clear_flag(mdev, CONFIG_PENDING);
spin_unlock_irq(&mdev->req_lock);
wake_up(&mdev->state_wait);
}
@@ -919,7 +919,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
spin_lock_irq(&mdev->req_lock);
if (mdev->state.conn < C_CONNECTED)
-s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
+s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
spin_unlock_irq(&mdev->req_lock);
@@ -958,7 +958,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
/* make sure there is no leftover from previous force-detach attempts */
-clear_bit(FORCE_DETACH, &mdev->flags);
+drbd_clear_flag(mdev, FORCE_DETACH);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
@@ -1168,9 +1168,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (nbc->dc.no_md_flush)
-set_bit(MD_NO_FUA, &mdev->flags);
+drbd_set_flag(mdev, MD_NO_FUA);
else
-clear_bit(MD_NO_FUA, &mdev->flags);
+drbd_clear_flag(mdev, MD_NO_FUA);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1186,13 +1186,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_bump_write_ordering(mdev, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
-set_bit(CRASHED_PRIMARY, &mdev->flags);
+drbd_set_flag(mdev, CRASHED_PRIMARY);
else
-clear_bit(CRASHED_PRIMARY, &mdev->flags);
+drbd_clear_flag(mdev, CRASHED_PRIMARY);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
-set_bit(CRASHED_PRIMARY, &mdev->flags);
+drbd_set_flag(mdev, CRASHED_PRIMARY);
cp_discovered = 1;
}
@@ -1217,18 +1217,18 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
-clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+drbd_clear_flag(mdev, USE_DEGR_WFC_T);
if (mdev->state.role != R_PRIMARY &&
drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
-set_bit(USE_DEGR_WFC_T, &mdev->flags);
+drbd_set_flag(mdev, USE_DEGR_WFC_T);
dd = drbd_determine_dev_size(mdev, 0);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == grew)
-set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+drbd_set_flag(mdev, RESYNC_AFTER_NEG);
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
dev_info(DEV, "Assuming that all blocks are out of sync "
@@ -1362,7 +1362,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
if (dt.detach_force) {
-set_bit(FORCE_DETACH, &mdev->flags);
+drbd_set_flag(mdev, FORCE_DETACH);
drbd_force_state(mdev, NS(disk, D_FAILED));
reply->ret_code = SS_SUCCESS;
goto out;
@@ -1707,7 +1707,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
-iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1765,7 +1765,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
-set_bit(RESIZE_PENDING, &mdev->flags);
+drbd_set_flag(mdev, RESIZE_PENDING);
drbd_send_uuids(mdev);
drbd_send_sizes(mdev, 1, ddsf);
@@ -1983,7 +1983,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
* resync just being finished, wait for it before requesting a new resync.
* Also wait for it's after_state_ch(). */
drbd_suspend_io(mdev);
-wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2026,7 +2026,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
* resync just being finished, wait for it before requesting a new resync.
* Also wait for it's after_state_ch(). */
drbd_suspend_io(mdev);
-wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -2094,9 +2094,9 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
-if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
drbd_uuid_new_current(mdev);
-clear_bit(NEW_CUR_UUID, &mdev->flags);
+drbd_clear_flag(mdev, NEW_CUR_UUID);
}
drbd_suspend_io(mdev);
reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -2199,7 +2199,7 @@ static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
tl = reply->tag_list;
rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
-test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;
tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2224,7 +2224,7 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
-wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
/* w_make_ov_request expects start position to be aligned */
mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
......
@@ -270,7 +270,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
mdev->congestion_reason ?: '-',
-test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
+drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
mdev->send_cnt/2,
mdev->recv_cnt/2,
mdev->writ_cnt/2,
......
This diff is collapsed.
@@ -118,7 +118,7 @@ static void queue_barrier(struct drbd_conf *mdev)
* barrier/epoch object is added. This is the only place this bit is
* set. It indicates that the barrier for this epoch is already queued,
* and no new epoch has been created yet. */
-if (test_bit(CREATE_BARRIER, &mdev->flags))
+if (drbd_test_flag(mdev, CREATE_BARRIER))
return;
b = mdev->newest_tle;
@@ -129,7 +129,7 @@ static void queue_barrier(struct drbd_conf *mdev)
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
drbd_queue_work(&mdev->data.work, &b->w);
-set_bit(CREATE_BARRIER, &mdev->flags);
+drbd_set_flag(mdev, CREATE_BARRIER);
}
static void _about_to_complete_local_write(struct drbd_conf *mdev,
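queue_barrier() above uses CREATE_BARRIER as a queue-once-per-epoch latch: the first write of an epoch queues the barrier work and sets the bit, later callers bail out early, and the bit is cleared again when the epoch closes (the test_and_clear in drbd_make_request_common() below). A userspace sketch of that latch, assuming a pthread mutex in place of mdev->req_lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static bool create_barrier;     /* models the CREATE_BARRIER flag */

static void queue_barrier(void)
{
	pthread_mutex_lock(&req_lock);
	if (!create_barrier) {      /* first writer of this epoch queues it */
		printf("barrier queued\n");
		create_barrier = true;
	}                           /* later callers see the bit and return */
	pthread_mutex_unlock(&req_lock);
}

static void close_epoch(void)
{
	pthread_mutex_lock(&req_lock);
	create_barrier = false;     /* next epoch may queue a new barrier */
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	queue_barrier();
	queue_barrier();            /* no-op: barrier already queued */
	close_epoch();
	queue_barrier();            /* new epoch, queued again */
	return 0;
}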
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* corresponding hlist_del is in _req_may_be_done() */
hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
-set_bit(UNPLUG_REMOTE, &mdev->flags);
+drbd_set_flag(mdev, UNPLUG_REMOTE);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
@@ -541,11 +541,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
-set_bit(UNPLUG_REMOTE, &mdev->flags);
+drbd_set_flag(mdev, UNPLUG_REMOTE);
/* see drbd_make_request_common,
* just after it grabs the req_lock */
-D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
+D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0);
req->epoch = mdev->newest_tle->br_number;
@@ -888,7 +888,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
if (rw == WRITE && local && size
-&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+&& !drbd_test_flag(mdev, AL_SUSPENDED)) {
req->rq_state |= RQ_IN_ACT_LOG;
drbd_al_begin_io(mdev, sector);
}
@@ -912,7 +912,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
-test_bit(CREATE_BARRIER, &mdev->flags)) {
+drbd_test_flag(mdev, CREATE_BARRIER)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
if (!b) {
@@ -955,7 +955,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
}
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
-test_bit(CREATE_BARRIER, &mdev->flags)) {
+drbd_test_flag(mdev, CREATE_BARRIER)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
spin_unlock_irq(&mdev->req_lock);
@@ -977,12 +977,12 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
if ((remote || send_oos) && mdev->unused_spare_tle &&
-test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
_tl_add_barrier(mdev, mdev->unused_spare_tle);
mdev->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
-test_bit(CREATE_BARRIER, &mdev->flags)));
+drbd_test_flag(mdev, CREATE_BARRIER)));
}
/* NOTE
......
@@ -793,7 +793,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
-clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+drbd_clear_flag(mdev, AHEAD_TO_SYNC_SOURCE);
return 1;
}
@@ -817,10 +817,10 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca
static void ping_peer(struct drbd_conf *mdev)
{
-clear_bit(GOT_PING_ACK, &mdev->flags);
+drbd_clear_flag(mdev, GOT_PING_ACK);
request_ping(mdev);
wait_event(mdev->misc_wait,
-test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+drbd_test_flag(mdev, GOT_PING_ACK) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
@@ -1749,8 +1749,8 @@ int drbd_worker(struct drbd_thread *thi)
NS(conn, C_NETWORK_FAILURE));
}
}
-D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
-D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
+D_ASSERT(drbd_test_flag(mdev, DEVICE_DYING));
+D_ASSERT(drbd_test_flag(mdev, CONFIG_PENDING));
spin_lock_irq(&mdev->data.work.q_lock);
i = 0;
@@ -1783,8 +1783,8 @@ int drbd_worker(struct drbd_thread *thi)
dev_info(DEV, "worker terminated\n");
-clear_bit(DEVICE_DYING, &mdev->flags);
-clear_bit(CONFIG_PENDING, &mdev->flags);
+drbd_clear_flag(mdev, DEVICE_DYING);
+drbd_clear_flag(mdev, CONFIG_PENDING);
wake_up(&mdev->state_wait);
return 0;
......