Commit 9ed57dcb authored by Lars Ellenberg, committed by Philipp Reisner

drbd: ignore volume number for drbd barrier packet exchange

Transfer log epochs, and therefore P_BARRIER packets,
are per resource, not per volume.
We must not associate them with "some random volume".
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 648e46b5
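
For orientation, here is a minimal userspace sketch (not part of this commit) of the idea behind the change: the transfer-log epoch and its barrier number live on the connection and are shared by all volumes of the resource, so a P_BARRIER_ACK is keyed by the connection alone. All structs and names in the sketch (struct connection, struct volume, send_barrier_ack) are invented for illustration; they only mirror the shape of the new drbd_send_b_ack(tconn, ...) shown in the diff below.

/* Toy model, not DRBD code: one connection (resource), many volumes,
 * but a single shared epoch and barrier number. */
#include <stdio.h>

struct volume {
	int vnr;			/* volume number, irrelevant for barriers */
};

struct connection {
	unsigned int barrier_nr;	/* barrier number of the current epoch */
	unsigned int epoch_size;	/* writes in this epoch, across all volumes */
	struct volume vols[2];		/* every volume shares the same epoch */
};

/* The ack is keyed by the connection only; no volume is involved. */
static void send_barrier_ack(struct connection *conn)
{
	printf("P_BARRIER_ACK: barrier_nr=%u set_size=%u (no volume number)\n",
	       conn->barrier_nr, conn->epoch_size);
}

int main(void)
{
	struct connection conn = { .barrier_nr = 7,
				   .vols = { { .vnr = 0 }, { .vnr = 1 } } };

	/* Writes to different volumes all land in the same epoch. */
	conn.epoch_size++;	/* write on volume 0 */
	conn.epoch_size++;	/* write on volume 1 */

	/* When the epoch is closed, exactly one ack goes back per connection. */
	send_barrier_ack(&conn);
	return 0;
}

Run, this prints a single ack for the connection, no matter how many volumes contributed writes to the epoch, which is the invariant the commit restores.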
@@ -584,7 +584,7 @@ struct drbd_tl_epoch {
 };
 
 struct drbd_epoch {
-	struct drbd_conf *mdev;
+	struct drbd_tconn *tconn;
 	struct list_head list;
 	unsigned int barrier_nr;
 	atomic_t epoch_size; /* increased on every request added. */
@@ -1060,7 +1060,7 @@ extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_f
 extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
 extern int drbd_send_current_state(struct drbd_conf *mdev);
 extern int drbd_send_sync_param(struct drbd_conf *mdev);
-extern void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
+extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
			     u32 set_size);
 extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
			  struct drbd_peer_request *);
@@ -1463,21 +1463,21 @@ int drbd_send_bitmap(struct drbd_conf *mdev)
 	return err;
 }
 
-void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
 {
 	struct drbd_socket *sock;
 	struct p_barrier_ack *p;
 
-	if (mdev->state.conn < C_CONNECTED)
+	if (tconn->cstate < C_WF_REPORT_PARAMS)
 		return;
 
-	sock = &mdev->tconn->meta;
-	p = drbd_prepare_command(mdev, sock);
+	sock = &tconn->meta;
+	p = conn_prepare_command(tconn, sock);
 	if (!p)
 		return;
 	p->barrier = barrier_nr;
 	p->set_size = cpu_to_be32(set_size);
-	drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
+	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
 }
 
 /**
@@ -1169,11 +1169,15 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
		     (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&tconn->epoch_lock);
-				drbd_send_b_ack(epoch->mdev, epoch->barrier_nr, epoch_size);
+				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
 				spin_lock(&tconn->epoch_lock);
 			}
+#if 0
+			/* FIXME: dec unacked on connection, once we have
+			 * something to count pending connection packets in. */
 			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
-				dec_unacked(epoch->mdev);
+				dec_unacked(epoch->tconn);
+#endif
 
 			if (tconn->current_epoch != epoch) {
 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1369,19 +1373,15 @@ void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
 static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 {
-	struct drbd_conf *mdev;
 	int rv;
 	struct p_barrier *p = pi->data;
 	struct drbd_epoch *epoch;
 
-	mdev = vnr_to_mdev(tconn, pi->vnr);
-	if (!mdev)
-		return -EIO;
-
-	inc_unacked(mdev);
-
+	/* FIXME these are unacked on connection,
+	 * not a specific (peer)device.
+	 */
 	tconn->current_epoch->barrier_nr = p->barrier;
-	tconn->current_epoch->mdev = mdev;
+	tconn->current_epoch->tconn = tconn;
 	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
 
 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
@@ -1400,7 +1400,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 		if (epoch)
 			break;
 		else
-			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
 			/* Fall through */
 
 	case WO_bdev_flush:
@@ -1414,15 +1414,9 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 				break;
 		}
 
-		epoch = tconn->current_epoch;
-		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
-		D_ASSERT(atomic_read(&epoch->active) == 0);
-		D_ASSERT(epoch->flags == 0);
-
 		return 0;
 	default:
-		dev_err(DEV, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
 		return -EIO;
 	}
@@ -5049,21 +5043,22 @@ static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
 {
-	struct drbd_conf *mdev;
 	struct p_barrier_ack *p = pi->data;
+	struct drbd_conf *mdev;
+	int vnr;
 
-	mdev = vnr_to_mdev(tconn, pi->vnr);
-	if (!mdev)
-		return -EIO;
-
-	tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
+	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
 
+	rcu_read_lock();
+	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
 	if (mdev->state.conn == C_AHEAD &&
 	    atomic_read(&mdev->ap_in_flight) == 0 &&
 	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
 		mdev->start_resync_timer.expires = jiffies + HZ;
 		add_timer(&mdev->start_resync_timer);
 	}
+	}
+	rcu_read_unlock();
 
 	return 0;
 }
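
To round this off, a second toy sketch (again not DRBD code; tl_release_model, got_barrier_ack_model and the struct fields are invented, while C_AHEAD, ap_in_flight and the Ahead-to-SyncSource switch come from the diff above) of the other direction: the node that sent the writes handles the returning P_BARRIER_ACK by releasing the transfer-log epoch once per connection, and only the per-volume follow-up still walks the volumes, as got_BarrierAck above now does with idr_for_each_entry().

#include <stdio.h>

struct volume {
	int vnr;
	int ahead;		/* stands in for mdev->state.conn == C_AHEAD */
	int ap_in_flight;	/* stands in for atomic_read(&mdev->ap_in_flight) */
};

struct connection {
	struct volume vols[3];
	int nr_vols;
};

/* stands in for tl_release(tconn, ...): per connection, not per volume */
static void tl_release_model(struct connection *conn,
			     unsigned int barrier_nr, unsigned int set_size)
{
	printf("release %u requests of epoch %u on the connection\n",
	       set_size, barrier_nr);
}

static void got_barrier_ack_model(struct connection *conn,
				  unsigned int barrier_nr, unsigned int set_size)
{
	tl_release_model(conn, barrier_nr, set_size);

	/* only the per-volume bookkeeping still needs to visit each volume */
	for (int i = 0; i < conn->nr_vols; i++) {
		struct volume *v = &conn->vols[i];
		if (v->ahead && v->ap_in_flight == 0)
			printf("volume %d: schedule switch from Ahead to SyncSource\n",
			       v->vnr);
	}
}

int main(void)
{
	struct connection conn = {
		.vols = { { 0, 1, 0 }, { 1, 0, 0 }, { 2, 1, 3 } },
		.nr_vols = 3,
	};

	got_barrier_ack_model(&conn, 7, 2);
	return 0;
}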