return mxb;
}
-static inline int drbd_state_is_stable(union drbd_state s)
+static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
+ union drbd_state s = mdev->state;
+
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
case C_PAUSED_SYNC_T:
case C_AHEAD:
case C_BEHIND:
- /* maybe stable, look at the disk state */
- break;
-
- /* no new io accepted during transitional states
- * like handshake or teardown */
+ /* transitional states, IO allowed */
case C_DISCONNECTING:
case C_UNCONNECTED:
case C_TIMEOUT:
case C_WF_REPORT_PARAMS:
case C_STARTING_SYNC_S:
case C_STARTING_SYNC_T:
+ break;
+
+ /* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
+ if (mdev->agreed_pro_version < 96)
+ return 0;
+ break;
+
+ /* no new io accepted in these states */
case C_WF_BITMAP_T:
case C_WF_SYNC_UUID:
case C_MASK:
* to start during "stable" states. */
/* no new io accepted when attaching or detaching the disk */
- if (!drbd_state_is_stable(mdev->state))
+ if (!drbd_state_is_stable(mdev))
return 0;
/* since some older kernels don't have atomic_add_unless,
int ok = FALSE;
struct p_header80 *h = &mdev->data.rbuf.header.h80;
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
-
- drbd_bm_lock(mdev, "receive bitmap");
+ /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
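+ /* with protocol >= 96, application IO keeps running while the bitmap
+  * is exchanged and may set additional bits concurrently; the bitmap
+  * therefore must not be locked here */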
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
ok = TRUE;
out:
- drbd_bm_unlock(mdev);
+ /* drbd_bm_unlock(mdev); by intention no lock */
if (ok && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
free_page((unsigned long) buffer);
if (os.conn == C_DISCONNECTING) {
wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
- if (!is_susp(mdev->state)) {
- /* we must not free the tl_hash
- * while application io is still on the fly */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
- drbd_free_tl_hash(mdev);
- }
-
crypto_free_hash(mdev->cram_hmac_tfm);
mdev->cram_hmac_tfm = NULL;
/* before we can signal completion to the upper layers,
* we may need to close the current epoch */
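+ /* since protocol 96, IO may already be in flight during bitmap
+  * exchange, so epochs must be closed from C_WF_BITMAP_T on, not only
+  * once fully connected */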
- if (mdev->state.conn >= C_CONNECTED && mdev->state.conn < C_AHEAD &&
+ if (mdev->state.conn >= C_WF_BITMAP_T && mdev->state.conn < C_AHEAD &&
req->epoch == mdev->newest_tle->br_number)
queue_barrier(mdev);
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
+
+static int drbd_should_do_remote(struct drbd_conf *mdev)
+{
+ union drbd_state s = mdev->state;
+
+ return s.pdsk == D_UP_TO_DATE ||
+ (s.pdsk >= D_INCONSISTENT &&
+ s.conn >= C_WF_BITMAP_T &&
+ s.conn < C_AHEAD);
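+ /* before protocol 96 this was >= C_CONNECTED instead of >= C_WF_BITMAP_T;
+  * that is equivalent, since before 96 IO was frozen in the C_WF_BITMAP*
+  * states */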
+}
+
+static int drbd_should_send_oos(struct drbd_conf *mdev)
+{
+ union drbd_state s = mdev->state;
+
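+ /* the write cannot be mirrored right now, but the peer's disk is at
+  * least D_INCONSISTENT: mark the block out of sync instead, and let
+  * the resync pick it up later */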
+ return s.pdsk >= D_INCONSISTENT &&
+ (s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S);
+}
+
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
drbd_al_begin_io(mdev, sector);
}
- remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
- (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.conn >= C_CONNECTED &&
- mdev->state.conn < C_AHEAD));
- send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD &&
- mdev->state.pdsk >= D_INCONSISTENT);
+ remote = remote && drbd_should_do_remote(mdev);
+ send_oos = rw == WRITE && drbd_should_send_oos(mdev);
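+ /* at most one of the two can be set: pdsk is not expected to be
+  * D_UP_TO_DATE while in C_AHEAD or C_WF_BITMAP_S */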
+ D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
if (__ratelimit(&drbd_ratelimit_state))
}
if (remote || send_oos) {
- remote = (mdev->state.pdsk == D_UP_TO_DATE ||
- (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.conn >= C_CONNECTED &&
- mdev->state.conn < C_AHEAD));
- send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD &&
- mdev->state.pdsk >= D_INCONSISTENT);
+ remote = drbd_should_do_remote(mdev);
+ send_oos = rw == WRITE && drbd_should_send_oos(mdev);
+ D_ASSERT(!(remote && send_oos));
if (!(remote || send_oos))
dev_warn(DEV, "lost connection while grabbing the req_lock!\n");