@@ ... @@ struct scrub_ctx {
struct scrub_bio *wr_curr_bio;
struct mutex wr_lock;
int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
- atomic_t flush_all_writes;
struct btrfs_device *wr_tgtdev;
+ bool flush_all_writes;
/*
* statistics
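The heart of the change: flush_all_writes was only ever touched with atomic_set() and atomic_read(), which on Linux are plain, unordered stores and loads; no read-modify-write operation or barrier was ever involved. Since the flag has exactly one writer (the scrub thread) and readers that tolerate a momentarily stale value, a plain bool gives the same guarantees. A minimal userspace sketch of that equivalence, using C11 relaxed atomics to model kernel atomic_set()/atomic_read() (hypothetical names, not btrfs code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool flush_atomic;	/* models the old atomic_t style */
static bool flush_plain;		/* models the new bool style */

int main(void)
{
	/*
	 * Kernel atomic_set()/atomic_read() imply no barrier and no RMW;
	 * like the relaxed accesses below, they compile to an ordinary
	 * store and load, which is exactly what the plain bool gets.
	 */
	atomic_store_explicit(&flush_atomic, true, memory_order_relaxed);
	flush_plain = true;

	printf("%d %d\n",
	       atomic_load_explicit(&flush_atomic, memory_order_relaxed),
	       flush_plain);
	return 0;
}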
@@ ... @@ scrub_setup_ctx
WARN_ON(!fs_info->dev_replace.tgtdev);
sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
}
return sctx;
@@ ... @@ scrub_missing_raid56_worker
scrub_block_put(sblock);
- if (sctx->is_dev_replace &&
- atomic_read(&sctx->flush_all_writes)) {
+ if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
@@ ... @@ scrub_bio_end_io_worker
sctx->first_free = sbio->index;
spin_unlock(&sctx->list_lock);
- if (sctx->is_dev_replace &&
- atomic_read(&sctx->flush_all_writes)) {
+ if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
@@ ... @@ scrub_stripe
*/
if (atomic_read(&fs_info->scrub_pause_req)) {
/* push queued extents */
- atomic_set(&sctx->flush_all_writes, 1);
+ sctx->flush_all_writes = true;
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
scrub_blocked_if_needed(fs_info);
}
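This pause path is the flag's only writer: it raises flush_all_writes, pushes the queued read and write bios, and sleeps until bios_in_flight drains to zero before lowering the flag again. The flag merely asks completion workers to submit write bios eagerly instead of batching them; the wait_event() is what actually guarantees completion, so the unsynchronized bool read in the workers above is harmless. A rough pthreads model of the handshake, with a condition variable standing in for wait_event()/list_wait (hypothetical names, not btrfs code; portable C would need an atomic flag for a truly concurrent read, while the kernel's single-writer pattern gets away with a plain one):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* ~list_lock */
static pthread_cond_t list_wait = PTHREAD_COND_INITIALIZER;
static int bios_in_flight;
static bool flush_all_writes;	/* single writer: the pause path in main() */

/* completion worker: honor the flag, then drop the in-flight count */
static void *bio_complete(void *arg)
{
	(void)arg;
	if (flush_all_writes)	/* unlocked read; a stale value is harmless */
		printf("worker: submitting queued writes early\n");
	pthread_mutex_lock(&lock);
	if (--bios_in_flight == 0)
		pthread_cond_broadcast(&list_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	bios_in_flight = 1;		/* one bio submitted */
	flush_all_writes = true;	/* "push queued extents" */
	pthread_create(&t, NULL, bio_complete, NULL);

	/* wait_event(list_wait, bios_in_flight == 0) */
	pthread_mutex_lock(&lock);
	while (bios_in_flight != 0)
		pthread_cond_wait(&list_wait, &lock);
	pthread_mutex_unlock(&lock);

	flush_all_writes = false;	/* nothing left in flight to read it */
	pthread_join(t, NULL);
	printf("drained\n");
	return 0;
}

Clearing the flag only after the wait is what keeps the plain bool safe: by the time it goes back to false, no completion worker that could observe it is still running.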
@@ ... @@ scrub_enumerate_chunks
* write requests are really completed when bios_in_flight
* changes to 0.
*/
- atomic_set(&sctx->flush_all_writes, 1);
+ sctx->flush_all_writes = true;
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
@@ ... @@ scrub_enumerate_chunks
*/
wait_event(sctx->list_wait,
atomic_read(&sctx->workers_pending) == 0);
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
scrub_pause_off(fs_info);
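The comment at the top of the last hunk encodes the invariant that makes a single wait on bios_in_flight sufficient in dev-replace mode: a read bio's completion queues its paired write bio before dropping its own in-flight reference, so the counter cannot reach zero while a generated write is still outstanding. A single-threaded sketch of that accounting (hypothetical names, not btrfs code):

#include <assert.h>
#include <stdio.h>

static int bios_in_flight;

static void write_done(void)
{
	bios_in_flight--;	/* the chained write bio finishes last */
}

static void read_done(void)
{
	/* queue the paired write *before* dropping the read's reference */
	bios_in_flight++;	/* write bio submitted */
	bios_in_flight--;	/* read bio done */
	assert(bios_in_flight > 0);	/* the write still pins the counter */
	write_done();
}

int main(void)
{
	bios_in_flight++;	/* read bio submitted */
	read_done();
	assert(bios_in_flight == 0);	/* zero only once the write completed */
	printf("counter drained only after the full read+write chain\n");
	return 0;
}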