btrfs: scrub: use GFP_KERNEL on the submission path
author David Sterba <dsterba@suse.com>
Thu, 11 Feb 2016 09:49:42 +0000 (10:49 +0100)
committer David Sterba <dsterba@suse.com>
Thu, 11 Feb 2016 14:19:39 +0000 (15:19 +0100)
Scrub is not on the critical writeback path, so we don't need to use
GFP_NOFS for all allocations. The failures are handled and the stats
are passed back to userspace.

Let's use GFP_KERNEL on the paths where everything is OK, i.e. setting
up the global structures and the IO submission paths.

Functions that do the repair and fixups still use GFP_NOFS, as we might
want to skip any other filesystem activity if we encounter an error.
This could turn out to be unnecessary, but that requires more review
compared to the easy cases in this patch.
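
As an illustration of the easy case (sketched from the scrub_pages()
hunk below), an allocation failure on the submission path is simply
recorded in the scrub stats and reported back to the caller:

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}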

Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/dev-replace.c
fs/btrfs/scrub.c

index cbb7dbfb3fffffc54f621b8dc43a7b1ec9a1185b..01ce5fcecc5c388a3d628058910d5c06606aed81 100644 (file)
@@ -802,7 +802,7 @@ static int btrfs_dev_replace_kthread(void *data)
        struct btrfs_ioctl_dev_replace_args *status_args;
        u64 progress;
 
-       status_args = kzalloc(sizeof(*status_args), GFP_NOFS);
+       status_args = kzalloc(sizeof(*status_args), GFP_KERNEL);
        if (status_args) {
                btrfs_dev_replace_status(fs_info, status_args);
                progress = status_args->status.progress_1000;
index 92bf5ee732fb0e14f7e330dddaadccb4b70b35cc..2de7817d0e1be7176a3e3dcf7ad44793fe446e2c 100644 (file)
@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int ret;
 
-       sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+       sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;
 
-               sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+               sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
        if (!wr_ctx->wr_curr_bio) {
                wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-                                             GFP_NOFS);
+                                             GFP_KERNEL);
                if (!wr_ctx->wr_curr_bio) {
                        mutex_unlock(&wr_ctx->wr_lock);
                        return -ENOMEM;
@@ -1671,7 +1671,8 @@ again:
                sbio->dev = wr_ctx->tgtdev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       wr_ctx->pages_per_wr_bio);
                        if (!bio) {
                                mutex_unlock(&wr_ctx->wr_lock);
                                return -ENOMEM;
@@ -2076,7 +2077,8 @@ again:
                sbio->dev = spage->dev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       sctx->pages_per_rd_bio);
                        if (!bio)
                                return -ENOMEM;
                        sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
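
For contrast, the repair path keeps GFP_NOFS for now; an illustrative
call site of the kind intentionally left untouched (compare
scrub_handle_errored_block(), not part of this diff):

	/* repair path: keep GFP_NOFS to avoid recursing into the fs */
	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);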