From eef16ba269ea1d55ca1b4eab8d91f02e185835c9 Mon Sep 17 00:00:00 2001
From: Kuanling Huang
Date: Fri, 15 Sep 2017 16:47:45 +0800
Subject: [PATCH] Btrfs: send, apply asynchronous page cache readahead to enhance page read

By analyzing the perf output of btrfs send, we found that it spends a
large amount of CPU time in page_cache_sync_readahead. This can be
reduced by switching to asynchronous readahead. The overall performance
gain on HDD and SSD was 9 and 15 percent respectively when simply
sending a large file.

Signed-off-by: Kuanling Huang
Reviewed-by: Nikolay Borisov
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/send.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 07445be8c1cc..0746eda7231d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4720,16 +4720,27 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 	/* initial readahead */
 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
 	file_ra_state_init(&sctx->ra, inode->i_mapping);
-	page_cache_sync_readahead(inode->i_mapping, &sctx->ra, NULL, index,
-		       last_index - index + 1);
 
 	while (index <= last_index) {
 		unsigned cur_len = min_t(unsigned, len,
 					 PAGE_SIZE - pg_offset);
-		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
+
+		page = find_lock_page(inode->i_mapping, index);
 		if (!page) {
-			ret = -ENOMEM;
-			break;
+			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
+				NULL, index, last_index + 1 - index);
+
+			page = find_or_create_page(inode->i_mapping, index,
+					GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
+				NULL, page, index, last_index + 1 - index);
 		}
 
 		if (!PageUptodate(page)) {
-- 
2.30.2
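
Editor's note: for readers less familiar with the pagecache readahead API, the sketch below restates the loop that fill_read_buf() ends up with after this patch, with comments on when each readahead call fires. It is a simplified, kernel-context illustration, not the actual send.c code: the function name read_range and its parameters are made up for the example, and the PageUptodate wait and data copy are elided.

/*
 * Simplified kernel-context sketch of the readahead pattern introduced
 * by the patch above. read_range() and its parameters are illustrative
 * only; error handling and the actual data copy are elided.
 */
#include <linux/pagemap.h>
#include <linux/mm.h>

static int read_range(struct address_space *mapping, struct file_ra_state *ra,
		      pgoff_t index, pgoff_t last_index)
{
	struct page *page;

	while (index <= last_index) {
		/* Fast path: the page is often already in the page cache. */
		page = find_lock_page(mapping, index);
		if (!page) {
			/*
			 * Cache miss: issue synchronous readahead for the
			 * remaining range only now, rather than once,
			 * unconditionally, before the loop.
			 */
			page_cache_sync_readahead(mapping, ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(mapping, index, GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		/*
		 * Hitting a page marked PG_readahead means the previous
		 * readahead window is being consumed; queue asynchronous
		 * readahead so I/O stays ahead of the consumer without
		 * blocking it.
		 */
		if (PageReadahead(page))
			page_cache_async_readahead(mapping, ra, NULL, page,
						   index,
						   last_index + 1 - index);

		/* ... wait for PageUptodate, copy the data ... */

		unlock_page(page);
		put_page(page);
		index++;
	}
	return 0;
}

The design choice this illustrates: synchronous readahead is only triggered on an actual cache miss, so already-cached ranges are not re-submitted, and the PG_readahead marker lets the readahead state machine keep advancing the window asynchronously while the send loop keeps copying.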