	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
-	data_sum_blocks = npages_for_summary_flush(sbi);
+	data_sum_blocks = npages_for_summary_flush(sbi, false);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
void clear_prefree_segments(struct f2fs_sb_info *);
void release_discard_addrs(struct f2fs_sb_info *);
void discard_next_dnode(struct f2fs_sb_info *, block_t);
-int npages_for_summary_flush(struct f2fs_sb_info *);
+int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
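The new boolean selects how the data-summary block count is derived; a minimal sketch of the intended call pattern, based only on the two call sites visible in this patch (illustration, not part of the diff):

	/* checkpoint write path: count what is actually sitting in the cursegs */
	data_sum_blocks = npages_for_summary_flush(sbi, false);

	/* mount/recovery path: cursegs are not rebuilt yet, so size the
	 * readahead from the on-disk checkpoint's cur_data_blkoff[] */
	int npages = npages_for_summary_flush(sbi, true);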
/*
 * Calculate the number of current summary pages for writing
 */
-int npages_for_summary_flush(struct f2fs_sb_info *sbi)
+int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
-		else
-			valid_sum_count += curseg_blkoff(sbi, i);
+		else {
+			if (for_ra)
+				valid_sum_count += le16_to_cpu(
+					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
+			else
+				valid_sum_count += curseg_blkoff(sbi, i);
+		}
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
	int err;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+		int npages = npages_for_summary_flush(sbi, true);
+
+		if (npages >= 2)
+			ra_meta_pages(sbi, start_sum_block(sbi), npages,
+							META_CP);
+
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

+	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
+		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+					NR_CURSEG_TYPE - type, META_CP);
+
	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)