#ifdef EROFS_FS_HAS_MANAGED_CACHE
#define EROFS_UNALLOCATED_CACHED_PAGE ((void *)0x5F0EF00D)
-extern int try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
	struct erofs_workgroup *egrp);
-extern int try_to_free_cached_page(struct address_space *mapping,
+extern int erofs_try_to_free_cached_page(struct address_space *mapping,
	struct page *page);
#endif
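/*
 * A hedged sketch, not part of the patch: EROFS_UNALLOCATED_CACHED_PAGE
 * reads as a poison-style sentinel for a compressed-page slot whose
 * managed-cache page has not been allocated, so a slot must be checked
 * against it before being dereferenced.  The helper below is a
 * hypothetical illustration, not an erofs API.
 */
static inline bool z_erofs_cached_slot_unallocated(struct page *page)
{
	return page == EROFS_UNALLOCATED_CACHED_PAGE;
}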
	BUG_ON(mapping->a_ops != &managed_cache_aops);
	if (PagePrivate(page))
-		ret = try_to_free_cached_page(mapping, page);
+		ret = erofs_try_to_free_cached_page(mapping, page);
	return ret;
}
}
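/*
 * A hedged sketch of the hook the fragment above appears to live in:
 * a releasepage address_space operation for the managed cache inode.
 * The function name and the locals are assumptions; only the BUG_ON,
 * the PagePrivate test and the erofs_try_to_free_cached_page call come
 * from the excerpt.
 */
static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 means the page is still busy */
	struct address_space *const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}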
/* called by erofs_shrinker to get rid of all compressed_pages */
-int try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
-				 struct erofs_workgroup *egrp)
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	return 0;
}
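/*
 * A hedged sketch of the elided body of the function above, following
 * the staging tree: with the workgroup refcount frozen at 1, walk the
 * compressed-page slots, trylock each cached page, detach it and drop
 * the reference.  Names not shown in the excerpt (managed_cache,
 * compressed_pages) are assumptions.
 */
	struct address_space *const mapping = sbi->managed_cache->i_mapping;
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	unsigned int i;

	for (i = 0; i < clusterpages; ++i) {
		struct page *const page = grp->compressed_pages[i];

		if (!page || page->mapping != mapping)
			continue;

		/* another reclaimer/migrator may hold the page lock */
		if (!trylock_page(page))
			return -EBUSY;

		/* detach the page from this workgroup */
		WRITE_ONCE(grp->compressed_pages[i], NULL);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		unlock_page(page);
		put_page(page);
	}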
-int try_to_free_cached_page(struct address_space *mapping, struct page *page)
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
}
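/*
 * A hedged sketch of the elided remainder of the function above: look
 * the page up in its owning workgroup under RCU, and only detach it if
 * the workgroup can be frozen (i.e. no decompression is in flight).
 * The freeze/unfreeze helpers and field names are assumptions beyond
 * what the excerpt shows.
 */
	struct z_erofs_vle_workgroup *grp;
	unsigned int i;
	int ret = 0;	/* 0 - busy */

	rcu_read_lock();
	grp = (void *)page_private(page);

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page) {
				WRITE_ONCE(grp->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);
	}
	rcu_read_unlock();

	if (ret) {
		ClearPagePrivate(page);
		put_page(page);
	}
	return ret;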
#ifdef EROFS_FS_HAS_MANAGED_CACHE
-	if (try_to_free_all_cached_pages(sbi, grp))
+	if (erofs_try_to_free_all_cached_pages(sbi, grp))
		goto skip;
	erofs_workgroup_unfreeze(grp, 1);
	spin_unlock(&erofs_sb_list_lock);
	sbi->shrinker_run_no = run_no;
- /* add scan handlers here */
+#ifdef CONFIG_EROFS_FS_ZIP
+	freed += erofs_shrink_workstation(sbi, nr, false);
+#endif
	spin_lock(&erofs_sb_list_lock);
	/* Get the next list element before we move this one */
	list_move_tail(&sbi->list, &erofs_sb_list);
	mutex_unlock(&sbi->umount_mutex);
-	freed += erofs_shrink_workstation(sbi, nr, false);
	if (freed >= nr)
		break;
}
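/*
 * A hedged sketch of how the scan path above is typically wired up:
 * the excerpted loop lives in a scan callback that, together with a
 * matching count callback, is plugged into a struct shrinker and
 * registered at module init.  erofs_shrink_scan, erofs_shrink_count
 * and the exact registration site are assumptions here.
 */
static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

/* e.g. in the module init path: register_shrinker(&erofs_shrinker_info); */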