if (ref->key_for_search.type)
continue;
BUG_ON(!ref->wanted_disk_byte);
- eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
- 0);
+ eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
ref->level == 0) {
struct extent_buffer *eb;
- eb = read_tree_block(fs_info->extent_root,
- ref->parent, 0);
+ eb = read_tree_block(fs_info, ref->parent, 0);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
return submit_bio_wait(bio);
}
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
{
int ret;
struct btrfsic_state *state;
struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *device;
if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
btrfsic_is_initialized = 1;
}
mutex_lock(&btrfsic_mutex);
- state->fs_info = root->fs_info;
+ state->fs_info = fs_info;
state->print_mask = print_mask;
state->include_extent_data = including_extent_data;
state->csum_size = 0;
ret = btrfsic_process_superblock(state, fs_devices);
if (0 != ret) {
mutex_unlock(&btrfsic_mutex);
- btrfsic_unmount(root, fs_devices);
+ btrfsic_unmount(fs_devices);
return ret;
}
return 0;
}
-void btrfsic_unmount(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices)
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
{
struct btrfsic_block *b_all, *tmp_all;
struct btrfsic_state *state;
#define btrfsic_submit_bio_wait submit_bio_wait
#endif
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask);
-void btrfsic_unmount(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices);
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
#endif
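For context, the matching call sites are not in this hunk; under the new prototypes I'd expect the open_ctree() hook to look roughly like this (a reconstructed sketch using the fs_devices passed to open_ctree(), not part of the patch):

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(fs_info, fs_devices,
				    btrfs_test_opt(fs_info,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				"failed to initialize integrity check module: %d",
				ret);
	}
#endif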
u64 disk_start, struct bio *orig_bio,
size_t srclen);
-static inline int compressed_bio_size(struct btrfs_root *root,
+static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
return sizeof(struct compressed_bio) +
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_SIZE - 1));
- cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio,
- start, 1);
+ ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+ ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
- struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long pg_index;
return -EIO;
compressed_len = em->block_len;
- cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
goto out;
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(root, inode,
- comp_bio, sums);
+ ret = btrfs_lookup_bio_sums(inode, comp_bio,
+ sums);
BUG_ON(ret); /* -ENOMEM */
}
sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
fs_info->sectorsize);
- ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
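Worth spelling out in these compression.c hunks: btrfs_csum_one_bio() and btrfs_lookup_bio_sums() can drop the root argument because the root is always derivable from the inode. A sketch of the callee side (assumed, not shown in this diff):

int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	/* the root travels with the inode; no extra parameter needed */
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/* ... unchanged body, now using the derived root/fs_info ... */
}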
*root, struct btrfs_key *ins_key,
struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *dst,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *dst,
struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
*/
if (btrfs_block_can_be_shared(root, buf)) {
- ret = btrfs_lookup_extent_info(trans, root, buf->start,
+ ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
if (ret)
if (new_flags != 0) {
int level = btrfs_header_level(buf);
- ret = btrfs_set_disk_extent_flags(trans, root,
+ ret = btrfs_set_disk_extent_flags(trans, fs_info,
buf->start,
buf->len,
new_flags, level, 0);
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- old = read_tree_block(root, logical, 0);
+ old = read_tree_block(fs_info, logical, 0);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
if (!IS_ERR(old))
free_extent_buffer(old);
uptodate = 0;
if (!cur || !uptodate) {
if (!cur) {
- cur = read_tree_block(root, blocknr, gen);
+ cur = read_tree_block(fs_info, blocknr, gen);
if (IS_ERR(cur)) {
return PTR_ERR(cur);
} else if (!extent_buffer_uptodate(cur)) {
/* given a node and slot number, this reads the block it points to. The
* extent buffer is returned with a reference taken (but unlocked).
*/
-static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
- struct extent_buffer *parent, int slot)
+static noinline struct extent_buffer *
+read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
+ int slot)
{
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
BUG_ON(level == 0);
- eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+ eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
btrfs_node_ptr_generation(parent, slot));
if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return 0;
/* promote the child to a root */
- child = read_node_slot(root, mid, 0);
+ child = read_node_slot(fs_info, mid, 0);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
btrfs_handle_fs_error(fs_info, ret, NULL);
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
- left = read_node_slot(root, parent, pslot - 1);
+ left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
}
}
- right = read_node_slot(root, parent, pslot + 1);
+ right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
/* first, try to make some room in the middle buffer */
if (left) {
orig_slot += btrfs_header_nritems(left);
- wret = push_node_left(trans, root, left, mid, 1);
+ wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
* then try to empty the right most buffer into the middle
*/
if (right) {
- wret = push_node_left(trans, root, mid, right, 1);
+ wret = push_node_left(trans, fs_info, mid, right, 1);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (btrfs_header_nritems(right) == 0) {
btrfs_handle_fs_error(fs_info, ret, NULL);
goto enospc;
}
- wret = balance_node_right(trans, root, mid, left);
+ wret = balance_node_right(trans, fs_info, mid, left);
if (wret < 0) {
ret = wret;
goto enospc;
}
if (wret == 1) {
- wret = push_node_left(trans, root, left, mid, 1);
+ wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
if (!parent)
return 1;
- left = read_node_slot(root, parent, pslot - 1);
+ left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
if (ret)
wret = 1;
else {
- wret = push_node_left(trans, root,
+ wret = push_node_left(trans, fs_info,
left, mid, 0);
}
}
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
- right = read_node_slot(root, parent, pslot + 1);
+ right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
if (ret)
wret = 1;
else {
- wret = balance_node_right(trans, root,
+ wret = balance_node_right(trans, fs_info,
right, mid);
}
}
* readahead one full node of leaves, finding things that are close
* to the block in 'slot', and triggering readahead on them.
*/
-static void reada_for_search(struct btrfs_root *root,
+static void reada_for_search(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int level, int slot, u64 objectid)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *node;
struct btrfs_disk_key disk_key;
u32 nritems;
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
- readahead_tree_block(root, search);
+ readahead_tree_block(fs_info, search);
nread += blocksize;
}
nscan++;
}
}
-static noinline void reada_for_balance(struct btrfs_root *root,
+static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
int nritems;
struct extent_buffer *parent;
}
if (block1)
- readahead_tree_block(root, block1);
+ readahead_tree_block(fs_info, block1);
if (block2)
- readahead_tree_block(root, block2);
+ readahead_tree_block(fs_info, block2);
}
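The two readahead helpers illustrate why this conversion is safe: everything they consume is per-filesystem state (assuming nodesize had already been moved onto fs_info by the preceding patches in the series), so their data dependencies boil down to:

	/* distilled from reada_for_search()/reada_for_balance() */
	u32 blocksize = fs_info->nodesize;	/* was root->nodesize */
	readahead_tree_block(fs_info, search);	/* fire-and-forget readahead */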
free_extent_buffer(tmp);
if (p->reada != READA_NONE)
- reada_for_search(root, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, level, slot, key->objectid);
btrfs_release_path(p);
ret = -EAGAIN;
- tmp = read_tree_block(root, blocknr, 0);
+ tmp = read_tree_block(fs_info, blocknr, 0);
if (!IS_ERR(tmp)) {
/*
* If the read above didn't mark this buffer up to date,
}
btrfs_set_path_blocking(p);
- reada_for_balance(root, p, level);
+ reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
}
btrfs_set_path_blocking(p);
- reada_for_balance(root, p, level);
+ reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
} else {
p->slots[level] = slot;
if (ins_len > 0 &&
- btrfs_leaf_free_space(root, b) < ins_len) {
+ btrfs_leaf_free_space(fs_info, b) < ins_len) {
if (write_lock_level < 1) {
write_lock_level = 1;
btrfs_release_path(p);
* error, and > 0 if there was no room in the left hand block.
*/
static int push_node_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *dst,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *dst,
struct extent_buffer *src, int empty)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int push_items = 0;
int src_nritems;
int dst_nritems;
* this will only push up to 1/2 the contents of the left node over
*/
static int balance_node_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *dst,
struct extent_buffer *src)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int push_items = 0;
int max_push;
int src_nritems;
* blocknr is the block the key points to.
*/
static void insert_ptr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_fs_info *fs_info, struct btrfs_path *path,
struct btrfs_disk_key *key, u64 bytenr,
int slot, int level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *lower;
int nritems;
int ret;
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
- insert_ptr(trans, root, path, &disk_key, split->start,
+ insert_ptr(trans, fs_info, path, &disk_key, split->start,
path->slots[level + 1] + 1, level + 1);
if (path->slots[level] >= mid) {
* the start of the leaf data. IOW, how much room
* the leaf has left for both items and data
*/
-noinline int btrfs_leaf_free_space(struct btrfs_root *root,
+noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int nritems = btrfs_header_nritems(leaf);
int ret;
* right. We'll push up to and including min_slot, but no lower
*/
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int data_size, int empty,
struct extent_buffer *right,
int free_space, u32 left_nritems,
u32 min_slot)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
struct btrfs_map_token token;
if (path->slots[0] > i)
break;
if (path->slots[0] == i) {
- int space = btrfs_leaf_free_space(root, left);
+ int space = btrfs_leaf_free_space(fs_info, left);
if (space + push_space * 2 > free_space)
break;
}
right_nritems = btrfs_header_nritems(right);
push_space = btrfs_item_end_nr(left, left_nritems - push_items);
- push_space -= leaf_data_end(root, left);
+ push_space -= leaf_data_end(fs_info, left);
/* make room in the right data area */
- data_end = leaf_data_end(root, right);
+ data_end = leaf_data_end(fs_info, right);
memmove_extent_buffer(right,
btrfs_leaf_data(right) + data_end - push_space,
btrfs_leaf_data(right) + data_end,
/* copy from the left data area */
copy_extent_buffer(right, left, btrfs_leaf_data(right) +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
- btrfs_leaf_data(left) + leaf_data_end(root, left),
+ btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
int min_data_size, int data_size,
int empty, u32 min_slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
struct extent_buffer *upper;
btrfs_assert_tree_locked(path->nodes[1]);
- right = read_node_slot(root, upper, slot + 1);
+ right = read_node_slot(fs_info, upper, slot + 1);
/*
* slot + 1 is not valid or we fail to read the right node,
* no big deal, just return.
*/
btrfs_tree_lock(right);
btrfs_set_lock_blocking(right);
- free_space = btrfs_leaf_free_space(root, right);
+ free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
if (ret)
goto out_unlock;
- free_space = btrfs_leaf_free_space(root, right);
+ free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
return 0;
}
- return __push_leaf_right(trans, root, path, min_data_size, empty,
+ return __push_leaf_right(trans, fs_info, path, min_data_size, empty,
right, free_space, left_nritems, min_slot);
out_unlock:
btrfs_tree_unlock(right);
* items
*/
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int data_size,
int empty, struct extent_buffer *left,
int free_space, u32 right_nritems,
u32 max_slot)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *right = path->nodes[0];
int i;
if (path->slots[0] < i)
break;
if (path->slots[0] == i) {
- int space = btrfs_leaf_free_space(root, right);
+ int space = btrfs_leaf_free_space(fs_info, right);
if (space + push_space * 2 > free_space)
break;
}
btrfs_item_offset_nr(right, push_items - 1);
copy_extent_buffer(left, right, btrfs_leaf_data(left) +
- leaf_data_end(root, left) - push_space,
+ leaf_data_end(fs_info, left) - push_space,
btrfs_leaf_data(right) +
btrfs_item_offset_nr(right, push_items - 1),
push_space);
if (push_items < right_nritems) {
push_space = btrfs_item_offset_nr(right, push_items - 1) -
- leaf_data_end(root, right);
+ leaf_data_end(fs_info, right);
memmove_extent_buffer(right, btrfs_leaf_data(right) +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
btrfs_leaf_data(right) +
- leaf_data_end(root, right), push_space);
+ leaf_data_end(fs_info, right), push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(push_items),
*root, struct btrfs_path *path, int min_data_size,
int data_size, int empty, u32 max_slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = path->nodes[0];
struct extent_buffer *left;
int slot;
btrfs_assert_tree_locked(path->nodes[1]);
- left = read_node_slot(root, path->nodes[1], slot - 1);
+ left = read_node_slot(fs_info, path->nodes[1], slot - 1);
/*
* slot - 1 is not valid or we fail to read the left node,
* no big deal, just return.
*/
btrfs_tree_lock(left);
btrfs_set_lock_blocking(left);
- free_space = btrfs_leaf_free_space(root, left);
+ free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
goto out;
}
- free_space = btrfs_leaf_free_space(root, left);
+ free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
}
- return __push_leaf_left(trans, root, path, min_data_size,
+ return __push_leaf_left(trans, fs_info, path, min_data_size,
empty, left, free_space, right_nritems,
max_slot);
out:
* available for the resulting leaf level of the path.
*/
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct extent_buffer *l,
struct extent_buffer *right,
int slot, int mid, int nritems)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int data_copy_size;
int rt_data_off;
int i;
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
- data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
+ data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(mid),
copy_extent_buffer(right, l,
btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
data_copy_size, btrfs_leaf_data(l) +
- leaf_data_end(root, l), data_copy_size);
+ leaf_data_end(fs_info, l), data_copy_size);
rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
btrfs_set_header_nritems(l, mid);
btrfs_item_key(right, &disk_key, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
+ insert_ptr(trans, fs_info, path, &disk_key, right->start,
path->slots[1] + 1, 1);
btrfs_mark_buffer_dirty(right);
struct btrfs_path *path,
int data_size)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int progress = 0;
int slot;
slot = path->slots[0];
if (slot < btrfs_header_nritems(path->nodes[0]))
- space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
+ space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
/*
* try to push all the items after our slot into the
* right leaf
*/
if (path->slots[0] == 0 || path->slots[0] == nritems)
return 0;
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
/* try to push all the items before our slot into the next leaf */
int space_needed = data_size;
if (slot < btrfs_header_nritems(l))
- space_needed -= btrfs_leaf_free_space(root, l);
+ space_needed -= btrfs_leaf_free_space(fs_info, l);
wret = push_leaf_right(trans, root, path, space_needed,
space_needed, 0, 0);
l = path->nodes[0];
/* did the pushes work? */
- if (btrfs_leaf_free_space(root, l) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, l) >= data_size)
return 0;
}
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1] + 1, 1);
+ insert_ptr(trans, fs_info, path, &disk_key,
+ right->start, path->slots[1] + 1, 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
path->slots[1] += 1;
} else {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1], 1);
+ insert_ptr(trans, fs_info, path, &disk_key,
+ right->start, path->slots[1], 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
return ret;
}
- copy_for_split(trans, root, path, l, right, slot, mid, nritems);
+ copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
if (split == 2) {
BUG_ON(num_doubles != 0);
push_for_double:
push_for_double_split(trans, root, path, data_size);
tried_avoid_double = 1;
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
goto again;
}
struct btrfs_root *root,
struct btrfs_path *path, int ins_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
- if (btrfs_leaf_free_space(root, leaf) >= ins_len)
+ if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
return 0;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
goto err;
/* the leaf has changed, it now has room. return now */
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
goto err;
if (key.type == BTRFS_EXTENT_DATA_KEY) {
}
static noinline int split_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *new_key,
unsigned long split_offset)
struct btrfs_disk_key disk_key;
leaf = path->nodes[0];
- BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+ BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
btrfs_set_path_blocking(path);
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
- BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
+ BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
kfree(buf);
return 0;
}
if (ret)
return ret;
- ret = split_item(trans, root, path, new_key, split_offset);
+ ret = split_item(trans, root->fs_info, path, new_key, split_offset);
return ret;
}
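Since only the tail of btrfs_split_item() appears above, here is the whole wrapper as I reconstruct it; the exported function stays root-based and unwraps fs_info at the internal boundary (treat as a sketch):

int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	/* make sure the leaf has room before doing the actual split */
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;
	ret = split_item(trans, root->fs_info, path, new_key, split_offset);
	return ret;
}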
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
- u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u32 new_size, int from_end)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
return;
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
old_data_start = btrfs_item_offset_nr(leaf, slot);
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
- if (btrfs_leaf_free_space(root, leaf) < data_size) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
slot = path->slots[0];
BUG_ON(slot < 0);
if (slot >= nritems) {
- btrfs_print_leaf(root, leaf);
+ btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "slot %d too large, nritems %d",
slot, nritems);
BUG_ON(1);
btrfs_set_item_size(leaf, item, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
slot = path->slots[0];
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
- if (btrfs_leaf_free_space(root, leaf) < total_size) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
+ btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "not enough freespace need %u have %d",
- total_size, btrfs_leaf_free_space(root, leaf));
+ total_size, btrfs_leaf_free_space(fs_info, leaf));
BUG();
}
unsigned int old_data = btrfs_item_end_nr(leaf, slot);
if (old_data < data_end) {
- btrfs_print_leaf(root, leaf);
+ btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
slot, old_data, data_end);
BUG_ON(1);
btrfs_set_header_nritems(leaf, nritems + nr);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
nritems = btrfs_header_nritems(leaf);
if (slot + nr != nritems) {
- int data_end = leaf_data_end(root, leaf);
+ int data_end = leaf_data_end(fs_info, leaf);
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end + dsize,
struct btrfs_path *path,
u64 min_trans)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
struct btrfs_key found_key;
int slot;
goto out;
}
btrfs_set_path_blocking(path);
- cur = read_node_slot(root, cur, slot);
+ cur = read_node_slot(fs_info, cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
goto out;
return ret;
}
-static int tree_move_down(struct btrfs_root *root,
+static int tree_move_down(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
struct extent_buffer *eb;
BUG_ON(*level == 0);
- eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
+ eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
if (IS_ERR(eb))
return PTR_ERR(eb);
return 0;
}
-static int tree_move_next_or_upnext(struct btrfs_root *root,
+static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
* Returns 1 if it had to move up and next. 0 is returned if it moved only next
* or down.
*/
-static int tree_advance(struct btrfs_root *root,
+static int tree_advance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level,
int allow_down,
int ret;
if (*level == 0 || !allow_down) {
- ret = tree_move_next_or_upnext(root, path, level, root_level);
+ ret = tree_move_next_or_upnext(fs_info, path, level,
+ root_level);
} else {
- ret = tree_move_down(root, path, level, root_level);
+ ret = tree_move_down(fs_info, path, level, root_level);
}
if (ret >= 0) {
if (*level == 0)
return ret;
}
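One detail that makes the tree_advance()/tree_compare_item() conversion safe: btrfs_compare_trees() only ever compares two roots of the same filesystem (it is the send-path helper), so a single fs_info can stand in for both left_root and right_root, presumably hoisted once at the top:

	/* assumed hoist at the top of btrfs_compare_trees() */
	struct btrfs_fs_info *fs_info = left_root->fs_info;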
-static int tree_compare_item(struct btrfs_root *left_root,
- struct btrfs_path *left_path,
+static int tree_compare_item(struct btrfs_path *left_path,
struct btrfs_path *right_path,
char *tmp_buf)
{
while (1) {
if (advance_left && !left_end_reached) {
- ret = tree_advance(left_root, left_path, &left_level,
+ ret = tree_advance(fs_info, left_path, &left_level,
left_root_level,
advance_left != ADVANCE_ONLY_NEXT,
&left_key);
advance_left = 0;
}
if (advance_right && !right_end_reached) {
- ret = tree_advance(right_root, right_path, &right_level,
+ ret = tree_advance(fs_info, right_path, &right_level,
right_root_level,
advance_right != ADVANCE_ONLY_NEXT,
&right_key);
enum btrfs_compare_tree_result result;
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
- ret = tree_compare_item(left_root, left_path,
- right_path, tmp_buf);
+ ret = tree_compare_item(left_path, right_path,
+ tmp_buf);
if (ret)
result = BTRFS_COMPARE_TREE_CHANGED;
else
#ifdef CONFIG_BTRFS_DEBUG
static inline int
-btrfs_should_fragment_free_space(struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group)
+btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
* this returns the address of the start of the last item,
* which is the stop of the leaf data stack
*/
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
+static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
/* extent-tree.c */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, unsigned long count);
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info, unsigned long count);
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags);
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes);
-int btrfs_exclude_logged_extents(struct btrfs_root *root,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset);
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
- int delalloc);
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 len, int delalloc);
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
+ struct btrfs_fs_info *fs_info);
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytes_used,
+ struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
struct btrfs_block_rsv *rsv,
int nitems,
u64 *qgroup_reserved, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, int min_factor);
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache);
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
-int btrfs_error_unpin_extent_range(struct btrfs_root *root,
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 type);
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
+ struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const u64 type);
+ struct btrfs_fs_info *fs_info, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end);
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size);
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
- u32 new_size, int from_end);
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
{
return btrfs_next_old_item(root, p, 0);
}
-int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
int update_ref, int for_reloc);
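The caller-side effect of all this header churn is mechanical: code that still holds only a root passes root->fs_info at the call site. A representative (hypothetical) conversion:

-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, root->fs_info, (unsigned long)-1);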
* anything except sleeping. This function is used to check the status of
* the fs.
*/
-static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
+static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
- return (fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info));
+ return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
}
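All callers of btrfs_need_cleaner_sleep() are kthreads that already hold fs_info, so I'd expect the corresponding (not shown) update in cleaner_kthread() to be simply:

-	if (btrfs_need_cleaner_sleep(root))
+	if (btrfs_need_cleaner_sleep(fs_info))
 		goto sleep;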
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 logical_offset);
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+ u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 file_start, int contig);
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+ u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
-void btrfs_run_delayed_iputs(struct btrfs_root *root);
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+ size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
int readonly, int is_dev_replace);
-void btrfs_scrub_pause(struct btrfs_root *root);
-void btrfs_scrub_continue(struct btrfs_root *root);
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress);
/* dev-replace.c */
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv;
u64 num_bytes;
return ret;
}
-static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv;
if (!item->bytes_reserved)
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->bytes_reserved,
0);
- btrfs_block_rsv_release(root, rsv,
+ btrfs_block_rsv_release(fs_info, rsv,
item->bytes_reserved);
}
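The reserve/release pair above never touches per-root state; distilled, everything it needs hangs off fs_info (the 1-item size calculation is illustrative):

	struct btrfs_block_rsv *rsv = &fs_info->delayed_block_rsv;
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);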
if (release) {
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 0);
- btrfs_block_rsv_release(root, src_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
}
return ret;
}
-static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv;
if (!node->bytes_reserved)
rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(fs_info, "delayed_inode",
node->inode_id, node->bytes_reserved, 0);
- btrfs_block_rsv_release(root, rsv,
+ btrfs_block_rsv_release(fs_info, rsv,
node->bytes_reserved);
node->bytes_reserved = 0;
}
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
int free_space;
int total_data_size = 0, total_size = 0;
BUG_ON(!path->nodes[0]);
leaf = path->nodes[0];
- free_space = btrfs_leaf_free_space(root, leaf);
+ free_space = btrfs_leaf_free_space(fs_info, leaf);
INIT_LIST_HEAD(&head);
next = item;
curr->data_len);
slot++;
- btrfs_delayed_item_release_metadata(root, curr);
+ btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
struct btrfs_path *path,
struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
char *ptr;
int ret;
delayed_item->data_len);
btrfs_mark_buffer_dirty(leaf);
- btrfs_delayed_item_release_metadata(root, delayed_item);
+ btrfs_delayed_item_release_metadata(fs_info, delayed_item);
return 0;
}
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
struct extent_buffer *leaf;
struct btrfs_key key;
goto out;
list_for_each_entry_safe(curr, next, &head, tree_list) {
- btrfs_delayed_item_release_metadata(root, curr);
+ btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
struct btrfs_path *path,
struct btrfs_delayed_node *node)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
no_iref:
btrfs_release_path(path);
err_out:
- btrfs_delayed_inode_release_metadata(root, node);
+ btrfs_delayed_inode_release_metadata(fs_info, node);
btrfs_release_delayed_inode(node);
return ret;
* outstanding delayed items cleaned up.
*/
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr)
+ struct btrfs_fs_info *fs_info, int nr)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- return __btrfs_run_delayed_items(trans, root, -1);
+ return __btrfs_run_delayed_items(trans, fs_info, -1);
}
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr)
+ struct btrfs_fs_info *fs_info, int nr)
{
- return __btrfs_run_delayed_items(trans, root, nr);
+ return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
- trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+ trans->block_rsv = &fs_info->delayed_block_rsv;
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
trans->block_rsv = block_rsv;
trans_out:
btrfs_end_transaction(trans, delayed_node->root);
- btrfs_btree_balance_dirty(delayed_node->root);
+ btrfs_btree_balance_dirty(fs_info);
out:
btrfs_release_delayed_node(delayed_node);
trans->block_rsv = block_rsv;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty_nodelay(root);
+ btrfs_btree_balance_dirty_nodelay(root->fs_info);
release_path:
btrfs_release_path(path);
return 0;
}
-void btrfs_balance_delayed_items(struct btrfs_root *root)
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
-
- delayed_root = fs_info->delayed_root;
+ struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
return;
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- int name_len, struct inode *dir,
+ struct btrfs_fs_info *fs_info,
+ const char *name, int name_len,
+ struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index)
{
btrfs_set_stack_dir_type(dir_item, type);
memcpy((char *)(dir_item + 1), name, name_len);
- ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+ ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
/*
* we have reserved enough space when we start a new transaction,
* so failing to reserve metadata is impossible
*/
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
return ret;
}
-static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node,
struct btrfs_key *key)
{
return 1;
}
- btrfs_delayed_item_release_metadata(root, item);
+ btrfs_delayed_item_release_metadata(fs_info, item);
btrfs_release_delayed_item(item);
mutex_unlock(&node->mutex);
return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *dir,
- u64 index)
+ struct btrfs_fs_info *fs_info,
+ struct inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
- ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+ ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
if (!ret)
goto end;
item->key = item_key;
- ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+ ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
/*
* we have reserved enough space when we start a new transaction,
* so failing to reserve metadata is impossible.
*/
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
struct btrfs_root *root = delayed_node->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr_item, *prev_item;
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
- btrfs_delayed_item_release_metadata(root, curr_item);
+ btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
- btrfs_delayed_item_release_metadata(root, curr_item);
+ btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
btrfs_release_delayed_iref(delayed_node);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- btrfs_delayed_inode_release_metadata(root, delayed_node);
+ btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
btrfs_release_delayed_inode(delayed_node);
}
mutex_unlock(&delayed_node->mutex);
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- int name_len, struct inode *dir,
+ struct btrfs_fs_info *fs_info,
+ const char *name, int name_len,
+ struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *dir,
- u64 index);
+ struct btrfs_fs_info *fs_info,
+ struct inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr);
+ struct btrfs_fs_info *fs_info, int nr);
-void btrfs_balance_delayed_items(struct btrfs_root *root);
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info);
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct inode *inode);
dev_replace->cursor_left_last_write_of_item;
}
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src)
{
+ struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret;
struct btrfs_device *tgt_device = NULL;
/* the disk copy procedure reuses the scrub code */
mutex_lock(&fs_info->volume_mutex);
- ret = btrfs_find_device_by_devspec(root, srcdevid,
+ ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
srcdev_name, &src_device);
if (ret) {
mutex_unlock(&fs_info->volume_mutex);
return ret;
}
- ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name,
+ ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
mutex_unlock(&fs_info->volume_mutex);
if (ret)
return ret;
}
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
int ret;
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
- ret = btrfs_dev_replace_start(root, args->start.tgtdev_name,
+ ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
args->start.srcdev_name,
args->start.cont_reading_from_srcdev_mode);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src);
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
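The ioctl dispatcher is not part of this diff; presumably it changes along these lines:

-	ret = btrfs_dev_replace_by_ioctl(root, p);
+	ret = btrfs_dev_replace_by_ioctl(fs_info, p);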
const char *name,
int name_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
struct btrfs_item *item;
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- btrfs_extend_item(root, path, data_size);
+ btrfs_extend_item(fs_info, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
}
btrfs_release_path(path);
- ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
- &disk_key, type, index);
+ ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
+ name_len, dir, &disk_key, type,
+ index);
out_free:
btrfs_free_path(path);
if (ret)
if (ret > 0)
return NULL;
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
}
/* we found an item, look for our name in the item */
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
if (di) {
/* our exact name was found */
ret = -EEXIST;
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
struct btrfs_dir_item *
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(root->fs_info, path,
+ name, name_len);
if (di)
return di;
if (ret > 0)
return NULL;
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
/*
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name, int name_len)
{
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
- if (verify_dir_item(root, leaf, dir_item))
+ if (verify_dir_item(fs_info, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
+ btrfs_truncate_item(root->fs_info, path,
+ item_len - sub_item_len, 1);
}
return ret;
}
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u16 namelen = BTRFS_NAME_LEN;
u8 type = btrfs_dir_type(leaf, dir_item);
int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
-static void btrfs_error_commit_super(struct btrfs_root *root);
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
* btrfs_end_io_wq structs are used to do processing in task context when an IO
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
*/
-static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
u64 parent_transid)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree;
int failed = 0;
int ret;
}
if (failed && !ret && failed_mirror)
- repair_eb_io_failure(root, eb, failed_mirror);
+ repair_eb_io_failure(fs_info, eb, failed_mirror);
return ret;
}
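/*
 * Editor's aside, not part of the patch: the retry-over-mirrors behaviour
 * the comment above describes, reduced to its shape. read_one_mirror()
 * and num_copies are illustrative names, not the kernel API.
 */
static int read_retrying_mirrors(struct extent_buffer *eb, int num_copies)
{
	int mirror, ret = -EIO;

	for (mirror = 1; mirror <= num_copies; mirror++) {
		ret = read_one_mirror(eb, mirror); /* 0 iff csums verify */
		if (!ret)
			break; /* found a good copy, stop retrying */
	}
	return ret;
}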
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
.set_page_dirty = btree_set_page_dirty,
};
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
free_extent_buffer(buf);
}
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return 0;
return 0;
}
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
- u64 bytenr)
+struct extent_buffer *btrfs_find_create_tree_block(
+ struct btrfs_fs_info *fs_info,
+ u64 bytenr)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (btrfs_is_testing(fs_info))
return alloc_test_extent_buffer(fs_info, bytenr);
return alloc_extent_buffer(fs_info, bytenr);
buf->start, buf->start + buf->len - 1);
}
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
- ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
+ ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
}
generation = btrfs_root_generation(&root->root_item);
- root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+ root->node = read_tree_block(fs_info,
+ btrfs_root_bytenr(&root->root_item),
generation);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
again = 0;
/* Make the cleaner go to sleep early. */
- if (btrfs_need_cleaner_sleep(root))
+ if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
/*
* Avoid the problem that we change the status of the fs
* during the above check and trylock.
*/
- if (btrfs_need_cleaner_sleep(root)) {
+ if (btrfs_need_cleaner_sleep(fs_info)) {
mutex_unlock(&fs_info->cleaner_mutex);
goto sleep;
}
mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root);
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
&fs_info->fs_state)))
- btrfs_cleanup_transaction(root);
+ btrfs_cleanup_transaction(fs_info);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
(!btrfs_transaction_blocked(fs_info) ||
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log_root_tree(NULL, fs_info);
- btrfs_destroy_pinned_extent(fs_info->tree_root,
- fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
}
}
static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
- fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
- set_nlink(fs_info->btree_inode, 1);
+ struct inode *inode = fs_info->btree_inode;
+
+ inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible int.
* the real end of the address space is determined by all of
* the devices in the system
*/
- fs_info->btree_inode->i_size = OFFSET_MAX;
- fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &btree_aops;
- RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
- extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping);
- BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+ RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+ extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+ BTRFS_I(inode)->io_tree.track_uptodate = 0;
+ extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
- BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+ BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
- BTRFS_I(fs_info->btree_inode)->root = fs_info->tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- set_bit(BTRFS_INODE_DUMMY,
- &BTRFS_I(fs_info->btree_inode)->runtime_flags);
- btrfs_insert_inode_hash(fs_info->btree_inode);
+ BTRFS_I(inode)->root = fs_info->tree_root;
+ memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+ set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+ btrfs_insert_inode_hash(inode);
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
struct btrfs_fs_devices *fs_devices)
{
int ret;
- struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *log_tree_root;
struct btrfs_super_block *disk_super = fs_info->super_copy;
u64 bytenr = btrfs_super_log_root(disk_super);
__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
- log_tree_root->node = read_tree_block(tree_root, bytenr,
- fs_info->generation + 1);
+ log_tree_root->node = read_tree_block(fs_info, bytenr,
+ fs_info->generation + 1);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
- ret = btrfs_parse_options(tree_root, options, sb->s_flags);
+ ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
goto fail_alloc;
__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
- chunk_root->node = read_tree_block(chunk_root,
+ chunk_root->node = read_tree_block(fs_info,
btrfs_super_chunk_root(disk_super),
generation);
if (IS_ERR(chunk_root->node) ||
retry_root_backup:
generation = btrfs_super_generation(disk_super);
- tree_root->node = read_tree_block(tree_root,
+ tree_root->node = read_tree_block(fs_info,
btrfs_super_root(disk_super),
generation);
if (IS_ERR(tree_root->node) ||
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
- ret = btrfsic_mount(tree_root, fs_devices,
+ ret = btrfsic_mount(fs_info, fs_devices,
btrfs_test_opt(fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
- btrfs_cleanup_transaction(fs_info->tree_root);
+ btrfs_cleanup_transaction(fs_info);
btrfs_free_fs_roots(fs_info);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
return num_tolerated_disk_barrier_failures;
}
-static int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *head;
struct btrfs_device *dev;
struct btrfs_super_block *sb;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int max_mirrors)
+ struct btrfs_fs_info *fs_info, int max_mirrors)
{
- return write_all_supers(root, max_mirrors);
+ return write_all_supers(fs_info, max_mirrors);
}
/* Drop a fs root from the radix tree and free it. */
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
- btrfs_free_block_rsv(root, root->orphan_block_rsv);
+ btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
struct btrfs_trans_handle *trans;
mutex_lock(&fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
wake_up_process(fs_info->cleaner_kthread);
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
- btrfs_error_commit_super(root);
+ btrfs_error_commit_super(fs_info);
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
- btrfsic_unmount(root, fs_info->fs_devices);
+ btrfsic_unmount(fs_info->fs_devices);
#endif
btrfs_close_devices(fs_info->fs_devices);
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
- btrfs_print_leaf(root, buf);
+ btrfs_print_leaf(fs_info, buf);
ASSERT(0);
}
#endif
}
-static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
int flush_delayed)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
/*
* looks as though older kernels can get into trouble with
* this code; they end up stuck in balance_dirty_pages forever
return;
if (flush_delayed)
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH);
}
}
-void btrfs_btree_balance_dirty(struct btrfs_root *root)
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 1);
+ __btrfs_btree_balance_dirty(fs_info, 1);
}
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 0);
+ __btrfs_btree_balance_dirty(fs_info, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- return btree_read_extent_buffer_pages(root, buf, parent_transid);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
return ret;
}
-static void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
mutex_lock(&fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
/* cleanup FS via transaction */
- btrfs_cleanup_transaction(root);
+ btrfs_cleanup_transaction(fs_info);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
mutex_unlock(&head->mutex);
if (pin_bytes)
- btrfs_pin_extent(root, head->node.bytenr,
+ btrfs_pin_extent(fs_info, head->node.bytenr,
head->node.num_bytes, 1);
btrfs_put_delayed_ref(&head->node);
cond_resched();
spin_unlock(&fs_info->delalloc_root_lock);
}
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
u64 start = 0;
return ret;
}
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *unpin;
u64 start;
u64 end;
break;
clear_extent_dirty(unpin, start, end);
- btrfs_error_unpin_extent_range(root, start, end);
+ btrfs_error_unpin_extent_range(fs_info, start, end);
cond_resched();
}
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
spin_lock(&cur_trans->dirty_bgs_lock);
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- btrfs_cleanup_dirty_bgs(cur_trans, root);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
- btrfs_destroy_delayed_refs(cur_trans, root);
+ btrfs_destroy_delayed_refs(cur_trans, fs_info);
cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&fs_info->transaction_blocked_wait);
btrfs_destroy_delayed_inodes(fs_info);
btrfs_assert_delayed_root_empty(fs_info);
- btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+ btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
- btrfs_destroy_pinned_extent(root,
+ btrfs_destroy_pinned_extent(fs_info,
fs_info->pinned_extents);
cur_trans->state = TRANS_STATE_COMPLETED;
*/
}
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *t;
mutex_lock(&fs_info->transaction_kthread_mutex);
if (t->state >= TRANS_STATE_COMMIT_START) {
atomic_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
- btrfs_wait_for_commit(root, t->transid);
+ btrfs_wait_for_commit(fs_info, t->transid);
btrfs_put_transaction(t);
spin_lock(&fs_info->trans_lock);
continue;
} else {
spin_unlock(&fs_info->trans_lock);
}
- btrfs_cleanup_one_transaction(t, root);
+ btrfs_cleanup_one_transaction(t, fs_info);
spin_lock(&fs_info->trans_lock);
if (t == fs_info->running_transaction)
spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t);
- trace_btrfs_transaction_commit(root);
+ trace_btrfs_transaction_commit(fs_info->tree_root);
spin_lock(&fs_info->trans_lock);
}
spin_unlock(&fs_info->trans_lock);
btrfs_destroy_all_ordered_extents(fs_info);
btrfs_destroy_delayed_inodes(fs_info);
btrfs_assert_delayed_root_empty(fs_info);
- btrfs_destroy_pinned_extent(root, fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
btrfs_destroy_all_delalloc_inodes(fs_info);
mutex_unlock(&fs_info->transaction_kthread_mutex);
struct btrfs_device;
struct btrfs_fs_devices;
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
- u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 parent_transid);
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb);
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
- u64 bytenr);
+struct extent_buffer *btrfs_find_create_tree_block(
+ struct btrfs_fs_info *fs_info,
+ u64 bytenr);
void clean_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
char *options);
void close_ctree(struct btrfs_fs_info *fs_info);
int write_ctree_super(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int max_mirrors);
+ struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
struct buffer_head **bh_ret);
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-void btrfs_btree_balance_dirty(struct btrfs_root *root);
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_root *root);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 objectid);
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 flags,
+ struct btrfs_fs_info *fs_info, u64 flags,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-int btrfs_pin_extent(struct btrfs_root *root,
- u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_space_info *space_info,
u64 orig_bytes,
return ret;
}
-static int add_excluded_extent(struct btrfs_root *root,
+static int add_excluded_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 end = start + num_bytes - 1;
set_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
return 0;
}
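/*
 * Editor's note: freed_extents[0]/[1] tag ranges such as the superblock
 * stripes with EXTENT_UPTODATE so the allocator skips them while the
 * block group's free space is being cached; exclude_super_stripes()
 * below is the producer of these ranges.
 */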
-static void free_excluded_extents(struct btrfs_root *root,
+static void free_excluded_extents(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 start, end;
start = cache->key.objectid;
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_root *root,
+static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 *logical;
int stripe_len;
if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
cache->bytes_super += stripe_len;
- ret = add_excluded_extent(root, cache->key.objectid,
+ ret = add_excluded_extent(fs_info, cache->key.objectid,
stripe_len);
if (ret)
return ret;
}
cache->bytes_super += len;
- ret = add_excluded_extent(root, start, len);
+ ret = add_excluded_extent(fs_info, start, len);
if (ret) {
kfree(logical);
return ret;
}
#ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 start = block_group->key.objectid;
u64 len = block_group->key.offset;
u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
* allocate from this block group until we've had a chance to fragment
* the free space.
*/
- if (btrfs_should_fragment_free_space(extent_root, block_group))
+ if (btrfs_should_fragment_free_space(block_group))
wakeup = false;
#endif
/*
spin_unlock(&block_group->lock);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(extent_root, block_group)) {
+ if (btrfs_should_fragment_free_space(block_group)) {
u64 bytes_used;
spin_lock(&block_group->space_info->lock);
block_group->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&block_group->lock);
spin_unlock(&block_group->space_info->lock);
- fragment_free_space(extent_root, block_group);
+ fragment_free_space(block_group);
}
#endif
caching_ctl->progress = (u64)-1;
up_read(&fs_info->commit_root_sem);
- free_excluded_extents(fs_info->extent_root, block_group);
+ free_excluded_extents(fs_info, block_group);
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
if (ret == 1 &&
- btrfs_should_fragment_free_space(fs_info->extent_root,
- cache)) {
+ btrfs_should_fragment_free_space(cache)) {
u64 bytes_used;
spin_lock(&cache->space_info->lock);
cache->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- fragment_free_space(fs_info->extent_root, cache);
+ fragment_free_space(cache);
}
#endif
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
if (ret == 1) {
put_caching_control(caching_ctl);
- free_excluded_extents(fs_info->extent_root, cache);
+ free_excluded_extents(fs_info, cache);
return 0;
}
} else {
}
/* simple helper to search for an existing data extent at a given offset */
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key key;
struct btrfs_path *path;
* the delayed refs are not processed.
*/
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_path *path;
return ret;
BUG_ON(ret); /* Corruption */
- btrfs_extend_item(root, path, new_size);
+ btrfs_extend_item(root->fs_info, path, new_size);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(root, path, size);
+ btrfs_extend_item(root->fs_info, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(root, path, item_size, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
}
return ret;
}
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u64 discarded_bytes = 0;
struct btrfs_bio *bbio = NULL;
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
root_objectid == BTRFS_TREE_LOG_OBJECTID);
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
struct btrfs_delayed_extent_op *extent_op)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_extent_item *item;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
struct btrfs_delayed_data_ref *ref;
struct btrfs_key ins;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, root,
+ ret = alloc_reserved_file_extent(trans, fs_info,
parent, ref_root, flags,
ref->objectid, ref->offset,
&ins, node->ref_mod);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, root, node, parent,
+ ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, root, node, parent,
+ ret = __btrfs_free_extent(trans, fs_info, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
struct btrfs_delayed_tree_ref *ref;
struct btrfs_key ins;
}
if (node->ref_mod != 1) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
- ret = alloc_reserved_tree_block(trans, root,
+ ret = alloc_reserved_tree_block(trans, fs_info,
parent, ref_root,
extent_op->flags_to_set,
&extent_op->key,
ref->level, &ins);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, root, node,
+ ret = __btrfs_inc_extent_ref(trans, fs_info, node,
parent, ref_root,
ref->level, 0, 1,
extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, root, node,
+ ret = __btrfs_free_extent(trans, fs_info, node,
parent, ref_root,
ref->level, 0, 1, extent_op);
} else {
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
if (trans->aborted) {
if (insert_reserved)
- btrfs_pin_extent(root, node->bytenr,
+ btrfs_pin_extent(fs_info, node->bytenr,
node->num_bytes, 1);
return 0;
}
trace_run_delayed_ref_head(fs_info, node, head, node->action);
if (insert_reserved) {
- btrfs_pin_extent(root, node->bytenr,
+ btrfs_pin_extent(fs_info, node->bytenr,
node->num_bytes, 1);
if (head->is_data) {
ret = btrfs_del_csums(trans, fs_info,
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, root, node, extent_op,
+ ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, root, node, extent_op,
+ ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
insert_reserved);
else
BUG();
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
unsigned long nr)
{
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
- struct btrfs_fs_info *fs_info = root->fs_info;
ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
if (extent_op) {
spin_unlock(&locked_ref->lock);
- ret = run_delayed_extent_op(trans, root,
+ ret = run_delayed_extent_op(trans, fs_info,
ref, extent_op);
btrfs_free_delayed_extent_op(extent_op);
}
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, root, ref, extent_op,
+ ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
must_insert_reserved);
btrfs_free_delayed_extent_op(extent_op);
}
#endif
-static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
+static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_bytes;
num_bytes = heads * (sizeof(struct btrfs_extent_item) +
* Takes the number of bytes to be csumm'ed and figures out how many leaves it
* would require to store the csums for that many bytes.
*/
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 csum_size;
u64 num_csums_per_leaf;
u64 num_csums;
}
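/*
 * Editor's aside, not part of the patch: the leaf-count arithmetic the
 * comment above describes, with illustrative parameter names (div_u64 is
 * from linux/math64.h). Each csum covers one sectorsize block and a leaf
 * holds leaf_space / csum_size of them.
 */
static u64 csum_leaves_sketch(u64 csum_bytes, u32 sectorsize,
			      u32 csum_size, u32 leaf_space)
{
	u64 num_csums = div_u64(csum_bytes, sectorsize);
	u32 per_leaf = leaf_space / csum_size;

	return div_u64(num_csums + per_leaf - 1, per_leaf); /* round up */
}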
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
int ret = 0;
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
- num_heads = heads_to_leaves(root, num_heads);
+ num_heads = heads_to_leaves(fs_info, num_heads);
if (num_heads > 1)
num_bytes += (num_heads - 1) * fs_info->nodesize;
num_bytes <<= 1;
- num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * fs_info->nodesize;
+ num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
+ fs_info->nodesize;
num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
num_dirty_bgs);
global_rsv = &fs_info->global_block_rsv;
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
u64 avg_runtime;
if (val >= NSEC_PER_SEC / 2)
return 2;
- return btrfs_check_space_for_delayed_refs(trans, root);
+ return btrfs_check_space_for_delayed_refs(trans, fs_info);
}
struct async_delayed_refs {
struct btrfs_work work;
};
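+/* map a queued work item back to the async_delayed_refs embedding it */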
+static inline struct async_delayed_refs *
+to_async_delayed_refs(struct btrfs_work *work)
+{
+ return container_of(work, struct async_delayed_refs, work);
+}
+
static void delayed_ref_async_start(struct btrfs_work *work)
{
- struct async_delayed_refs *async;
+ struct async_delayed_refs *async = to_async_delayed_refs(work);
struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info = async->root->fs_info;
int ret;
- async = container_of(work, struct async_delayed_refs, work);
-
/* if the commit is already started, we don't need to wait here */
- if (btrfs_transaction_blocked(async->root->fs_info))
+ if (btrfs_transaction_blocked(fs_info))
goto done;
trans = btrfs_join_transaction(async->root);
if (trans->transid > async->transid)
goto end;
- ret = btrfs_run_delayed_refs(trans, async->root, async->count);
+ ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
if (ret)
async->error = ret;
end:
kfree(async);
}
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct async_delayed_refs *async;
int ret;
* Returns <0 on error and aborts the transaction
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, unsigned long count)
+ struct btrfs_fs_info *fs_info, unsigned long count)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *head;
if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
return 0;
- if (root == fs_info->extent_root)
- root = fs_info->tree_root;
-
delayed_refs = &trans->transaction->delayed_refs;
if (count == 0)
count = atomic_read(&delayed_refs->num_entries) * 2;
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
trans->can_flush_pending_bgs = false;
- ret = __btrfs_run_delayed_refs(trans, root, count);
+ ret = __btrfs_run_delayed_refs(trans, fs_info, count);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
return ret;
if (run_all) {
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
spin_lock(&delayed_refs->lock);
node = rb_first(&delayed_refs->href_root);
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_extent_op *extent_op;
int ret;
int i;
int level;
int ret = 0;
- int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+ int (*process_func)(struct btrfs_trans_handle *,
+ struct btrfs_fs_info *,
u64, u64, u64, u64, u64, u64);
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
- ret = process_func(trans, root, bytenr, num_bytes,
+ ret = process_func(trans, fs_info, bytenr, num_bytes,
parent, ref_root, key.objectid,
key.offset);
if (ret)
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = fs_info->nodesize;
- ret = process_func(trans, root, bytenr, num_bytes,
+ ret = process_func(trans, fs_info, bytenr, num_bytes,
parent, ref_root, level - 1, 0);
if (ret)
goto fail;
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_block_group_cache *cache)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
unsigned long bi;
struct extent_buffer *leaf;
}
static struct btrfs_block_group_cache *
-next_block_group(struct btrfs_root *root,
+next_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
spin_lock(&fs_info->block_group_cache_lock);
WARN_ON(ret);
if (i_size_read(inode) > 0) {
- ret = btrfs_check_trunc_cache_free_space(root,
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
if (ret)
goto out_put;
}
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache, *tmp;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_path *path;
* we're still allowing others to join the commit.
*/
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
* make sure all the block groups on our dirty list actually
* exist
*/
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
if (!path) {
path = btrfs_alloc_path();
}
}
if (!ret) {
- ret = write_one_cache_group(trans, root, path, cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
/*
* Our block group might still be attached to the list
* of new block groups in the transaction handle of some
* go through delayed refs for all the stuff we've just kicked off
* and then loop back (just once)
*/
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (!ret && loops == 0) {
loops++;
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret < 0) {
- btrfs_cleanup_dirty_bgs(cur_trans, root);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
}
btrfs_free_path(path);
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
cache_save_setup(cache, trans, path);
if (!ret)
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
+ ret = btrfs_run_delayed_refs(trans, fs_info,
+ (unsigned long) -1);
if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
}
}
if (!ret) {
- ret = write_one_cache_group(trans, root, path, cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
/*
* One of the free space endio workers might have
* created a new block group while updating a free space
if (ret == -ENOENT) {
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
- ret = write_one_cache_group(trans, root, path,
- cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
}
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
}
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *block_group;
int readonly = 0;
* progress (either running or paused) picks the target profile (if it's
* already available), otherwise falls back to plain reducing.
*/
-static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_devices = fs_info->fs_devices->rw_devices;
u64 target;
u64 raid_type;
return extended_to_chunk(flags | allowed);
}
-static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
+static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
unsigned seq;
u64 flags;
flags |= fs_info->avail_metadata_alloc_bits;
} while (read_seqretry(&fs_info->profiles_lock, seq));
- return btrfs_reduce_alloc_profile(root, flags);
+ return btrfs_reduce_alloc_profile(fs_info, flags);
}
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
else
flags = BTRFS_BLOCK_GROUP_METADATA;
- ret = get_alloc_profile(root, flags);
+ ret = get_alloc_profile(fs_info, flags);
return ret;
}
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = do_chunk_alloc(trans, fs_info->extent_root,
- alloc_target,
+ ret = do_chunk_alloc(trans, fs_info, alloc_target,
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
if (ret < 0) {
return (global->size << 1);
}
-static int should_alloc_chunk(struct btrfs_root *root,
+static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *sinfo, int force)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
return 1;
}
-static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
+static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_dev;
if (type & (BTRFS_BLOCK_GROUP_RAID10 |
* removing a chunk.
*/
void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_space_info *info;
u64 left;
u64 thresh;
info->bytes_may_use;
spin_unlock(&info->lock);
- num_devs = get_profile_num_devs(root, type);
+ num_devs = get_profile_num_devs(fs_info, type);
/* num_devs device items to update and 1 chunk item to add or remove */
thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- ret = btrfs_alloc_chunk(trans, root, flags);
+ ret = btrfs_alloc_chunk(trans, fs_info, flags);
}
if (!ret) {
* - return errors including -ENOSPC otherwise.
*/
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 flags, int force)
+ struct btrfs_fs_info *fs_info, u64 flags, int force)
{
struct btrfs_space_info *space_info;
- struct btrfs_fs_info *fs_info = extent_root->fs_info;
int wait_for_alloc = 0;
int ret = 0;
if (force < space_info->force_alloc)
force = space_info->force_alloc;
if (space_info->full) {
- if (should_alloc_chunk(extent_root, space_info, force))
+ if (should_alloc_chunk(fs_info, space_info, force))
ret = -ENOSPC;
else
ret = 0;
return ret;
}
- if (!should_alloc_chunk(extent_root, space_info, force)) {
+ if (!should_alloc_chunk(fs_info, space_info, force)) {
spin_unlock(&space_info->lock);
return 0;
} else if (space_info->chunk_alloc) {
* Check if we have enough space in the SYSTEM chunk because we may need
* to update devices.
*/
- check_system_chunk(trans, extent_root, flags);
+ check_system_chunk(trans, fs_info, flags);
- ret = btrfs_alloc_chunk(trans, extent_root, flags);
+ ret = btrfs_alloc_chunk(trans, fs_info, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
*/
if (trans->can_flush_pending_bgs &&
trans->chunk_bytes_reserved >= (u64)SZ_2M) {
- btrfs_create_pending_block_groups(trans, extent_root);
+ btrfs_create_pending_block_groups(trans, fs_info);
btrfs_trans_release_chunk_metadata(trans);
}
return ret;
return 0;
}
-static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
unsigned long nr_pages, int nr_items)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct super_block *sb = fs_info->sb;
if (down_read_trylock(&sb->s_umount)) {
}
}
-static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
+static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+ u64 to_reclaim)
{
u64 bytes;
int nr;
- bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
+ bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
nr = (int)div64_u64(to_reclaim, bytes);
if (!nr)
nr = 1;
enum btrfs_reserve_flush_enum flush;
/* Calc the number of pages we need to flush for space reservation */
- items = calc_reclaim_items_nr(root, to_reclaim);
+ items = calc_reclaim_items_nr(fs_info, to_reclaim);
to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
trans = (struct btrfs_trans_handle *)current->journal_info;
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
nr_pages = max_reclaim >> PAGE_SHIFT;
- btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
+ btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
/*
* We need to wait for the async pages to actually start before
* we do anything.
case FLUSH_DELAYED_ITEMS_NR:
case FLUSH_DELAYED_ITEMS:
if (state == FLUSH_DELAYED_ITEMS_NR)
- nr = calc_reclaim_items_nr(root, num_bytes) * 2;
+ nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
else
nr = -1;
ret = PTR_ERR(trans);
break;
}
- ret = btrfs_run_delayed_items_nr(trans, root, nr);
+ ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
btrfs_end_transaction(trans, root);
break;
case FLUSH_DELALLOC:
ret = PTR_ERR(trans);
break;
}
- ret = do_chunk_alloc(trans, fs_info->extent_root,
+ ret = do_chunk_alloc(trans, fs_info,
btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
rsv->type = type;
}
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type)
{
struct btrfs_block_rsv *block_rsv;
- struct btrfs_fs_info *fs_info = root->fs_info;
block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
if (!block_rsv)
return block_rsv;
}
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
if (!rsv)
return;
- btrfs_block_rsv_release(root, rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
kfree(rsv);
}
return ret;
}
-int btrfs_block_rsv_check(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, int min_factor)
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
return ret;
}
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
if (global_rsv == block_rsv ||
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (!trans->block_rsv)
return;
trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, trans->bytes_reserved, 0);
- btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv,
+ trans->bytes_reserved);
trans->bytes_reserved = 0;
}
trace_btrfs_space_reservation(fs_info, "orphan",
btrfs_ino(inode), num_bytes, 0);
- btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
}
/*
return ret;
}
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved)
{
- btrfs_block_rsv_release(root, rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
/**
int reserve)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
u64 old_csums, num_csums;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
BTRFS_I(inode)->csum_bytes == 0)
return 0;
- old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+ old_csums = btrfs_csum_bytes_to_leaves(fs_info,
+ BTRFS_I(inode)->csum_bytes);
if (reserve)
BTRFS_I(inode)->csum_bytes += num_bytes;
else
BTRFS_I(inode)->csum_bytes -= num_bytes;
- num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+ num_csums = btrfs_csum_bytes_to_leaves(fs_info,
+ BTRFS_I(inode)->csum_bytes);
/* No change, no need to reserve more */
if (old_csums == num_csums)
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_reserve, 1);
if (release_extra)
- btrfs_block_rsv_release(root, block_rsv,
+ btrfs_block_rsv_release(fs_info, block_rsv,
btrfs_calc_trans_metadata_size(fs_info, 1));
return 0;
to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
if (to_free) {
- btrfs_block_rsv_release(root, block_rsv, to_free);
+ btrfs_block_rsv_release(fs_info, block_rsv, to_free);
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
}
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
u64 to_free = 0;
unsigned dropped;
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
- btrfs_block_rsv_release(root, &fs_info->delalloc_block_rsv, to_free);
+ btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
}
/**
return 0;
}
-static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
+static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
u64 bytenr;
return bytenr;
}
-static int pin_down_extent(struct btrfs_root *root,
+static int pin_down_extent(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
-
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
cache->pinned += num_bytes;
/*
* this function must be called within a transaction
*/
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, int reserved)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
- pin_down_extent(root, cache, bytenr, num_bytes, reserved);
+ pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
btrfs_put_block_group(cache);
return 0;
/*
* this function must be called within a transaction
*/
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
int ret;
*/
cache_block_group(cache, 1);
- pin_down_extent(root, cache, bytenr, num_bytes, 0);
+ pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
/* remove us from the free space cache (if we're there at all) */
ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
return ret;
}
-static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
+static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_caching_control *caching_ctl;
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
- ret = add_excluded_extent(root, start, num_bytes);
+ ret = add_excluded_extent(fs_info, start, num_bytes);
} else if (start + num_bytes <= caching_ctl->progress) {
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
num_bytes = (start + num_bytes) -
caching_ctl->progress;
start = caching_ctl->progress;
- ret = add_excluded_extent(root, start, num_bytes);
+ ret = add_excluded_extent(fs_info, start, num_bytes);
}
out_lock:
mutex_unlock(&caching_ctl->mutex);
return ret;
}
-int btrfs_exclude_logged_extents(struct btrfs_root *log,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
struct btrfs_file_extent_item *item;
int found_type;
int i;
- if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
+ if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
return 0;
for (i = 0; i < btrfs_header_nritems(eb); i++) {
continue;
key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
- __exclude_logged_extent(log, key.objectid, key.offset);
+ __exclude_logged_extent(fs_info, key.objectid, key.offset);
}
return 0;
return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_caching_control *next;
struct btrfs_caching_control *caching_ctl;
struct btrfs_block_group_cache *cache;
* what it should be based on the mount options.
*/
static struct btrfs_free_cluster *
-fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
- u64 *empty_cluster)
+fetch_cluster_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info, u64 *empty_cluster)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_cluster *ret = NULL;
bool ssd = btrfs_test_opt(fs_info, SSD);
return ret;
}
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ u64 start, u64 end,
const bool return_free_space)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_space_info *space_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
cache = btrfs_lookup_block_group(fs_info, start);
BUG_ON(!cache); /* Logic error */
- cluster = fetch_cluster_info(root,
+ cluster = fetch_cluster_info(fs_info,
cache->space_info,
&empty_cluster);
empty_cluster <<= 1;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *block_group, *tmp;
struct list_head *deleted_bgs;
struct extent_io_tree *unpin;
}
if (btrfs_test_opt(fs_info, DISCARD))
- ret = btrfs_discard_extent(root, start,
+ ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end);
- unpin_extent_range(root, start, end, true);
+ unpin_extent_range(fs_info, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
ret = -EROFS;
if (!trans->aborted)
- ret = btrfs_discard_extent(root,
+ ret = btrfs_discard_extent(fs_info,
block_group->key.objectid,
block_group->key.offset,
&trimmed);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *info,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
{
struct btrfs_key key;
struct btrfs_path *path;
- struct btrfs_fs_info *info = root->fs_info;
struct btrfs_root *extent_root = info->extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
if (ret > 0)
- btrfs_print_leaf(extent_root,
- path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
}
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
extent_slot = path->slots[0];
}
} else if (WARN_ON(ret == -ENOENT)) {
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
btrfs_err(info,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
bytenr, parent, root_objectid, owner_objectid,
btrfs_err(info,
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
}
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
* removes it from the tree.
*/
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr)
+ u64 bytenr)
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_block_group_cache *cache;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- ret = check_ref_cleanup(trans, root, buf->start);
+ ret = check_ref_cleanup(trans, buf->start);
if (!ret)
goto out;
}
cache = btrfs_lookup_block_group(fs_info, buf->start);
if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
- pin_down_extent(root, cache, buf->start, buf->len, 1);
+ pin_down_extent(fs_info, cache, buf->start,
+ buf->len, 1);
btrfs_put_block_group(cache);
goto out;
}
}
/* Can return -ENOMEM */
-int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_is_testing(fs_info))
return 0;
if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
/* unlocks the pinned mutex */
- btrfs_pin_extent(root, bytenr, num_bytes, 1);
+ btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
ret = 0;
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
spin_unlock(&space_info->lock);
}
- last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
+ last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
if (last_ptr) {
spin_lock(&last_ptr->lock);
if (last_ptr->block_group)
spin_unlock(&last_ptr->lock);
}
- search_start = max(search_start, first_logical_byte(root, 0));
+ search_start = max(search_start, first_logical_byte(fs_info, 0));
search_start = max(search_start, hint_byte);
if (search_start == hint_byte) {
block_group = btrfs_lookup_block_group(fs_info, search_start);
block_group->full_stripe_len);
/* allocate a cluster in this block group */
- ret = btrfs_find_space_cluster(root, block_group,
+ ret = btrfs_find_space_cluster(fs_info, block_group,
last_ptr, search_start,
num_bytes,
aligned_cluster);
goto out;
}
- ret = do_chunk_alloc(trans, root, flags,
+ ret = do_chunk_alloc(trans, fs_info, flags,
CHUNK_ALLOC_FORCE);
/*
return ret;
}
-static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
int ret = 0;
}
if (pin)
- pin_down_extent(root, cache, start, len, 1);
+ pin_down_extent(fs_info, cache, start, len, 1);
else {
if (btrfs_test_opt(fs_info, DISCARD))
- ret = btrfs_discard_extent(root, start, len, NULL);
+ ret = btrfs_discard_extent(fs_info, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_free_reserved_bytes(cache, len, delalloc);
trace_btrfs_reserved_extent_free(fs_info, start, len);
return ret;
}
-int btrfs_free_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc)
{
- return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
+ return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
- return __btrfs_free_reserved_extent(root, start, len, 1, 0);
+ return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
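
The two wrappers differ only in the pin flag passed down to
__btrfs_free_reserved_extent(). Typical call sites, as they appear after
conversion elsewhere in this patch:

	/* data allocation error path: return the space outright */
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);

	/* tree block error path: keep the range pinned until commit */
	btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
					   fs_info->nodesize);
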
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_extent_item *extent_item;
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_extent_item *extent_item;
struct btrfs_tree_block_info *block_info;
struct btrfs_extent_inline_ref *iref;
path = btrfs_alloc_path();
if (!path) {
- btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+ btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
fs_info->nodesize);
return -ENOMEM;
}
ins, size);
if (ret) {
btrfs_free_path(path);
- btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+ btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
fs_info->nodesize);
return ret;
}
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
* space cache bits as well
*/
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
* need to do the exclude dance if this fs isn't mixed.
*/
if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
- ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
+ ret = __exclude_logged_extent(fs_info, ins->objectid,
+ ins->offset);
if (ret)
return ret;
}
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
+ ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
0, owner, offset, ins, 1);
btrfs_put_block_group(block_group);
return ret;
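
Two conversion styles coexist here: btrfs_alloc_reserved_file_extent() above
reads trans->fs_info and drops the parameter entirely, while
btrfs_alloc_logged_file_extent() takes an explicit fs_info. A hypothetical
helper in the handle-based style (name and body are illustrative only; the
signatures used are the ones introduced in this patch):

	static int example_pin_in_transaction(struct btrfs_trans_handle *trans,
					      u64 bytenr, u64 num_bytes)
	{
		struct btrfs_fs_info *fs_info = trans->fs_info;

		/* unlocks the pinned mutex, as noted at the call above */
		return btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
	}
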
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
out_free_buf:
free_extent_buffer(buf);
out_free_reserved:
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
unuse_block_rsv(fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
continue;
/* We don't lock the tree block, it's OK to be racy here */
- ret = btrfs_lookup_extent_info(trans, root, bytenr,
+ ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
wc->level - 1, 1, &refs,
&flags);
/* We don't care about errors in readahead. */
continue;
}
reada:
- readahead_tree_block(root, bytenr);
+ readahead_tree_block(fs_info, bytenr);
nread++;
}
wc->reada_slot = slot;
struct btrfs_path *path,
struct walk_control *wc, int lookup_info)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
BUG_ON(!path->locks[level]);
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+ ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
eb->len, flag,
btrfs_header_level(eb), 0);
BUG_ON(ret); /* -ENOMEM */
next = find_extent_buffer(fs_info, bytenr);
if (!next) {
- next = btrfs_find_create_tree_block(root, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
+ ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
&wc->refs[level - 1],
&wc->flags[level - 1]);
if (ret < 0)
if (!next) {
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
- next = read_tree_block(root, bytenr, generation);
+ next = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(next)) {
return PTR_ERR(next);
} else if (!extent_buffer_uptodate(next)) {
ret);
}
}
- ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
- root->root_key.objectid, level - 1, 0);
+ ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
+ parent, root->root_key.objectid,
+ level - 1, 0);
if (ret)
goto out_unlock;
}
btrfs_set_lock_blocking(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_qgroup_trace_leaf_items(trans, root, eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
if (ret) {
btrfs_err_rl(fs_info,
"error %d accounting leaf items. Quota is out of sync, rescan required.",
btrfs_set_lock_blocking(path->nodes[level]);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
path->nodes[level]->start,
level, 1, &wc->refs[level],
&wc->flags[level]);
BUG_ON(wc->level == 0);
if (btrfs_should_end_transaction(trans, tree_root) ||
- (!for_reloc && btrfs_need_cleaner_sleep(root))) {
+ (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
root_item);
}
btrfs_end_transaction_throttle(trans, tree_root);
- if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
+ if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
btrfs_debug(fs_info,
"drop snapshot early exit");
err = -EAGAIN;
mutex_unlock(&fs_info->ro_block_group_mutex);
btrfs_end_transaction(trans, root);
- ret = btrfs_wait_for_commit(root, transid);
+ ret = btrfs_wait_for_commit(fs_info, transid);
if (ret)
return ret;
goto again;
*/
alloc_flags = update_block_group_flags(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = do_chunk_alloc(trans, root, alloc_flags,
+ ret = do_chunk_alloc(trans, fs_info, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
ret = inc_block_group_ro(cache, 0);
if (!ret)
goto out;
- alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, alloc_flags,
+ alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
+ ret = do_chunk_alloc(trans, fs_info, alloc_flags,
CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
alloc_flags = update_block_group_flags(fs_info, cache->flags);
lock_chunks(fs_info);
- check_system_chunk(trans, root, alloc_flags);
+ check_system_chunk(trans, fs_info, alloc_flags);
unlock_chunks(fs_info);
}
mutex_unlock(&fs_info->ro_block_group_mutex);
}
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
- u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, alloc_flags,
- CHUNK_ALLOC_FORCE);
+ u64 alloc_flags = get_alloc_profile(fs_info, type);
+
+ return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
}
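
A hypothetical caller of the reworked helper, assuming an open transaction
and the standard profile flag BTRFS_BLOCK_GROUP_DATA:

	ret = btrfs_force_chunk_alloc(trans, fs_info, BTRFS_BLOCK_GROUP_DATA);
	if (ret < 0)
		return ret;
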
/*
return free_bytes;
}
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache)
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
if (block_group->iref)
break;
spin_unlock(&block_group->lock);
- block_group = next_block_group(info->tree_root,
- block_group);
+ block_group = next_block_group(info, block_group);
}
if (!block_group) {
if (last == 0)
*/
if (block_group->cached == BTRFS_CACHE_NO ||
block_group->cached == BTRFS_CACHE_ERROR)
- free_excluded_extents(info->extent_root, block_group);
+ free_excluded_extents(info, block_group);
btrfs_remove_free_space_cache(block_group);
ASSERT(list_empty(&block_group->dirty_list));
}
static struct btrfs_block_group_cache *
-btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
+btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
+ u64 start, u64 size)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
cache = kzalloc(sizeof(*cache), GFP_NOFS);
cache->sectorsize = fs_info->sectorsize;
cache->fs_info = fs_info;
- cache->full_stripe_len = btrfs_full_stripe_len(root,
- &fs_info->mapping_tree, start);
+ cache->full_stripe_len = btrfs_full_stripe_len(fs_info,
+ &fs_info->mapping_tree,
+ start);
set_free_space_tree_thresholds(cache);
atomic_set(&cache->count, 1);
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
- struct btrfs_root *root = info->extent_root;
struct btrfs_path *path;
int ret;
struct btrfs_block_group_cache *cache;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- cache = btrfs_create_block_group_cache(root, found_key.objectid,
+ cache = btrfs_create_block_group_cache(info, found_key.objectid,
found_key.offset);
if (!cache) {
ret = -ENOMEM;
* info has super bytes accounted for, otherwise we'll think
* we have more space than we actually do.
*/
- ret = exclude_super_stripes(root, cache);
+ ret = exclude_super_stripes(info, cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
btrfs_put_block_group(cache);
goto error;
}
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
found_key.objectid,
found_key.objectid +
found_key.offset);
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
}
ret = btrfs_add_block_group_cache(info, cache);
__link_block_group(space_info, cache);
set_avail_alloc_bits(info, cache->flags);
- if (btrfs_chunk_readonly(root, cache->key.objectid)) {
+ if (btrfs_chunk_readonly(info, cache->key.objectid)) {
inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) {
spin_lock(&info->unused_bgs_lock);
}
list_for_each_entry_rcu(space_info, &info->space_info, list) {
- if (!(get_alloc_profile(root, space_info->flags) &
+ if (!(get_alloc_profile(info, space_info->flags) &
(BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID5 |
}
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *block_group, *tmp;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_block_group_item item;
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytes_used,
+ struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
int ret;
btrfs_set_log_full_commit(fs_info, trans);
- cache = btrfs_create_block_group_cache(root, chunk_offset, size);
+ cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
if (!cache)
return -ENOMEM;
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
cache->needs_free_space = 1;
- ret = exclude_super_stripes(root, cache);
+ ret = exclude_super_stripes(fs_info, cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(root, cache);
+ free_excluded_extents(fs_info, cache);
btrfs_put_block_group(cache);
return ret;
}
add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
- free_excluded_extents(root, cache);
+ free_excluded_extents(fs_info, cache);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(root, cache)) {
+ if (btrfs_should_fragment_free_space(cache)) {
u64 new_bytes_used = size - bytes_used;
bytes_used += new_bytes_used >> 1;
- fragment_free_space(root, cache);
+ fragment_free_space(cache);
}
#endif
/*
* Free the reserved super bytes from this block group before
* removing it.
*/
- free_excluded_extents(root, block_group);
+ free_excluded_extents(fs_info, block_group);
memcpy(&key, &block_group->key, sizeof(key));
index = get_block_group_index(block_group);
trans = btrfs_start_trans_remove_block_group(fs_info,
block_group->key.objectid);
if (IS_ERR(trans)) {
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
ret = PTR_ERR(trans);
goto next;
}
EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
goto end_trans;
}
ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
goto end_trans;
}
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
return ret;
}
-int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
+ u64 start, u64 end)
{
- return unpin_extent_range(root, start, end, false);
+ return unpin_extent_range(fs_info, start, end, false);
}
/*
return ret;
}
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_device *device;
struct list_head *devices;
}
}
- cache = next_block_group(fs_info->tree_root, cache);
+ cache = next_block_group(fs_info, cache);
}
mutex_lock(&fs_info->fs_devices->device_list_mutex);
return 0;
}
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
- int mirror_num)
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int mirror_num)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
start = btrfs_item_nr_offset(nritems);
- end = btrfs_leaf_data(eb) +
- leaf_data_end(fs_info->tree_root, eb);
+ end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
memzero_extent_buffer(eb, start, end - start);
}
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
- int mirror_num);
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int mirror_num);
/*
* When IO fails, either with EIO or csum verification fails, we
kfree(bio->csum_allocated);
}
-static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
- struct inode *inode, struct bio *bio,
+static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
return 0;
}
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u32 *dst)
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
+ return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 offset)
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
+ return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
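
Both wrappers now derive everything from the inode; __btrfs_lookup_bio_sums()
reads btrfs_sb(inode->i_sb) itself. The read paths reduce to the forms used
later in this patch:

	/* buffered read */
	ret = btrfs_lookup_bio_sums(inode, bio, NULL);

	/* direct I/O read */
	ret = btrfs_lookup_bio_sums_dio(inode, bio, file_offset);
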
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
return ret;
}
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 file_start, int contig)
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+ u64 file_start, int contig)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
-static noinline void truncate_one_csum(struct btrfs_root *root,
+static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u64 csum_end;
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(root, path, new_size, 1);
+ btrfs_truncate_item(fs_info, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(root, path, new_size, 0);
+ btrfs_truncate_item(fs_info, path, new_size, 0);
key->offset = end_byte;
btrfs_set_item_key_safe(fs_info, path, key);
key.offset = end_byte - 1;
} else {
- truncate_one_csum(root, path, &key, bytenr, len);
+ truncate_one_csum(fs_info, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
u32 diff;
u32 free_space;
- if (btrfs_leaf_free_space(root, leaf) <
+ if (btrfs_leaf_free_space(fs_info, leaf) <
sizeof(struct btrfs_item) + csum_size * 2)
goto insert;
- free_space = btrfs_leaf_free_space(root, leaf) -
+ free_space = btrfs_leaf_free_space(fs_info, leaf) -
sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes;
tmp >>= fs_info->sb->s_blocksize_bits;
diff /= csum_size;
diff *= csum_size;
- btrfs_extend_item(root, path, diff);
+ btrfs_extend_item(fs_info, path, diff);
ret = 0;
goto csum;
}
return 0;
}
-static inline int __need_auto_defrag(struct btrfs_root *root)
+static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
u64 transid;
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
goto out;
/*
&fs_info->fs_state))
break;
- if (!__need_auto_defrag(fs_info->tree_root))
+ if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached)
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+ size_t num_pages, loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0;
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- ret = btrfs_free_extent(trans, root,
+ ret = btrfs_free_extent(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
if (!ret && replace_extent && leafs_visited == 1 &&
(path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
path->locks[0] == BTRFS_WRITE_LOCK) &&
- btrfs_leaf_free_space(root, leaf) >=
+ btrfs_leaf_free_space(fs_info, leaf) >=
sizeof(struct btrfs_item) + extent_item_size) {
key.objectid = ino;
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
+ ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+ 0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
fs_info->sectorsize);
if (copied > 0)
- ret = btrfs_dirty_pages(root, inode, pages,
- dirty_pages, pos, copied,
- NULL);
+ ret = btrfs_dirty_pages(inode, pages, dirty_pages,
+ pos, copied, NULL);
if (need_unlock)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
balance_dirty_pages_ratelimited(inode->i_mapping);
if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
goto out;
}
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
ret = -ENOMEM;
goto out_free;
}
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans)) {
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
out_free:
btrfs_free_path(path);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
block_group->key.objectid);
}
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 needed_bytes;
int ret;
}
static noinline_for_stack int
-write_pinned_extent_entries(struct btrfs_root *root,
+write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
{
- struct btrfs_fs_info *fs_info;
u64 start, extent_start, extent_end, len;
struct extent_io_tree *unpin = NULL;
int ret;
if (!block_group)
return 0;
- fs_info = block_group->fs_info;
-
/*
* We want to add any pinned extents to our free space cache
* so we don't leak the space
struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_state *cached_state = NULL;
LIST_HEAD(bitmap_list);
int entries = 0;
* If this changes while we are working, we'll get added back to
* the dirty list and redo it. No locking needed.
*/
- ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
+ ret = write_pinned_extent_entries(fs_info, block_group,
+ io_ctl, &entries);
if (ret)
goto out_nospc_locked;
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
- ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
- 0, i_size_read(inode), &cached_state);
+ ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
+ i_size_read(inode), &cached_state);
if (ret)
goto out_nospc;
bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(fs_info->extent_root, block_group))
+ if (btrfs_should_fragment_free_space(block_group))
forced = true;
#endif
* returns zero and sets up the cluster if things worked out, otherwise
* it returns -ENOSPC
*/
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space *entry, *tmp;
LIST_HEAD(bitmaps);
u64 min_bytes;
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = btrfs_discard_extent(fs_info->extent_root,
- start, bytes, &trimmed);
+ ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
if (!ret)
*total_trimmed += trimmed;
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
- btrfs_truncate_item(root, path, item_size - del_len, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
name, name_len, NULL))
goto out;
- btrfs_extend_item(root, path, ins_len);
+ btrfs_extend_item(root->fs_info, path, ins_len);
ret = 0;
}
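
btrfs_truncate_item() and btrfs_extend_item() now take the fs_info as their
first argument; call sites without an fs_info local simply pass
root->fs_info, as above. The converted prototypes, assuming the return types
are unchanged:

	void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u32 new_size, int from_end);
	void btrfs_extend_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, u32 data_size);
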
if (ret < 0)
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- btrfs_extend_item(root, path, ins_len);
+ btrfs_extend_item(fs_info, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
out_release:
trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->bytes_reserved, 0);
- btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv,
+ trans->bytes_reserved);
out:
trans->block_rsv = rsv;
trans->bytes_reserved = num_bytes;
return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
locked_page,
return 0;
}
-static noinline int csum_exist_in_range(struct btrfs_root *root,
+static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
- if (btrfs_extent_readonly(root, disk_bytenr))
+ if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
* this ensures that csums for a given extent are
* either valid or do not exist.
*/
- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+ if (csum_exist_in_range(fs_info, disk_bytenr,
+ num_bytes))
goto out_check;
if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
- ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ ret = btrfs_csum_one_bio(inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
- ret = btrfs_map_bio(root, bio, mirror_num, 1);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
bio_flags);
goto out;
} else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret)
goto out;
}
__btrfs_submit_bio_done);
goto out;
} else if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ ret = btrfs_csum_one_bio(inode, bio, 0, 0);
if (ret)
goto out;
}
mapit:
- ret = btrfs_map_bio(root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
out:
if (ret < 0) {
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_alloc_reserved_file_extent(trans, root,
- root->root_key.objectid,
- btrfs_ino(inode), file_pos,
- ram_bytes, &ins);
+ ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
+ btrfs_ino(inode), file_pos,
+ ram_bytes, &ins);
/*
* Release the reserved range from inode dirty range map, as it is
* already moved into delayed_ref_head
inode_add_bytes(inode, len);
btrfs_release_path(path);
- ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
new->disk_len, 0,
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
return NULL;
}
-static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(fs_info, start);
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
if (!ret)
- btrfs_release_delalloc_bytes(root,
+ btrfs_release_delalloc_bytes(fs_info,
ordered_extent->start,
ordered_extent->disk_len);
}
if ((ret || !logical_len) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
- btrfs_free_reserved_extent(root, ordered_extent->start,
+ btrfs_free_reserved_extent(fs_info,
+ ordered_extent->start,
ordered_extent->disk_len, 1);
}
spin_unlock(&fs_info->delayed_iput_lock);
}
-void btrfs_run_delayed_iputs(struct btrfs_root *root)
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->delayed_iput_lock);
while (!list_empty(&fs_info->delayed_iputs)) {
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
- btrfs_free_block_rsv(root, block_rsv);
+ btrfs_free_block_rsv(fs_info, block_rsv);
}
}
int ret;
if (!root->orphan_block_rsv) {
- block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ block_rsv = btrfs_alloc_block_rsv(fs_info,
+ BTRFS_BLOCK_RSV_TEMP);
if (!block_rsv)
return -ENOMEM;
}
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
- btrfs_free_block_rsv(root, block_rsv);
+ btrfs_free_block_rsv(fs_info, block_rsv);
block_rsv = NULL;
}
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
- btrfs_block_rsv_release(root, root->orphan_block_rsv,
+ btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv ||
goto err;
}
skip_backref:
- ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto err;
out:
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(root->fs_info);
return ret;
}
}
btrfs_release_path(path);
- ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
out:
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(root->fs_info);
return err;
}
* This is only used to apply pressure to the enospc system; we don't
* intend to use this reservation at all.
*/
- bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
+ bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
bytes_deleted *= fs_info->nodesize;
ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(root, path, size, 1);
+ btrfs_truncate_item(root->fs_info, path, size, 1);
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
root == fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
- ret = btrfs_free_extent(trans, root, extent_start,
+ ret = btrfs_free_extent(trans, fs_info, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset);
BUG_ON(ret);
- if (btrfs_should_throttle_delayed_refs(trans, root))
- btrfs_async_run_delayed_refs(root,
+ if (btrfs_should_throttle_delayed_refs(trans, fs_info))
+ btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
if (be_nice) {
should_end = 1;
}
if (btrfs_should_throttle_delayed_refs(trans,
- root)) {
+ fs_info))
should_throttle = 1;
- }
}
}
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
- ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ ret = btrfs_run_delayed_refs(trans,
+ fs_info,
+ updates * 2);
if (ret && !err)
err = ret;
}
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
- ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ ret = btrfs_run_delayed_refs(trans, fs_info,
+ updates * 2);
if (ret && !err)
err = ret;
}
goto no_delete;
}
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
"Could not get space for a delete, will truncate on mount %d",
ret);
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
* again.
*/
if (steal_from_global) {
- if (!btrfs_check_space_for_delayed_refs(trans, root))
+ if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
min_size, 0);
else
ret = btrfs_commit_transaction(trans, root);
if (ret) {
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
continue;
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans, root);
trans = NULL;
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
}
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
/*
* Errors here aren't a big deal, it just means we leave orphan items
btrfs_return_ino(root, btrfs_ino(inode));
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
no_delete:
btrfs_remove_delayed_node(inode);
clear_inode(inode);
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
-static int fixup_tree_root_location(struct btrfs_root *root,
+static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_path *path;
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
index = srcu_read_lock(&fs_info->subvol_srcu);
- ret = fixup_tree_root_location(root, dir, dentry,
+ ret = fixup_tree_root_location(fs_info, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
ctx->pos = found_key.offset;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
- if (verify_dir_item(root, leaf, di))
+ if (verify_dir_item(fs_info, leaf, di))
goto next;
name_len = btrfs_dir_name_len(leaf, di);
*/
static int btrfs_dirty_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
return ret;
}
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
out_unlock:
btrfs_end_transaction(trans, root);
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 index;
int err;
int drop_inode = 0;
btrfs_log_new_name(trans, inode, NULL, parent);
}
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
fail:
if (trans)
btrfs_end_transaction(trans, root);
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_fail_inode:
ins.offset, 0);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(em))
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid,
+ ins.offset, 1);
return em;
}
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
int ret;
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
}
- if (btrfs_extent_readonly(root, disk_bytenr))
+ if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out;
num_bytes = min(offset + *len, extent_end) - offset;
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
- goto out;
+ if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
+ goto out;
/*
* all of the above have passed, so it is safe to overwrite this extent
* without cow
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
int mirror_num)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
bio_get(bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio,
- BTRFS_WQ_ENDIO_DIO_REPAIR);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
goto err;
- ret = btrfs_map_bio(root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
err:
bio_put(bio);
return ret;
unsigned long bio_flags, u64 offset)
{
int ret;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
+ ret = btrfs_csum_one_bio(inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
return bio;
}
-static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
- struct inode *inode,
+static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
struct btrfs_dio_private *dip,
struct bio *bio,
u64 file_offset)
* contention.
*/
if (dip->logical_offset == file_offset) {
- ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
+ ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
file_offset);
if (ret)
return ret;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private;
bool write = bio_op(bio) == REQ_OP_WRITE;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (async_submit)
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
- ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+ ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
if (ret)
goto err;
} else {
- ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
+ ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
file_offset);
if (ret)
goto err;
}
map:
- ret = btrfs_map_bio(root, bio, 0, async_submit);
+ ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
kfree(dip);
}
-static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
- const struct iov_iter *iter, loff_t offset)
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ struct kiocb *iocb,
+ const struct iov_iter *iter, loff_t offset)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int seg;
int i;
unsigned int blocksize_mask = fs_info->sectorsize - 1;
bool relock = false;
ssize_t ret;
- if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
+ if (check_direct_IO(fs_info, iocb, iter, offset))
return 0;
inode_dio_begin(inode);
* 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
* updating the inode.
*/
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
}
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
err = ret;
ret = btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
}
out:
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
if (ret && !err)
err = ret;
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
- btrfs_free_reserved_extent(root, ins.objectid,
+ btrfs_free_reserved_extent(fs_info, ins.objectid,
ins.offset, 0);
btrfs_abort_transaction(trans, ret);
if (own_trans)
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
btrfs_end_transaction(trans, root);
if (ret)
iput(inode);
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return ret;
out_inode:
range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
- ret = btrfs_trim_fs(fs_info->tree_root, &range);
+ ret = btrfs_trim_fs(fs_info, &range);
if (ret < 0)
return ret;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- btrfs_subvolume_release_metadata(root, &block_rsv,
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv,
qgroup_reserved);
goto fail_free;
}
kfree(root_item);
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
if (async_transid) {
*async_transid = trans->transid;
d_instantiate(dentry, inode);
ret = 0;
fail:
- btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+ btrfs_subvolume_release_metadata(fs_info,
&pending_snapshot->block_rsv,
pending_snapshot->qgroup_reserved);
dec_and_free:
err = ret;
inode->i_flags |= S_DEAD;
out_release:
- btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
out_up_write:
up_write(&fs_info->subvol_sem);
if (err) {
return ret;
}
-static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- ret = btrfs_init_new_device(root->fs_info, vol_args->name);
+ ret = btrfs_init_new_device(fs_info, vol_args->name);
if (!ret)
btrfs_info(fs_info, "disk added %s", vol_args->name);
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
mutex_lock(&fs_info->volume_mutex);
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
- ret = btrfs_rm_device(root, NULL, vol_args->devid);
+ ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
} else {
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
- ret = btrfs_rm_device(root, vol_args->name, 0);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
mutex_unlock(&fs_info->volume_mutex);
atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
mutex_lock(&fs_info->volume_mutex);
- ret = btrfs_rm_device(root, vol_args->name, 0);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0);
mutex_unlock(&fs_info->volume_mutex);
if (!ret)
return ret;
}
-static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
return ret;
}
-static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
if (disko) {
inode_add_bytes(inode, datal);
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans,
+ fs_info,
disko, diskl, 0,
root->root_key.objectid,
btrfs_ino(inode),
}
}
-static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_space_args space_args;
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
return 0;
}
-static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
+static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
void __user *argp)
{
u64 transid;
} else {
transid = 0; /* current trans */
}
- return btrfs_wait_for_commit(root, transid);
+ return btrfs_wait_for_commit(fs_info, transid);
}
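
Per the fallback above, a transid of zero waits for the currently running
transaction. A hypothetical user-space invocation of the ioctl (standard
ABI assumed):

	__u64 transid = 0;	/* 0: wait for the current transaction */
	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
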
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
return ret;
}
-static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_scrub_cancel(root->fs_info);
+ return btrfs_scrub_cancel(fs_info);
}
-static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
+static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_scrub_args *sa;
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
+ ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
return ret;
}
-static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
return -EPERM;
}
- ret = btrfs_get_dev_stats(root, sa);
+ ret = btrfs_get_dev_stats(fs_info, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
return ret;
}
-static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_dev_replace_args *p;
int ret;
&fs_info->mutually_exclusive_operation_running, 1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
} else {
- ret = btrfs_dev_replace_by_ioctl(root, p);
+ ret = btrfs_dev_replace_by_ioctl(fs_info, p);
atomic_set(
&fs_info->mutually_exclusive_operation_running, 0);
}
return 0;
}
-static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
void __user *arg)
{
int ret = 0;
goto out;
}
- ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
+ ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
build_ino_list, inodes);
if (ret == -EINVAL)
ret = -ENOENT;
return ret;
}
-static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
+static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return -EINVAL;
}
-static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
+static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
int ret = 0;
return 0;
}
-static int check_feature_bits(struct btrfs_root *root,
+static int check_feature_bits(struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
const char *type = btrfs_feature_set_names[set];
char *names;
u64 disallowed, unsupported;
return 0;
}
-#define check_feature(root, change_mask, flags, mask_base) \
-check_feature_bits(root, FEAT_##mask_base, change_mask, flags, \
+#define check_feature(fs_info, change_mask, flags, mask_base) \
+check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
BTRFS_FEATURE_ ## mask_base ## _SUPP, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
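/*
* Illustrative expansion (sketch, not part of the patch): a call such as
* check_feature(fs_info, change_mask, flags, COMPAT) now expands to
*
*	check_feature_bits(fs_info, FEAT_COMPAT, change_mask, flags,
*			   BTRFS_FEATURE_COMPAT_SUPP,
*			   BTRFS_FEATURE_COMPAT_SAFE_SET,
*			   BTRFS_FEATURE_COMPAT_SAFE_CLEAR);
*
* only the first argument changes; the token pasting is untouched.
*/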
!flags[0].incompat_flags)
return 0;
- ret = check_feature(root, flags[0].compat_flags,
+ ret = check_feature(fs_info, flags[0].compat_flags,
flags[1].compat_flags, COMPAT);
if (ret)
return ret;
- ret = check_feature(root, flags[0].compat_ro_flags,
+ ret = check_feature(fs_info, flags[0].compat_ro_flags,
flags[1].compat_ro_flags, COMPAT_RO);
if (ret)
return ret;
- ret = check_feature(root, flags[0].incompat_flags,
+ ret = check_feature(fs_info, flags[0].incompat_flags,
flags[1].incompat_flags, INCOMPAT);
if (ret)
return ret;
case BTRFS_IOC_RESIZE:
return btrfs_ioctl_resize(file, argp);
case BTRFS_IOC_ADD_DEV:
- return btrfs_ioctl_add_dev(root, argp);
+ return btrfs_ioctl_add_dev(fs_info, argp);
case BTRFS_IOC_RM_DEV:
return btrfs_ioctl_rm_dev(file, argp);
case BTRFS_IOC_RM_DEV_V2:
return btrfs_ioctl_rm_dev_v2(file, argp);
case BTRFS_IOC_FS_INFO:
- return btrfs_ioctl_fs_info(root, argp);
+ return btrfs_ioctl_fs_info(fs_info, argp);
case BTRFS_IOC_DEV_INFO:
- return btrfs_ioctl_dev_info(root, argp);
+ return btrfs_ioctl_dev_info(fs_info, argp);
case BTRFS_IOC_BALANCE:
return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_TRANS_START:
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
- return btrfs_ioctl_logical_to_ino(root, argp);
+ return btrfs_ioctl_logical_to_ino(fs_info, argp);
case BTRFS_IOC_SPACE_INFO:
- return btrfs_ioctl_space_info(root, argp);
+ return btrfs_ioctl_space_info(fs_info, argp);
case BTRFS_IOC_SYNC: {
int ret;
case BTRFS_IOC_START_SYNC:
return btrfs_ioctl_start_sync(root, argp);
case BTRFS_IOC_WAIT_SYNC:
- return btrfs_ioctl_wait_sync(root, argp);
+ return btrfs_ioctl_wait_sync(fs_info, argp);
case BTRFS_IOC_SCRUB:
return btrfs_ioctl_scrub(file, argp);
case BTRFS_IOC_SCRUB_CANCEL:
- return btrfs_ioctl_scrub_cancel(root, argp);
+ return btrfs_ioctl_scrub_cancel(fs_info);
case BTRFS_IOC_SCRUB_PROGRESS:
- return btrfs_ioctl_scrub_progress(root, argp);
+ return btrfs_ioctl_scrub_progress(fs_info, argp);
case BTRFS_IOC_BALANCE_V2:
return btrfs_ioctl_balance(file, argp);
case BTRFS_IOC_BALANCE_CTL:
- return btrfs_ioctl_balance_ctl(root, arg);
+ return btrfs_ioctl_balance_ctl(fs_info, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
- return btrfs_ioctl_balance_progress(root, argp);
+ return btrfs_ioctl_balance_progress(fs_info, argp);
case BTRFS_IOC_SET_RECEIVED_SUBVOL:
return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
case BTRFS_IOC_SEND:
return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp);
+ return btrfs_ioctl_get_dev_stats(fs_info, argp);
case BTRFS_IOC_QUOTA_CTL:
return btrfs_ioctl_quota_ctl(file, argp);
case BTRFS_IOC_QGROUP_ASSIGN:
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
return btrfs_ioctl_quota_rescan_wait(file, argp);
case BTRFS_IOC_DEV_REPLACE:
- return btrfs_ioctl_dev_replace(root, argp);
+ return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_FSLABEL:
return btrfs_ioctl_get_fslabel(file, argp);
case BTRFS_IOC_SET_FSLABEL:
}
}
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int i;
u32 type, nr;
struct btrfs_item *item;
nr = btrfs_header_nritems(l);
btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
- btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
+ btrfs_header_bytenr(l), nr,
+ btrfs_leaf_free_space(fs_info, l));
for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
}
}
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int i;
u32 nr;
struct btrfs_key key;
int level;
nr = btrfs_header_nritems(c);
level = btrfs_header_level(c);
if (level == 0) {
- btrfs_print_leaf(root, c);
+ btrfs_print_leaf(fs_info, c);
return;
}
btrfs_info(fs_info,
btrfs_node_blockptr(c, i));
}
for (i = 0; i < nr; i++) {
- struct extent_buffer *next = read_tree_block(root,
+ struct extent_buffer *next = read_tree_block(fs_info,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
if (IS_ERR(next)) {
if (btrfs_header_level(next) !=
level - 1)
BUG();
- btrfs_print_tree(root, next);
+ btrfs_print_tree(fs_info, next);
free_extent_buffer(next);
}
}
#ifndef __PRINT_TREE_
#define __PRINT_TREE_
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l);
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c);
#endif
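/*
* Illustrative use (sketch, not part of the patch): callers pass the
* fs_info they already hold instead of a root, e.g. when dumping a leaf
* after a failed root key update later in this series:
*
*	btrfs_print_leaf(fs_info, path->nodes[0]);
*	btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
*		   key->objectid, key->type, key->offset);
*/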
struct inode *parent)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
if (!value)
continue;
- num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
num_bytes, BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
ret = __btrfs_set_prop(trans, inode, h->xattr_name,
value, strlen(value), 0);
- btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
if (ret)
goto out;
}
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
- record);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
if (ret > 0)
kfree(record);
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int nr = btrfs_header_nritems(eb);
int i, extent_type, ret;
struct btrfs_key key;
}
if (root_level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans, root, root_eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
goto out;
}
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
child_gen = btrfs_node_ptr_generation(eb, parent_slot);
- eb = read_tree_block(root, child_bytenr, child_gen);
+ eb = read_tree_block(fs_info, child_bytenr, child_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
}
if (level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans, root,
- path->nodes[level]);
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
+ path->nodes[level]);
if (ret)
goto out;
* Return <0 for error(ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree
* allocation and initial setup for the btrfs_raid_bio. Note that
* this does not allocate any pages for rbio->pages.
*/
-static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
- struct btrfs_bio *bbio, u64 stripe_len)
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
+ struct btrfs_bio *bbio,
+ u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
- rbio->fs_info = root->fs_info;
+ rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
rbio->real_stripes = real_stripes;
/*
* our main entry point for writes from the rest of the FS.
*/
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
int ret;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
int ret;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
*/
struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
int i;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
{
struct btrfs_raid_bio *rbio;
- rbio = alloc_rbio(root, bbio, length);
+ rbio = alloc_rbio(fs_info, bbio, length);
if (IS_ERR(rbio))
return NULL;
struct btrfs_raid_bio;
struct btrfs_device;
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io);
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical);
struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
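/*
* Illustrative call sites (sketch, not part of the patch): with these
* prototypes, the RAID5/6 split in btrfs_map_bio() passes along the
* fs_info it already has:
*
*	if (bio_op(bio) == REQ_OP_WRITE)
*		ret = raid56_parity_write(fs_info, bio, bbio, map_length);
*	else
*		ret = raid56_parity_recover(fs_info, bio, bbio, map_length,
*					    mirror_num, 1);
*/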
return zone;
}
-static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 logical,
struct btrfs_key *top)
{
int ret;
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
struct reada_extctl *rec;
/* takes one ref */
- re = reada_find_extent(fs_info->tree_root, logical, top);
+ re = reada_find_extent(fs_info, logical, top);
if (!re)
return -1;
logical = re->logical;
atomic_inc(&dev->reada_in_flight);
- ret = reada_tree_block_flagged(fs_info->extent_root, logical,
- mirror_num, &eb);
+ ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
if (ret)
__readahead_hook(fs_info, re, NULL, ret);
else if (eb)
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset);
break;
}
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
break;
}
- eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+ eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
- ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
- path->nodes[level]->start,
+ ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+ blocksize, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
- 0, dest->root_key.objectid, level - 1,
- 0);
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+ blocksize, 0, dest->root_key.objectid,
+ level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
BUG_ON(ret);
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
int *level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb = NULL;
int i;
u64 bytenr;
}
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
- eb = read_tree_block(root, bytenr, ptr_gen);
+ eb = read_tree_block(fs_info, bytenr, ptr_gen);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
btrfs_end_transaction_throttle(trans, root);
trans = NULL;
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
if (trans)
btrfs_end_transaction_throttle(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
if (!err)
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
return PTR_ERR(trans);
}
if (!err) {
if (num_bytes != rc->merging_rsv_size) {
btrfs_end_transaction(trans, rc->extent_root);
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
goto again;
}
}
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *upper;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
blocksize = root->fs_info->nodesize;
generation = btrfs_node_ptr_generation(upper->eb, slot);
- eb = read_tree_block(root, bytenr, generation);
+ eb = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(eb)) {
err = PTR_ERR(eb);
goto next;
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, root->fs_info,
node->eb->start, blocksize,
upper->eb->start,
btrfs_header_owner(upper->eb),
return 0;
}
-static int get_tree_block_key(struct reloc_control *rc,
+static int get_tree_block_key(struct btrfs_fs_info *fs_info,
struct tree_block *block)
{
struct extent_buffer *eb;
BUG_ON(block->key_ready);
- eb = read_tree_block(rc->extent_root, block->bytenr,
- block->key.offset);
+ eb = read_tree_block(fs_info, block->bytenr, block->key.offset);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready)
- readahead_tree_block(rc->extent_root, block->bytenr);
+ readahead_tree_block(fs_info, block->bytenr);
rb_node = rb_next(rb_node);
}
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready) {
- err = get_tree_block_key(rc, block);
+ err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
static int relocate_file_extent_cluster(struct inode *inode,
struct file_extent_cluster *cluster)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 page_start;
u64 page_end;
u64 offset = BTRFS_I(inode)->index_cnt;
index++;
balance_dirty_pages_ratelimited(inode->i_mapping);
- btrfs_throttle(BTRFS_I(inode)->root);
+ btrfs_throttle(fs_info);
}
WARN_ON(nr != cluster->nr);
out:
btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
return 1;
- ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
+ ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
eb->start, btrfs_header_level(eb), 1,
NULL, &flags);
BUG_ON(ret);
}
truncate:
- ret = btrfs_check_trunc_cache_free_space(root,
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
if (ret)
goto out;
ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
out:
iput(inode);
return ret;
struct btrfs_trans_handle *trans;
int ret;
- rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
+ rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
BTRFS_BLOCK_RSV_TEMP);
if (!rc->block_rsv)
return -ENOMEM;
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
}
btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_btree_balance_dirty(fs_info);
trans = NULL;
if (rc->stage == MOVE_DATA_EXTENTS &&
}
}
if (trans && progress && err == -ENOSPC) {
- ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
rc->block_group->flags);
if (ret == 1) {
err = 0;
if (trans) {
btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_btree_balance_dirty(fs_info);
}
if (!err) {
set_reloc_control(rc);
backref_cache_cleanup(&rc->backref_cache);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
err = prepare_to_merge(rc, err);
rc->merge_reloc_tree = 0;
unset_reloc_control(rc);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root);
}
btrfs_commit_transaction(trans, rc->extent_root);
out_free:
- btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
+ btrfs_free_block_rsv(fs_info, rc->block_rsv);
btrfs_free_path(path);
return err;
}
err = btrfs_orphan_add(trans, inode);
out:
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (err) {
if (inode)
iput(inode);
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
if (err && rw)
- btrfs_dec_block_group_ro(extent_root, rc->block_group);
+ btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
btrfs_put_block_group(rc->block_group);
kfree(rc);
}
if (ret != 0) {
- btrfs_print_leaf(root, path->nodes[0]);
+ btrfs_print_leaf(fs_info, path->nodes[0]);
btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
BUG_ON(1);
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
- ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
+ ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
page->mirror_num, 0);
if (ret)
{
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *dev_root = fs_info->dev_root;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
struct btrfs_bio *bbio = NULL;
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
- rbio = raid56_alloc_missing_rbio(dev_root, bio, bbio, length);
+ rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
if (!rbio)
goto rbio_out;
{
struct scrub_ctx *sctx = sparity->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *dev_root = fs_info->dev_root;
struct bio *bio;
struct btrfs_raid_bio *rbio;
struct scrub_page *spage;
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
- rbio = raid56_parity_alloc_scrub_rbio(dev_root, bio, bbio,
+ rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
if (ro_set)
- btrfs_dec_block_group_ro(root, cache);
+ btrfs_dec_block_group_ro(cache);
/*
* We might have prevented the cleaner kthread from deleting
return ret;
}
-void btrfs_scrub_pause(struct btrfs_root *root)
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
mutex_lock(&fs_info->scrub_lock);
atomic_inc(&fs_info->scrub_pause_req);
while (atomic_read(&fs_info->scrubs_paused) !=
mutex_unlock(&fs_info->scrub_lock);
}
-void btrfs_scrub_continue(struct btrfs_root *root)
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
atomic_dec(&fs_info->scrub_pause_req);
wake_up(&fs_info->scrub_pause_wait);
}
return 0;
}
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct scrub_ctx *sctx = NULL;
struct recorded_ref *parent_ref,
const bool is_orphan)
{
+ struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
goto out;
}
- di = btrfs_match_dir_item_name(sctx->parent_root, path,
- parent_ref->name, parent_ref->name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
+ parent_ref->name_len);
if (!di) {
ret = 0;
goto out;
* reading in a new superblock is parsed here.
* XXX JDM: This needs to be cleaned up for remount.
*/
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
- struct btrfs_fs_info *info = root->fs_info;
substring_t args[MAX_OPT_ARGS];
char *p, *num, *orig = NULL;
u64 cache_gen;
}
}
- ret = btrfs_parse_options(root, data, *flags);
+ ret = btrfs_parse_options(fs_info, data, *flags);
if (ret) {
ret = -EINVAL;
goto restore;
/*
* either allocate a new transaction or hop into the existing one
*/
-static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
+static noinline int join_transaction(struct btrfs_fs_info *fs_info,
+ unsigned int type)
{
struct btrfs_transaction *cur_trans;
- struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
loop:
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
*/
-static void wait_current_trans(struct btrfs_root *root)
+static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans;
spin_lock(&fs_info->trans_lock);
}
}
-static int may_wait_transaction(struct btrfs_root *root, int type)
+static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return 0;
if (type & __TRANS_FREEZABLE)
sb_start_intwrite(fs_info->sb);
- if (may_wait_transaction(root, type))
- wait_current_trans(root);
+ if (may_wait_transaction(fs_info, type))
+ wait_current_trans(fs_info);
do {
- ret = join_transaction(root, type);
+ ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
- wait_current_trans(root);
+ wait_current_trans(fs_info);
if (unlikely(type == TRANS_ATTACH))
ret = -ENOENT;
}
smp_mb();
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
- may_wait_transaction(root, type)) {
+ may_wait_transaction(fs_info, type)) {
current->journal_info = h;
btrfs_commit_transaction(h, root);
goto again;
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
- btrfs_block_rsv_release(root, &fs_info->trans_block_rsv,
+ btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes);
reserve_fail:
btrfs_qgroup_free_meta(root, qgroup_reserved);
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH);
if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
- btrfs_wait_for_commit(root, 0);
+ btrfs_wait_for_commit(root->fs_info, 0);
return trans;
}
/* wait for a transaction commit to be fully complete */
-static noinline void wait_for_commit(struct btrfs_root *root,
- struct btrfs_transaction *commit)
+static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = NULL, *t;
int ret = 0;
goto out; /* nothing committing|committed */
}
- wait_for_commit(root, cur_trans);
+ wait_for_commit(cur_trans);
btrfs_put_transaction(cur_trans);
out:
return ret;
}
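/*
* Illustrative use (sketch, not part of the patch): transid == 0 waits
* on the currently running transaction, which is how the wait_sync
* ioctl and btrfs_attach_transaction_barrier() call it:
*
*	ret = btrfs_wait_for_commit(fs_info, 0);
*/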
-void btrfs_throttle(struct btrfs_root *root)
+void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (!atomic_read(&fs_info->open_ioctl_trans))
- wait_current_trans(root);
+ wait_current_trans(fs_info);
}
-static int should_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int should_end_transaction(struct btrfs_trans_handle *trans)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
if (fs_info->global_block_rsv.space_info->full &&
- btrfs_check_space_for_delayed_refs(trans, root))
+ btrfs_check_space_for_delayed_refs(trans, fs_info))
return 1;
- return !!btrfs_block_rsv_check(root, &fs_info->global_block_rsv, 5);
+ return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
int updates;
int err;
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
- err = btrfs_run_delayed_refs(trans, root, updates * 2);
+ err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
if (err) /* Error code will also eval true */
return err;
}
- return should_end_transaction(trans, root);
+ return should_end_transaction(trans);
}
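/*
* Illustrative use (sketch, not part of the patch; error handling
* omitted): long-running loops typically poll this helper and reopen
* the handle when it fires, along the lines of
*
*	if (btrfs_should_end_transaction(trans, root)) {
*		btrfs_end_transaction(trans, root);
*		trans = btrfs_start_transaction(root, 0);
*	}
*/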
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
return 0;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, info);
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
- btrfs_should_throttle_delayed_refs(trans, root);
+ btrfs_should_throttle_delayed_refs(trans, info);
cur = max_t(unsigned long, cur, 32);
/*
must_run_delayed_refs = 2;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, info);
btrfs_trans_release_chunk_metadata(trans);
if (lock && !atomic_read(&info->open_ioctl_trans) &&
- should_end_transaction(trans, root) &&
+ should_end_transaction(trans) &&
ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
spin_lock(&info->trans_lock);
if (cur_trans->state == TRANS_STATE_RUNNING)
current->journal_info = NULL;
if (throttle)
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(info);
if (trans->aborted ||
test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
- btrfs_async_run_delayed_refs(root, cur, transid,
+ btrfs_async_run_delayed_refs(info, cur, transid,
must_run_delayed_refs == 1);
}
return err;
* them in one of two extent_io trees. This is used to make sure all of
* those extents are sent to disk but does not wait on them
*/
-int btrfs_write_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
int werr = 0;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
struct blk_plug plug;
blk_start_plug(&plug);
- ret = btrfs_write_marked_extents(root, dirty_pages, mark);
+ ret = btrfs_write_marked_extents(root->fs_info, dirty_pages, mark);
blk_finish_plug(&plug);
ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
}
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_root *root)
{
int ret;
* to clean up the delayed refs.
*/
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
struct list_head *io_bgs = &trans->transaction->io_bgs;
struct list_head *next;
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
if (ret)
return ret;
- ret = btrfs_setup_space_cache(trans, root);
+ ret = btrfs_setup_space_cache(trans, fs_info);
if (ret)
return ret;
/* run_qgroups might have added some more refs */
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
+ struct btrfs_root *root;
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
- ret = btrfs_write_dirty_block_groups(trans, root);
+ ret = btrfs_write_dirty_block_groups(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(info->tree_root);
+ btrfs_btree_balance_dirty(info);
cond_resched();
if (btrfs_fs_closing(info) || ret != -EAGAIN)
* like chunk and root tree, as they won't affect qgroup.
* And we don't write super to avoid a half-committed status.
*/
- ret = commit_cowonly_roots(trans, src);
+ ret = commit_cowonly_roots(trans, fs_info);
if (ret)
goto out;
switch_commit_roots(trans->transaction, fs_info);
* otherwise we corrupt the FS during
* snapshot
*/
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) { /* Transaction aborted */
btrfs_abort_transaction(trans, ret);
goto fail;
goto fail;
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
return ret;
}
-static void update_super_roots(struct btrfs_root *root)
+static void update_super_roots(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root_item *root_item;
struct btrfs_super_block *super;
* wait for the current transaction commit to start and block subsequent
* transaction joins
*/
-static void wait_current_trans_commit_start(struct btrfs_root *root,
+static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *trans)
{
- wait_event(root->fs_info->transaction_blocked_wait,
- trans->state >= TRANS_STATE_COMMIT_START ||
- trans->aborted);
+ wait_event(fs_info->transaction_blocked_wait,
+ trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}
/*
* wait for the current transaction to start and then become unblocked.
* caller holds ref.
*/
-static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
- struct btrfs_transaction *trans)
+static void wait_current_trans_commit_start_and_unblock(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_transaction *trans)
{
- wait_event(root->fs_info->transaction_wait,
- trans->state >= TRANS_STATE_UNBLOCKED ||
- trans->aborted);
+ wait_event(fs_info->transaction_wait,
+ trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}
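/*
* Sketch (not part of the patch): both waiters share the pattern
*
*	wait_event(wq, trans->state >= target || trans->aborted);
*
* differing only in the wait queue used and in whether target is
* TRANS_STATE_COMMIT_START or TRANS_STATE_UNBLOCKED.
*/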
/*
/* wait for transaction to start and unblock */
if (wait_for_unblock)
- wait_current_trans_commit_start_and_unblock(root, cur_trans);
+ wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
else
- wait_current_trans_commit_start(root, cur_trans);
+ wait_current_trans_commit_start(fs_info, cur_trans);
if (current->journal_info == trans)
current->journal_info = NULL;
}
spin_unlock(&fs_info->trans_lock);
- btrfs_cleanup_one_transaction(trans->transaction, root);
+ btrfs_cleanup_one_transaction(trans->transaction, fs_info);
spin_lock(&fs_info->trans_lock);
if (cur_trans == fs_info->running_transaction)
/* make a pass through all the delayed refs we have so far
* any running procs may add more while we are here
*/
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, fs_info);
trans->block_rsv = NULL;
cur_trans = trans->transaction;
smp_wmb();
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
mutex_unlock(&fs_info->ro_block_group_mutex);
if (run_it)
- ret = btrfs_start_dirty_block_groups(trans, root);
+ ret = btrfs_start_dirty_block_groups(trans, fs_info);
}
if (ret) {
btrfs_end_transaction(trans, root);
atomic_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans, root);
- wait_for_commit(root, cur_trans);
+ wait_for_commit(cur_trans);
if (unlikely(cur_trans->aborted))
ret = cur_trans->aborted;
atomic_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
- wait_for_commit(root, prev_trans);
+ wait_for_commit(prev_trans);
ret = prev_trans->aborted;
btrfs_put_transaction(prev_trans);
if (ret)
goto cleanup_transaction;
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
extwriter_counter_read(cur_trans) == 0);
/* some pending stuff might be added after the previous flush. */
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
btrfs_wait_pending_ordered(cur_trans);
- btrfs_scrub_pause(root);
+ btrfs_scrub_pause(fs_info);
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
* because all the trees which are snapshotted will be forced to COW
* the nodes and leaves.
*/
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) {
mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
goto scrub_continue;
}
- ret = commit_cowonly_roots(trans, root);
+ ret = commit_cowonly_roots(trans, fs_info);
if (ret) {
mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
- btrfs_prepare_extent_commit(trans, root);
+ btrfs_prepare_extent_commit(trans, fs_info);
cur_trans = fs_info->running_transaction;
assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
- update_super_roots(root);
+ update_super_roots(fs_info);
btrfs_set_super_log_root(fs_info->super_copy, 0);
btrfs_set_super_log_root_level(fs_info->super_copy, 0);
sizeof(*fs_info->super_copy));
btrfs_update_commit_device_size(fs_info);
- btrfs_update_commit_device_bytes_used(root, cur_trans);
+ btrfs_update_commit_device_bytes_used(fs_info, cur_trans);
clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
goto scrub_continue;
}
- ret = write_ctree_super(trans, root, 0);
+ ret = write_ctree_super(trans, fs_info, 0);
if (ret) {
mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue;
*/
mutex_unlock(&fs_info->tree_log_mutex);
- btrfs_finish_extent_commit(trans, root);
+ btrfs_finish_extent_commit(trans, fs_info);
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
trace_btrfs_transaction_commit(root);
- btrfs_scrub_continue(root);
+ btrfs_scrub_continue(fs_info);
if (current->journal_info == trans)
current->journal_info = NULL;
*/
if (current != fs_info->transaction_kthread &&
current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(fs_info);
return ret;
scrub_continue:
- btrfs_scrub_continue(root);
+ btrfs_scrub_continue(fs_info);
cleanup_transaction:
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, fs_info);
btrfs_trans_release_chunk_metadata(trans);
trans->block_rsv = NULL;
btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-void btrfs_throttle(struct btrfs_root *root);
+void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_write_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark);
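/*
* Illustrative use (sketch, not part of the patch): the write side now
* takes fs_info and is plugged, while the wait side still takes a root:
*
*	blk_start_plug(&plug);
*	ret = btrfs_write_marked_extents(fs_info, dirty_pages, mark);
*	blk_finish_plug(&plug);
*	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
*/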
}
if (wc->pin)
- ret = btrfs_pin_extent_for_log_replay(fs_info->extent_root,
- eb->start, eb->len);
+ ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
+ eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
- ret = btrfs_exclude_logged_extents(log, eb);
+ ret = btrfs_exclude_logged_extents(fs_info, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u32 item_size;
u64 saved_i_size = 0;
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(root, path, item_size, 1);
+ btrfs_truncate_item(fs_info, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(root, path,
+ btrfs_extend_item(fs_info, path,
item_size - found_size);
} else if (ret) {
return ret;
* is this extent already allocated in the extent
* allocation tree? If so, just add a reference
*/
- ret = btrfs_lookup_data_extent(root, ins.objectid,
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- root, root->root_key.objectid,
+ fs_info,
+ root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
goto out;
struct inode *dir,
struct btrfs_dir_item *di)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
char *name;
int name_len;
if (ret)
goto out;
else
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
out:
kfree(name);
iput(inode);
u64 ref_index, char *name, int namelen,
int *search_done)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *victim_name;
int victim_name_len;
kfree(victim_name);
if (ret)
return ret;
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
return ret;
*search_done = 1;
victim_name_len);
if (!ret)
ret = btrfs_run_delayed_items(
- trans, root);
+ trans,
+ fs_info);
}
iput(victim_parent);
kfree(victim_name);
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di))
+ if (verify_dir_item(fs_info, eb, di))
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
struct inode *dir,
struct btrfs_key *dir_key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
int slot;
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di)) {
+ if (verify_dir_item(fs_info, eb, di)) {
ret = -EIO;
goto out;
}
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
if (!ret)
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
kfree(name);
iput(inode);
if (ret)
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
- next = btrfs_find_create_tree_block(root, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
- bytenr, blocksize);
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info, bytenr,
+ blocksize);
if (ret) {
free_extent_buffer(next);
return ret;
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info,
path->nodes[*level]->start,
path->nodes[*level]->len);
if (ret)
static int walk_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log, struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
int wret;
int level;
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, log->fs_info, next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(log->root_key.objectid !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(log, next->start,
- next->len);
+ ret = btrfs_free_and_pin_reserved_extent(fs_info,
+ next->start, next->len);
if (ret)
goto out;
}
* wait for them until later.
*/
blk_start_plug(&plug);
- ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
- ret = btrfs_write_marked_extents(log_root_tree,
+ ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
- ret = write_ctree_super(trans, fs_info->tree_root, 1);
+ ret = write_ctree_super(trans, fs_info, 1);
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
- btrfs_extend_item(uuid_root, path, sizeof(subid_le));
+ btrfs_extend_item(fs_info, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
- btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);
+ btrfs_truncate_item(fs_info, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device);
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
+int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
u64 num_devices;
if (ret)
goto out;
- ret = btrfs_find_device_by_devspec(root, devid, device_path,
- &device);
+ ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
+ &device);
if (ret)
goto out;
call_rcu(&tgtdev->rcu, free_device);
}
-static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device **device)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
struct btrfs_super_block *disk_super;
u64 devid;
return ret;
}
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
*device = NULL;
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
return 0;
} else {
- return btrfs_find_device_by_path(root, device_path, device);
+ return btrfs_find_device_by_path(fs_info, device_path, device);
}
}
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
- char *devpath,
- struct btrfs_device **device)
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
+ char *devpath, struct btrfs_device **device)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
if (devid) {
if (!devpath || !devpath[0])
return -EINVAL;
- ret = btrfs_find_device_missing_or_by_path(root, devpath,
+ ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
device);
}
return ret;
/*
* does all the dirty work required for changing the file system's UUID.
*/
-static int btrfs_prepare_sprout(struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
struct btrfs_fs_devices *seed_devices;
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
- ret = btrfs_prepare_sprout(root);
+ ret = btrfs_prepare_sprout(fs_info);
BUG_ON(ret); /* -ENOMEM */
}
if (seeding_dev) {
lock_chunks(fs_info);
- ret = init_first_rw_device(trans, root, device);
+ ret = init_first_rw_device(trans, fs_info, device);
unlock_chunks(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
if (ret) /* transaction commit */
return ret;
- ret = btrfs_relocate_sys_chunks(root);
+ ret = btrfs_relocate_sys_chunks(fs_info);
if (ret < 0)
btrfs_handle_fs_error(fs_info, ret,
"Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
return ret;
}
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
struct request_queue *q;
struct btrfs_device *device;
struct block_device *bdev;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *devices;
struct rcu_string *name;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
{
struct extent_map_tree *em_tree;
struct extent_map *em;
- struct btrfs_root *extent_root = fs_info->extent_root;
struct map_lookup *map;
u64 dev_extent_len = 0;
u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
}
map = em->map_lookup;
lock_chunks(fs_info);
- check_system_chunk(trans, extent_root, map->type);
+ check_system_chunk(trans, fs_info, map->type);
unlock_chunks(fs_info);
/*
return -ENOSPC;
/* step one, relocate all the extents inside this chunk */
- btrfs_scrub_pause(root);
+ btrfs_scrub_pause(fs_info);
ret = btrfs_relocate_block_group(fs_info, chunk_offset);
- btrfs_scrub_continue(root);
+ btrfs_scrub_continue(fs_info);
if (ret)
return ret;
return ret;
}
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
return 0;
}
-static int should_balance_chunk(struct btrfs_root *root,
+static int should_balance_chunk(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 chunk_offset)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
struct btrfs_balance_args *bargs = NULL;
u64 chunk_type = btrfs_chunk_type(leaf, chunk);
spin_unlock(&fs_info->balance_lock);
}
- ret = should_balance_chunk(chunk_root, leaf, chunk,
+ ret = should_balance_chunk(fs_info, leaf, chunk,
found_key.offset);
btrfs_release_path(path);
goto error;
}
- ret = btrfs_force_chunk_alloc(trans, chunk_root,
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans, chunk_root);
if (ret < 0) {
return ret;
}
-static int btrfs_add_system_chunk(struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct btrfs_disk_key disk_key;
u32 array_size;
/ sizeof(struct btrfs_stripe) + 1)
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 start,
+ struct btrfs_fs_info *fs_info, u64 start,
u64 type)
{
- struct btrfs_fs_info *info = extent_root->fs_info;
+ struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct list_head *cur;
struct map_lookup *map = NULL;
goto error;
}
- ret = btrfs_make_block_group(trans, extent_root, 0, type,
+ ret = btrfs_make_block_group(trans, info, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
if (ret)
* TODO: Cleanup of inserted chunk root in case of
* failure.
*/
- ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
- item_size);
+ ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
}
out:
* bootstrap process of adding storage to a seed btrfs.
*/
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
- struct btrfs_fs_info *fs_info = extent_root->fs_info;
u64 chunk_offset;
ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
chunk_offset = find_next_chunk(fs_info);
- return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
+ return __btrfs_alloc_chunk(trans, fs_info, chunk_offset, type);
}
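/*
* Illustrative use (sketch, not part of the patch): callers allocate
* chunks with fs_info while holding chunk_mutex, which the ASSERT
* above enforces:
*
*	lock_chunks(fs_info);
*	ret = btrfs_alloc_chunk(trans, fs_info, type);
*	unlock_chunks(fs_info);
*/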
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_root *extent_root = fs_info->extent_root;
u64 chunk_offset;
u64 sys_chunk_offset;
u64 alloc_profile;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
- ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
- alloc_profile);
+ ret = __btrfs_alloc_chunk(trans, fs_info, chunk_offset, alloc_profile);
if (ret)
return ret;
sys_chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
- ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
+ ret = __btrfs_alloc_chunk(trans, fs_info, sys_chunk_offset,
alloc_profile);
return ret;
}
return max_errors;
}
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
return ret;
}
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static noinline void btrfs_schedule_bio(struct btrfs_root *root,
- struct btrfs_device *device,
+static noinline void btrfs_schedule_bio(struct btrfs_device *device,
struct bio *bio)
{
struct btrfs_fs_info *fs_info = device->fs_info;
btrfs_queue_work(fs_info->submit_workers, &device->work);
}
-static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
- struct bio *bio, u64 physical, int dev_nr,
- int async)
+static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
+ u64 physical, int dev_nr, int async)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
+ struct btrfs_fs_info *fs_info = bbio->fs_info;
bio->bi_private = bbio;
btrfs_io_bio(bio)->stripe_index = dev_nr;
#endif
bio->bi_bdev = dev->bdev;
- btrfs_bio_counter_inc_noblocked(root->fs_info);
+ btrfs_bio_counter_inc_noblocked(fs_info);
if (async)
- btrfs_schedule_bio(root, dev, bio);
+ btrfs_schedule_bio(dev, bio);
else
btrfsic_submit_bio(bio);
}
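Two back-pointers make the root parameter redundant on the submission path: btrfs_schedule_bio() reaches the filesystem through device->fs_info, and submit_stripe_bio() through bbio->fs_info, presumably populated where the bbio is assembled in __btrfs_map_block(). A sketch of that assumed assignment:

	/* in __btrfs_map_block(), once the bbio has been allocated (assumed) */
	bbio->fs_info = fs_info;

Note the pairing: the btrfs_bio_counter_inc_noblocked() here is balanced by the btrfs_bio_counter_dec() calls in btrfs_map_bio() below.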
}
}
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = (u64)bio->bi_iter.bi_sector << 9;
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (bio_op(bio) == REQ_OP_WRITE) {
- ret = raid56_parity_write(root, bio, bbio, map_length);
+ ret = raid56_parity_write(fs_info, bio, bbio,
+ map_length);
} else {
- ret = raid56_parity_recover(root, bio, bbio, map_length,
- mirror_num, 1);
+ ret = raid56_parity_recover(fs_info, bio, bbio,
+ map_length, mirror_num, 1);
}
btrfs_bio_counter_dec(fs_info);
} else
bio = first_bio;
- submit_stripe_bio(root, bbio, bio,
- bbio->stripes[dev_nr].physical, dev_nr,
- async_submit);
+ submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
+ dev_nr, async_submit);
}
btrfs_bio_counter_dec(fs_info);
return 0;
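After the conversion a submitter needs nothing but the fs_info and the bio. A minimal sketch of a caller, with hypothetical surroundings (mirror number 0, asynchronous submission, and the 4.9-era bi_error plumbing are the assumptions):

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}

RAID5/6 bios never reach submit_stripe_bio(): as the hunk above shows, they are handed wholesale to raid56_parity_write() or raid56_parity_recover(), both of which now take the fs_info as well.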
return NULL;
}
-static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices,
+static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
}
/* Return -EIO if any error, otherwise return 0. */
-static int btrfs_check_chunk_valid(struct btrfs_root *root,
+static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 length;
u64 stripe_len;
u16 num_stripes;
return 0;
}
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
- add_missing_dev(root, fs_info->fs_devices,
- devid, uuid);
+ add_missing_dev(fs_info->fs_devices, devid,
+ uuid);
if (!map->stripes[i].dev) {
free_extent_map(em);
return -EIO;
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
-static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
+static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
u8 *fsid)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices;
int ret;
return fs_devices;
}
-static int read_one_dev(struct btrfs_root *root,
+static int read_one_dev(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 devid;
BTRFS_UUID_SIZE);
if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
- fs_devices = open_seed_devices(root, fs_uuid);
+ fs_devices = open_seed_devices(fs_info, fs_uuid);
if (IS_ERR(fs_devices))
return PTR_ERR(fs_devices);
}
if (!btrfs_test_opt(fs_info, DEGRADED))
return -EIO;
- device = add_missing_dev(root, fs_devices, devid, dev_uuid);
+ device = add_missing_dev(fs_devices, devid, dev_uuid);
if (!device)
return -ENOMEM;
btrfs_warn(fs_info, "devid %llu uuid %pU missing",
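The degraded-mount logic reads straight off this hunk: without -o degraded, a device absent from fs_devices fails the scan with -EIO; with it, add_missing_dev() stitches in a stub so every stripe in the chunk maps still points at a btrfs_device. From memory of the era's helper, its body is approximately the following (a sketch, not verbatim):

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	/* a placeholder: correct devid/uuid, no bdev behind it */
	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;

Nothing in it ever needed a root, which is why the parameter could simply be dropped.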
* fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
* overallocate but we can keep it as-is, only the first page is used.
*/
- sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
+ sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
if (IS_ERR(sb))
return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
if (cur_offset + len > array_size)
goto out_short_read;
- ret = read_one_chunk(root, &key, sb, chunk);
+ ret = read_one_chunk(fs_info, &key, sb, chunk);
if (ret)
break;
} else {
struct btrfs_dev_item *dev_item;
dev_item = btrfs_item_ptr(leaf, slot,
struct btrfs_dev_item);
- ret = read_one_dev(root, leaf, dev_item);
+ ret = read_one_dev(fs_info, leaf, dev_item);
if (ret)
goto error;
total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
- ret = read_one_chunk(root, &found_key, leaf, chunk);
+ ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
if (ret)
goto error;
}
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int i;
}
/* Must be invoked during the transaction commit */
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_device *dev;
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 type);
+ struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
struct btrfs_device *device, struct btrfs_device *this_dev);
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device);
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
char *devpath,
struct btrfs_device **device);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid);
+int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ char *device_path, u64 devid);
void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *max_avail);
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
}
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction);
static inline void lock_chunks(struct btrfs_fs_info *fs_info)
{
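The body is elided here; it and its unlock_chunks() counterpart are presumably the one-line wrappers of this era, already fs_info-based (lock_chunks() appears above as unchanged context):

static inline void lock_chunks(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->chunk_mutex);
}

static inline void unlock_chunks(struct btrfs_fs_info *fs_info)
{
	mutex_unlock(&fs_info->chunk_mutex);
}

This is the end state the prototypes above are converging on: a chunk-level API keyed on the filesystem, not on any particular tree root.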
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
size_t name_len = strlen(name);
int ret = 0;
*/
ret = 0;
btrfs_assert_tree_locked(path->nodes[0]);
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
char *ptr;
if (size > old_data_len) {
- if (btrfs_leaf_free_space(root, leaf) <
+ if (btrfs_leaf_free_space(fs_info, leaf) <
(size - old_data_len)) {
ret = -ENOSPC;
goto out;
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
- btrfs_extend_item(root, path,
+ btrfs_extend_item(fs_info, path,
size - old_data_len);
else if (size < old_data_len)
- btrfs_truncate_item(root, path, data_size, 1);
+ btrfs_truncate_item(fs_info, path,
+ data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
- btrfs_extend_item(root, path, data_size);
+ btrfs_extend_item(fs_info, path, data_size);
}
item = btrfs_item_nr(slot);
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
- if (verify_dir_item(root, leaf, di)) {
+ if (verify_dir_item(fs_info, leaf, di)) {
ret = -EIO;
goto err;
}
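verify_dir_item() closes out the same theme: its sanity checks compare on-disk name and data lengths against filesystem-wide limits, so an fs_info is all it needs. From memory of the 4.9-era checks, they are approximately the following (a sketch, not verbatim):

	u16 namelen = BTRFS_NAME_LEN;
	u8 type = btrfs_dir_type(leaf, di);

	if (type >= BTRFS_FT_MAX)
		return 1;
	if (type == BTRFS_FT_XATTR)
		namelen = XATTR_NAME_MAX;
	if (btrfs_dir_name_len(leaf, di) > namelen)
		return 1;
	if (btrfs_dir_data_len(leaf, di) + btrfs_dir_name_len(leaf, di) >
	    BTRFS_MAX_XATTR_SIZE(fs_info))
		return 1;

	return 0;

A nonzero return is treated as on-disk corruption, which is why the caller above maps it to -EIO.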