btrfs: remove old unused commented out code
Remove code which has been #if 0'd out for a very long time and no longer appears to be related to the current codebase.

Signed-off-by: David Sterba <dsterba@suse.cz>
parent f2a97a9dbd
commit 182608c829
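For context on what "#if 0'd out" means: wrapping code in #if 0 ... #endif makes the C preprocessor discard it before compilation, so the text lingers in the sources but is never built or run. A minimal, generic illustration of the pattern (not taken from the btrfs sources):

#include <stdio.h>

int main(void)
{
#if 0
        /* everything in this block is dropped by the preprocessor,
         * so it is never compiled, never run, and easily goes stale */
        printf("dead code\n");
#endif
        printf("live code\n");
        return 0;
}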
@@ -709,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
}

/*
 * add a delayed ref to the tree. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
                          u64 bytenr, u64 num_bytes, u64 orig_parent,
                          u64 parent, u64 orig_ref_root, u64 ref_root,
                          u64 orig_ref_generation, u64 ref_generation,
                          u64 owner_objectid, int pin)
{
        struct btrfs_delayed_ref *ref;
        struct btrfs_delayed_ref *old_ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
        if (!old_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        /*
         * the parent = 0 case comes from cases where we don't actually
         * know the parent yet. It will get updated later via a add/drop
         * pair.
         */
        if (parent == 0)
                parent = bytenr;
        if (orig_parent == 0)
                orig_parent = bytenr;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                kfree(old_ref);
                return -ENOMEM;
        }
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
                                      (u64)-1, 0, 0, 0,
                                      BTRFS_UPDATE_DELAYED_HEAD, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
                                      parent, ref_root, ref_generation,
                                      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
                                      orig_parent, orig_ref_root,
                                      orig_ref_generation, owner_objectid,
                                      BTRFS_DROP_DELAYED_REF, pin);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}
#endif
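The comment in the removed block above describes queueing an add and a drop under a single lock acquisition rather than two separately locked calls. A rough userspace sketch of that pattern, with hypothetical names (op, op_queue, queue_update) and a pthread mutex standing in for the delayed-ref spinlock; build with "cc -pthread":

#include <pthread.h>
#include <stdio.h>

struct op {
        int add;             /* 1 = add a reference, 0 = drop a reference */
        long ref;            /* identifier of the thing being referenced */
        struct op *next;
};

struct op_queue {
        pthread_mutex_t lock;
        struct op *head;
};

/* caller must already hold q->lock */
static void enqueue_locked(struct op_queue *q, struct op *op)
{
        op->next = q->head;
        q->head = op;
}

/*
 * Equivalent to calling an "add" helper and a "drop" helper separately,
 * but the lock is taken and released only once for the pair.
 */
static void queue_update(struct op_queue *q, struct op *add, struct op *drop)
{
        pthread_mutex_lock(&q->lock);
        enqueue_locked(q, add);
        enqueue_locked(q, drop);
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct op_queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct op add = { 1, 100, NULL };
        struct op drop = { 0, 100, NULL };

        queue_update(&q, &add, &drop);
        for (struct op *o = q.head; o; o = o->next)
                printf("%s ref %ld\n", o->add ? "add" : "drop", o->ref);
        return 0;
}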
@@ -1348,35 +1348,6 @@ fail:
        return ERR_PTR(ret);
}

#if 0
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        if (root->in_sysfs)
                return root;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }

        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
        root->in_sysfs = 1;
        return root;
#endif

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
File diff suppressed because it is too large
fs/btrfs/inode.c (172 changed lines)
@@ -3093,178 +3093,6 @@ out:
        return err;
}

#if 0
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items. This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file. If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * it.
 *
 * This is just an optimization targeted at large files. It may do
 * nothing. It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct inode *inode, u64 new_size)
{
        struct btrfs_key key;
        int ret;
        int nritems;
        struct btrfs_key found_key;
        struct btrfs_key other_key;
        struct btrfs_leaf_ref *ref;
        u64 leaf_gen;
        u64 leaf_start;

        path->lowest_level = 1;
        key.objectid = inode->i_ino;
        key.type = BTRFS_CSUM_ITEM_KEY;
        key.offset = new_size;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (path->nodes[1] == NULL) {
                ret = 0;
                goto out;
        }
        ret = 0;
        btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
        nritems = btrfs_header_nritems(path->nodes[1]);

        if (!nritems)
                goto out;

        if (path->slots[1] >= nritems)
                goto next_node;

        /* did we find a key greater than anything we want to delete? */
        if (found_key.objectid > inode->i_ino ||
            (found_key.objectid == inode->i_ino && found_key.type > key.type))
                goto out;

        /* we check the next key in the node to make sure the leave contains
         * only checksum items. This comparison doesn't work if our
         * leaf is the last one in the node
         */
        if (path->slots[1] + 1 >= nritems) {
next_node:
                /* search forward from the last key in the node, this
                 * will bring us into the next node in the tree
                 */
                btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

                /* unlikely, but we inc below, so check to be safe */
                if (found_key.offset == (u64)-1)
                        goto out;

                /* search_forward needs a path with locks held, do the
                 * search again for the original key. It is possible
                 * this will race with a balance and return a path that
                 * we could modify, but this drop is just an optimization
                 * and is allowed to miss some leaves.
                 */
                btrfs_release_path(root, path);
                found_key.offset++;

                /* setup a max key for search_forward */
                other_key.offset = (u64)-1;
                other_key.type = key.type;
                other_key.objectid = key.objectid;

                path->keep_locks = 1;
                ret = btrfs_search_forward(root, &found_key, &other_key,
                                           path, 0, 0);
                path->keep_locks = 0;
                if (ret || found_key.objectid != key.objectid ||
                    found_key.type != key.type) {
                        ret = 0;
                        goto out;
                }

                key.offset = found_key.offset;
                btrfs_release_path(root, path);
                cond_resched();
                goto again;
        }

        /* we know there's one more slot after us in the tree,
         * read that key so we can verify it is also a checksum item
         */
        btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

        if (found_key.objectid < inode->i_ino)
                goto next_key;

        if (found_key.type != key.type || found_key.offset < new_size)
                goto next_key;

        /*
         * if the key for the next leaf isn't a csum key from this objectid,
         * we can't be sure there aren't good items inside this leaf.
         * Bail out
         */
        if (other_key.objectid != inode->i_ino || other_key.type != key.type)
                goto out;

        leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
        leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
        /*
         * it is safe to delete this leaf, it contains only
         * csum items from this inode at an offset >= new_size
         */
        ret = btrfs_del_leaf(trans, root, path, leaf_start);
        BUG_ON(ret);

        if (root->ref_cows && leaf_gen < trans->transid) {
                ref = btrfs_alloc_leaf_ref(root, 0);
                if (ref) {
                        ref->root_gen = root->root_key.offset;
                        ref->bytenr = leaf_start;
                        ref->owner = 0;
                        ref->generation = leaf_gen;
                        ref->nritems = 0;

                        btrfs_sort_leaf_ref(ref);

                        ret = btrfs_add_leaf_ref(root, ref, 0);
                        WARN_ON(ret);
                        btrfs_free_leaf_ref(root, ref);
                } else {
                        WARN_ON(1);
                }
        }
next_key:
        btrfs_release_path(root, path);

        if (other_key.objectid == inode->i_ino &&
            other_key.type == key.type && other_key.offset > key.offset) {
                key.offset = other_key.offset;
                cond_resched();
                goto again;
        }
        ret = 0;
out:
        /* fixup any changes we've made to the path */
        path->lowest_level = 0;
        path->keep_locks = 0;
        btrfs_release_path(root, path);
        return ret;
}

#endif

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
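The removed comment above rests on a simple invariant: a leaf can be freed without ever being read when the key that starts it and the key that starts the following leaf both fall inside the same inode's checksum range past the new size. A standalone sketch of just that boundary check, using simplified key structures and a placeholder item type rather than the real btrfs constants:

#include <stdbool.h>
#include <stdint.h>

struct key {
        uint64_t objectid;      /* inode number owning the item */
        uint8_t  type;          /* item type, e.g. a csum item */
        uint64_t offset;        /* byte offset the item covers */
};

#define CSUM_ITEM 0x10          /* placeholder, not the real on-disk value */

/*
 * first_key: lowest key stored in the candidate leaf
 * next_key:  lowest key stored in the leaf that follows it
 * ino:       inode being truncated
 * new_size:  size the file is being truncated to
 */
static bool leaf_is_droppable(struct key first_key, struct key next_key,
                              uint64_t ino, uint64_t new_size)
{
        /* everything in the leaf sorts at or after first_key ... */
        if (first_key.objectid != ino || first_key.type != CSUM_ITEM ||
            first_key.offset < new_size)
                return false;

        /* ... and before next_key, so if next_key is still a csum key for
         * the same inode, the leaf holds nothing but doomed csum items */
        if (next_key.objectid != ino || next_key.type != CSUM_ITEM)
                return false;

        return true;
}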
@@ -346,49 +346,6 @@ out_unlock:
        return ret;
}

#if 0
/*
 * rate limit against the drop_snapshot code. This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        int harder_count = 0;

harder:
        if (atomic_read(&info->throttles)) {
                DEFINE_WAIT(wait);
                int thr;
                thr = atomic_read(&info->throttle_gen);

                do {
                        prepare_to_wait(&info->transaction_throttle,
                                        &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&info->throttles)) {
                                finish_wait(&info->transaction_throttle, &wait);
                                break;
                        }
                        schedule();
                        finish_wait(&info->transaction_throttle, &wait);
                } while (thr == atomic_read(&info->throttle_gen));
                harder_count++;

                if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
                    harder_count < 2)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
                    harder_count < 10)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
                    harder_count < 20)
                        goto harder;
        }
}
#endif

void btrfs_throttle(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
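The removed throttle_on_drops above uses a generation counter to decide when to stop waiting: the sleeper records the counter, the heavy operation bumps it whenever it makes progress, and the sleeper wakes once throttling ends or at least one unit of progress has happened. A condition-variable analogue of that trick, as a userspace sketch with hypothetical names (not kernel code):

#include <pthread.h>

struct throttle {
        pthread_mutex_t lock;
        pthread_cond_t wake;    /* broadcast on any state change */
        int throttles;          /* number of heavy operations running */
        unsigned long gen;      /* bumped each time a heavy op makes progress */
};

/* called by the heavy operation after each chunk of work */
void throttle_progress(struct throttle *t)
{
        pthread_mutex_lock(&t->lock);
        t->gen++;
        pthread_cond_broadcast(&t->wake);
        pthread_mutex_unlock(&t->lock);
}

/* called by new work that should yield to the heavy operation */
void throttle_wait(struct throttle *t)
{
        pthread_mutex_lock(&t->lock);
        if (t->throttles) {
                unsigned long thr = t->gen;

                /* stop waiting once throttling ends or progress was made */
                while (t->throttles && t->gen == thr)
                        pthread_cond_wait(&t->wake, &t->lock);
        }
        pthread_mutex_unlock(&t->lock);
}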
@@ -808,97 +765,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
        return ret;
}

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
        DEFINE_WAIT(wait);

        mutex_lock(&info->trans_mutex);
        while (info->running_transaction &&
               info->running_transaction->delayed_refs.flushing) {
                prepare_to_wait(&info->transaction_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                mutex_unlock(&info->trans_mutex);

                schedule();

                mutex_lock(&info->trans_mutex);
                finish_wait(&info->transaction_wait, &wait);
        }
        mutex_unlock(&info->trans_mutex);
        return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        unsigned long nr;
        int ret;

        while (1) {
                /*
                 * we don't want to jump in and create a bunch of
                 * delayed refs if the transaction is starting to close
                 */
                wait_transaction_pre_flush(tree_root->fs_info);
                trans = btrfs_start_transaction(tree_root, 1);

                /*
                 * we've joined a transaction, make sure it isn't
                 * closing right now
                 */
                if (trans->transaction->delayed_refs.flushing) {
                        btrfs_end_transaction(trans, tree_root);
                        continue;
                }

                ret = btrfs_drop_snapshot(trans, root);
                if (ret != -EAGAIN)
                        break;

                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        break;

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        BUG_ON(ret);

        ret = btrfs_del_root(trans, tree_root, &root->root_key);
        BUG_ON(ret);

        nr = trans->blocks_used;
        ret = btrfs_end_transaction(trans, tree_root);
        BUG_ON(ret);

        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);

        btrfs_btree_balance_dirty(tree_root, nr);
        return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation