commit 3e49363be6
btrfs: send: cache utimes operations for directories if possible
Whenever we add or remove an entry to a directory, we issue a utimes command for the directory. If we add 1000 entries to a directory (create 1000 files under it or move 1000 files to it), then we issue the same utimes command 1000 times, which increases the send stream size and results in more pipe IO, one search in the send b+tree and one path allocation per duplicated command, etc., as well as making the receiver do a system call for each duplicated utimes command.

We also issue a utimes command when we create a new directory, but later we might add entries to it corresponding to inodes with a higher inode number, so it's pointless to issue the utimes command before we create the last inode under the directory.

So use an LRU cache to track directories for which we must send a utimes command. When we need to remove an entry from the cache, we issue the utimes command for the respective directory. When finishing the send operation, we go over each cache element and issue the respective utimes command. Finally, the caching is entirely optional, just a performance optimization, meaning that if we fail to cache (due to memory allocation failure), we issue the utimes command right away, that is, we fall back to the previous, unoptimized behaviour.

This patch belongs to a patchset comprised of the following patches:

  btrfs: send: directly return from did_overwrite_ref() and simplify it
  btrfs: send: avoid unnecessary generation search at did_overwrite_ref()
  btrfs: send: directly return from will_overwrite_ref() and simplify it
  btrfs: send: avoid extra b+tree searches when checking reference overrides
  btrfs: send: remove send_progress argument from can_rmdir()
  btrfs: send: avoid duplicated orphan dir allocation and initialization
  btrfs: send: avoid unnecessary orphan dir rbtree search at can_rmdir()
  btrfs: send: reduce searches on parent root when checking if dir can be removed
  btrfs: send: iterate waiting dir move rbtree only once when processing refs
  btrfs: send: initialize all the red black trees earlier
  btrfs: send: genericize the backref cache to allow it to be reused
  btrfs: adapt lru cache to allow for 64 bits keys on 32 bits systems
  btrfs: send: cache information about created directories
  btrfs: allow a generation number to be associated with lru cache entries
  btrfs: add an api to delete a specific entry from the lru cache
  btrfs: send: use the lru cache to implement the name cache
  btrfs: send: update size of roots array for backref cache entries
  btrfs: send: cache utimes operations for directories if possible

The following test was run before and after applying the whole patchset, and on a non-debug kernel (Debian's default kernel config):

  #!/bin/bash

  MNT=/mnt/sdi
  DEV=/dev/sdi

  mkfs.btrfs -f $DEV > /dev/null
  mount $DEV $MNT

  mkdir $MNT/A
  for ((i = 1; i <= 20000; i++)); do
      echo -n > $MNT/A/file_$i
  done

  btrfs subvolume snapshot -r $MNT $MNT/snap1

  mkdir $MNT/B
  for ((i = 20000; i <= 40000; i++)); do
      echo -n > $MNT/B/file_$i
  done

  mv $MNT/A/file_* $MNT/B/

  btrfs subvolume snapshot -r $MNT $MNT/snap2

  start=$(date +%s%N)
  btrfs send -p $MNT/snap1 $MNT/snap2 > /dev/null
  end=$(date +%s%N)

  dur=$(( (end - start) / 1000000 ))
  echo "Incremental send took $dur milliseconds"

  umount $MNT

Before the whole patchset: 18408 milliseconds
After the whole patchset: 1942 milliseconds (9.5x speedup)

Using 60000 files instead of 40000:

Before the whole patchset: 39764 milliseconds
After the whole patchset: 3076 milliseconds (12.9x speedup)

Using 20000 files instead of 40000:

Before the whole patchset: 5072 milliseconds
After the whole patchset: 916 milliseconds (5.5x speedup)

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
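To make the description above concrete, here is a hedged sketch of the caching pattern the commit describes, written against the lru_cache API shown in the file below. The send_ctx field name (dir_utimes_cache), the cache_dir_utimes() helper and the send_utimes() call are illustrative assumptions about send.c, not necessarily the exact upstream code; eviction-time flushing and the final flush at the end of the send operation are handled elsewhere and not shown.

/*
 * Illustrative sketch only: the dir_utimes_cache field, cache_dir_utimes()
 * and send_utimes() names are assumptions, not necessarily upstream send.c.
 */
static int cache_dir_utimes(struct send_ctx *sctx, u64 dir, u64 gen)
{
        struct btrfs_lru_cache_entry *entry;
        int ret;

        /* Already tracked? The lookup also refreshes its LRU position. */
        if (btrfs_lru_cache_lookup(&sctx->dir_utimes_cache, dir, gen))
                return 0;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                /* Caching is optional: fall back to sending utimes now. */
                return send_utimes(sctx, dir, gen);

        entry->key = dir;
        entry->gen = gen;

        ret = btrfs_lru_cache_store(&sctx->dir_utimes_cache, entry, GFP_KERNEL);
        if (ret) {
                kfree(entry);
                /* Same fallback if the store fails for any reason. */
                return send_utimes(sctx, dir, gen);
        }

        return 0;
}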
fs/btrfs/lru_cache.c (167 lines, 3.8 KiB, C)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include "lru_cache.h"
#include "messages.h"

/*
 * Initialize a cache object.
 *
 * @cache:    The cache.
 * @max_size: Maximum size (number of entries) for the cache.
 *            Use 0 for unlimited size; it's the user's responsibility to
 *            trim the cache in that case.
 */
void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
{
        INIT_LIST_HEAD(&cache->lru_list);
        mt_init(&cache->entries);
        cache->size = 0;
        cache->max_size = max_size;
}

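/*
 * Each maple tree slot stores a list of entries rather than a single entry
 * because, on 32 bits systems, the maple tree index is an unsigned long
 * (32 bits wide), so two different 64 bits keys can land on the same slot.
 * The per slot list, searched by match_entry(), disambiguates such
 * collisions using the full 64 bits key and the generation number.
 */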
static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key,
                                                 u64 gen)
{
        struct btrfs_lru_cache_entry *entry;

        list_for_each_entry(entry, head, list) {
                if (entry->key == key && entry->gen == gen)
                        return entry;
        }

        return NULL;
}

/*
 * Look up an entry in the cache.
 *
 * @cache: The cache.
 * @key:   The key of the entry we are looking for.
 * @gen:   Generation associated to the key.
 *
 * Returns the entry associated with the key or NULL if none found.
 */
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
                                                     u64 key, u64 gen)
{
        struct list_head *head;
        struct btrfs_lru_cache_entry *entry;

        head = mtree_load(&cache->entries, key);
        if (!head)
                return NULL;

        entry = match_entry(head, key, gen);
        /* A hit makes the entry the most recently used one. */
        if (entry)
                list_move_tail(&entry->lru_list, &cache->lru_list);

        return entry;
}

/*
 * Remove an entry from the cache.
 *
 * @cache: The cache to remove from.
 * @entry: The entry to remove from the cache.
 *
 * Note: this also frees the memory used by the entry.
 */
void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
                            struct btrfs_lru_cache_entry *entry)
{
        struct list_head *prev = entry->list.prev;

        ASSERT(cache->size > 0);
        ASSERT(!mtree_empty(&cache->entries));

        list_del(&entry->list);
        list_del(&entry->lru_list);

        if (list_empty(prev)) {
                struct list_head *head;

                /*
                 * If the previous element in the list entry->list is now
                 * empty, it means it's a head entry not pointing to any
                 * cached entries, so remove it from the maple tree and
                 * free it.
                 */
                head = mtree_erase(&cache->entries, entry->key);
                ASSERT(head == prev);
                kfree(head);
        }

        kfree(entry);
        cache->size--;
}

/*
 * Store an entry in the cache.
 *
 * @cache:     The cache.
 * @new_entry: The entry to store.
 * @gfp:       Allocation flags.
 *
 * Returns 0 on success and < 0 on error.
 */
int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
                          struct btrfs_lru_cache_entry *new_entry,
                          gfp_t gfp)
{
        const u64 key = new_entry->key;
        struct list_head *head;
        int ret;

        head = kmalloc(sizeof(*head), gfp);
        if (!head)
                return -ENOMEM;

        ret = mtree_insert(&cache->entries, key, head, gfp);
        if (ret == 0) {
                INIT_LIST_HEAD(head);
                list_add_tail(&new_entry->list, head);
        } else if (ret == -EEXIST) {
                /*
                 * A head for this key's slot already exists in the maple tree
                 * (same key with a different generation, or a different key
                 * colliding on 32 bits systems). Add the new entry to that
                 * head's list, unless an entry with the same key and
                 * generation is already cached.
                 */
                kfree(head);
                head = mtree_load(&cache->entries, key);
                ASSERT(head != NULL);
                if (match_entry(head, key, new_entry->gen) != NULL)
                        return -EEXIST;
                list_add_tail(&new_entry->list, head);
        } else if (ret < 0) {
                kfree(head);
                return ret;
        }

        /* If the cache is limited and full, evict the least recently used entry. */
        if (cache->max_size > 0 && cache->size == cache->max_size) {
                struct btrfs_lru_cache_entry *lru_entry;

                lru_entry = list_first_entry(&cache->lru_list,
                                             struct btrfs_lru_cache_entry,
                                             lru_list);
                btrfs_lru_cache_remove(cache, lru_entry);
        }

        list_add_tail(&new_entry->lru_list, &cache->lru_list);
        cache->size++;

        return 0;
}

/*
 * Empty a cache.
 *
 * @cache: The cache to empty.
 *
 * Removes all entries from the cache.
 */
void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
{
        struct btrfs_lru_cache_entry *entry;
        struct btrfs_lru_cache_entry *tmp;

        list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
                btrfs_lru_cache_remove(cache, entry);

        ASSERT(cache->size == 0);
        ASSERT(mtree_empty(&cache->entries));
}
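For reference, here is a minimal, hypothetical sketch of how a user of the API above is expected to embed and drive the cache; the struct name, the inode-number key and the mtime field are illustrative assumptions, not code from the kernel tree. Entries must be kmalloc()-allocated, with the btrfs_lru_cache_entry as the first member, since btrfs_lru_cache_remove() frees them with kfree().

/* Hypothetical user of the LRU cache API above (not from the kernel tree). */
struct my_dir_entry {
        /* Must be the first member; the cache frees entries with kfree(). */
        struct btrfs_lru_cache_entry entry;
        /* User data associated with the cached key. */
        u64 mtime;
};

static int example_usage(void)
{
        struct btrfs_lru_cache cache;
        struct btrfs_lru_cache_entry *hit;
        struct my_dir_entry *dir;
        int ret;

        /* Keep at most 64 entries; older ones are evicted on store. */
        btrfs_lru_cache_init(&cache, 64);

        dir = kmalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir)
                return -ENOMEM;

        dir->entry.key = 257;   /* For example, an inode number. */
        dir->entry.gen = 1;     /* Or 0 if generations are not needed. */
        dir->mtime = 0;

        ret = btrfs_lru_cache_store(&cache, &dir->entry, GFP_KERNEL);
        if (ret) {
                kfree(dir);
                return ret;
        }

        /* Lookups require the same (key, generation) pair. */
        hit = btrfs_lru_cache_lookup(&cache, 257, 1);
        if (hit)
                dir = container_of(hit, struct my_dir_entry, entry);

        /* Frees every remaining entry and the per key list heads. */
        btrfs_lru_cache_clear(&cache);
        return 0;
}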