// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "trace.h"

#include <linux/sched/mm.h>

static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
	return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset	= offsetof(struct bkey_cached, hash),
	.key_offset	= offsetof(struct bkey_cached, key),
	.key_len	= sizeof(struct bkey_cached_key),
	.obj_cmpfn	= bch2_btree_key_cache_cmp_fn,
};

__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

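/*
 * Try to take the locks needed to evict a cached key: both the intent and
 * write locks must be acquired without blocking, and dirty entries are never
 * evicted.
 */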
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

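/* Remove an entry from the hash table and drop it from the nr_keys count. */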
static void bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
				      bch2_btree_key_cache_params));
	memset(&ck->key, ~0, sizeof(ck->key));

	atomic_long_dec(&c->nr_keys);
}

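/*
 * Put a clean entry on one of the freed lists under the key cache lock; the
 * SRCU barrier sequence is recorded so the entry isn't reused while other
 * btree transactions may still be referencing it.
 */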
static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	if (ck->c.lock.readers)
		list_move_tail(&ck->list, &bc->freed_pcpu);
	else
		list_move_tail(&ck->list, &bc->freed_nonpcpu);
	atomic_long_inc(&bc->nr_freed);

	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);
}

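/*
 * Insert an entry into the non-percpu freed list, keeping it ordered by SRCU
 * barrier sequence (newest at the tail) so the shrinker can stop scanning at
 * the first entry that is still too new to free.
 */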
#ifdef __KERNEL__
static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
						   struct bkey_cached *ck)
{
	struct bkey_cached *pos;

	list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
		if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
				 pos->btree_trans_barrier_seq)) {
			list_move(&ck->list, &pos->list);
			return;
		}
	}

	list_move(&ck->list, &bc->freed_nonpcpu);
}
#endif

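/*
 * Return a freed entry to the appropriate freelist: for entries without
 * percpu readers, prefer the per-cpu array (spilling half of it to the
 * ordered global list when full); otherwise use the global freed_pcpu list.
 */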
static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
					 struct bkey_cached *ck)
{
	BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

	if (!ck->c.lock.readers) {
#ifdef __KERNEL__
		struct btree_key_cache_freelist *f;
		bool freed = false;

		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);

		if (f->nr < ARRAY_SIZE(f->objs)) {
			f->objs[f->nr++] = ck;
			freed = true;
		}
		preempt_enable();

		if (!freed) {
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);

			while (f->nr > ARRAY_SIZE(f->objs) / 2) {
				struct bkey_cached *ck2 = f->objs[--f->nr];

				__bkey_cached_move_to_freelist_ordered(bc, ck2);
			}
			preempt_enable();

			__bkey_cached_move_to_freelist_ordered(bc, ck);
			mutex_unlock(&bc->lock);
		}
#else
		mutex_lock(&bc->lock);
		list_move_tail(&ck->list, &bc->freed_nonpcpu);
		mutex_unlock(&bc->lock);
#endif
	} else {
		mutex_lock(&bc->lock);
		list_move_tail(&ck->list, &bc->freed_pcpu);
		mutex_unlock(&bc->lock);
	}
}

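/*
 * Free path used after evicting an entry from the hash table: record the
 * SRCU barrier sequence, free the key buffer and return the entry to a
 * freelist, then drop its locks.
 */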
static void bkey_cached_free_fast(struct btree_key_cache *bc,
				  struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	list_del_init(&ck->list);
	atomic_long_inc(&bc->nr_freed);

	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	bkey_cached_move_to_freelist(bc, ck);

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);
}

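/*
 * Allocate a bkey_cached, preferring the per-cpu freelist, then the global
 * freed lists, and finally the slab; entries taken from a freelist are
 * relocked (intent + write) before being returned.
 */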
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
		  bool *was_new)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = NULL;
	bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
	int ret;

	if (!pcpu_readers) {
#ifdef __KERNEL__
		struct btree_key_cache_freelist *f;

		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);
		if (f->nr)
			ck = f->objs[--f->nr];
		preempt_enable();

		if (!ck) {
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);

			while (!list_empty(&bc->freed_nonpcpu) &&
			       f->nr < ARRAY_SIZE(f->objs) / 2) {
				ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
				list_del_init(&ck->list);
				f->objs[f->nr++] = ck;
			}

			ck = f->nr ? f->objs[--f->nr] : NULL;
			preempt_enable();
			mutex_unlock(&bc->lock);
		}
#else
		mutex_lock(&bc->lock);
		if (!list_empty(&bc->freed_nonpcpu)) {
			ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
			list_del_init(&ck->list);
		}
		mutex_unlock(&bc->lock);
#endif
	} else {
		mutex_lock(&bc->lock);
		if (!list_empty(&bc->freed_pcpu)) {
			ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
			list_del_init(&ck->list);
		}
		mutex_unlock(&bc->lock);
	}

	if (ck) {
		int ret;

		ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
		if (unlikely(ret)) {
			bkey_cached_move_to_freelist(bc, ck);
			return ERR_PTR(ret);
		}

		path->l[0].b = (void *) ck;
		path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);

		ret = bch2_btree_node_lock_write(trans, path, &ck->c);
		if (unlikely(ret)) {
			btree_node_unlock(trans, path, 0);
			bkey_cached_move_to_freelist(bc, ck);
			return ERR_PTR(ret);
		}

		return ck;
	}

	ck = allocate_dropping_locks(trans, ret,
				     kmem_cache_zalloc(bch2_key_cache, _gfp));
	if (ret) {
		kmem_cache_free(bch2_key_cache, ck);
		return ERR_PTR(ret);
	}

	if (!ck)
		return NULL;

	INIT_LIST_HEAD(&ck->list);
	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);

	ck->c.cached = true;
	BUG_ON(!six_trylock_intent(&ck->c.lock));
	BUG_ON(!six_trylock_write(&ck->c.lock));
	*was_new = true;
	return ck;
}

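/*
 * When allocation fails, walk the hash table looking for a clean entry that
 * can be locked for eviction and reused in place of a new allocation.
 */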
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct bkey_cached *ck;
	unsigned i;

	mutex_lock(&c->lock);
	rcu_read_lock();
	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				bkey_cached_evict(c, ck);
				goto out;
			}
		}
	ck = NULL;
out:
	rcu_read_unlock();
	mutex_unlock(&c->lock);
	return ck;
}

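/*
 * Allocate (or reuse) a cached key for @path and insert it into the hash
 * table; returns NULL if we raced with another thread creating the same
 * entry, in which case the caller retries the lookup.
 */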
static struct bkey_cached *
btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck;
	bool was_new = false;

	ck = bkey_cached_alloc(trans, path, &was_new);
	if (IS_ERR(ck))
		return ck;

	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(bc);
		if (unlikely(!ck)) {
			bch_err(c, "error allocating memory for key cache item, btree %s",
				bch2_btree_ids[path->btree_id]);
			return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
		}

		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
	}

	ck->c.level		= 0;
	ck->c.btree_id		= path->btree_id;
	ck->key.btree_id	= path->btree_id;
	ck->key.pos		= path->pos;
	ck->valid		= false;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
					  &ck->hash,
					  bch2_btree_key_cache_params))) {
		/* We raced with another fill: */

		if (likely(was_new)) {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
			kfree(ck);
		} else {
			bkey_cached_free_fast(bc, ck);
		}

		mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
		return NULL;
	}

	atomic_long_inc(&bc->nr_keys);

	six_unlock_write(&ck->c.lock);

	return ck;
}

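/*
 * Read the current value of the key from the btree and copy it into the
 * cached entry, reallocating the entry's buffer (with some slack) if it is
 * too small.
 */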
static int btree_key_cache_fill(struct btree_trans *trans,
				struct btree_path *ck_path,
				struct bkey_cached *ck)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	unsigned new_u64s = 0;
	struct bkey_i *new_k = NULL;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, ck->key.btree_id, ck->key.pos,
			       BTREE_ITER_KEY_CACHE_FILL|
			       BTREE_ITER_CACHED_NOFILL);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
		trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
		goto err;
	}

	/*
	 * bch2_varint_decode can read past the end of the buffer by at
	 * most 7 bytes (it won't be used):
	 */
	new_u64s = k.k->u64s + 1;

	/*
	 * Allocate some extra space so that the transaction commit path is less
	 * likely to have to reallocate, since that requires a transaction
	 * restart:
	 */
	new_u64s = min(256U, (new_u64s * 3) / 2);

	if (new_u64s > ck->u64s) {
		new_u64s = roundup_pow_of_two(new_u64s);
		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
		if (!new_k) {
			bch2_trans_unlock(trans);

			new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
			if (!new_k) {
				bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
					bch2_btree_ids[ck->key.btree_id], new_u64s);
				ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
				goto err;
			}

			if (!bch2_btree_node_relock(trans, ck_path, 0)) {
				kfree(new_k);
				trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
				ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
				goto err;
			}

			ret = bch2_trans_relock(trans);
			if (ret) {
				kfree(new_k);
				goto err;
			}
		}
	}

	ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
	if (ret) {
		kfree(new_k);
		goto err;
	}

	if (new_k) {
		kfree(ck->k);
		ck->u64s = new_u64s;
		ck->k = new_k;
	}

	bkey_reassemble(ck->k, k);
	ck->valid = true;
	bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);

	/* We're not likely to need this iterator again: */
	set_btree_iter_dontneed(&iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static noinline int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
					 unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;

	BUG_ON(path->level);

	path->l[1].b = NULL;

	if (bch2_btree_node_relock_notrace(trans, path, 0)) {
		ck = (void *) path->l[0].b;
		goto fill;
	}
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck) {
		ck = btree_key_cache_create(trans, path);
		ret = PTR_ERR_OR_ZERO(ck);
		if (ret)
			goto err;
		if (!ck)
			goto retry;

		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
		path->locks_want = 1;
	} else {
		enum six_lock_type lock_want = __btree_lock_want(path, 0);

		ret = btree_node_lock(trans, path, (void *) ck, 0,
				      lock_want, _THIS_IP_);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto err;

		BUG_ON(ret);

		if (ck->key.btree_id != path->btree_id ||
		    !bpos_eq(ck->key.pos, path->pos)) {
			six_unlock_type(&ck->c.lock, lock_want);
			goto retry;
		}

		mark_btree_node_locked(trans, path, 0, lock_want);
	}

	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
fill:
	path->uptodate = BTREE_ITER_UPTODATE;

	if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
		/*
		 * Using the underscore version because we haven't set
		 * path->uptodate yet:
		 */
		if (!path->locks_want &&
		    !__bch2_btree_path_upgrade(trans, path, 1)) {
			trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
			goto err;
		}

		ret = btree_key_cache_fill(trans, path, ck);
		if (ret)
			goto err;

		ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
		if (ret)
			goto err;

		path->uptodate = BTREE_ITER_UPTODATE;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
	BUG_ON(path->uptodate);

	return ret;
err:
	path->uptodate = BTREE_ITER_NEED_TRAVERSE;
	if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(ret);
	}
	return ret;
}

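/*
 * Fast path for traversing to a cached key: falls back to the slowpath when
 * the entry doesn't exist yet or hasn't been filled.
 */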
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
				    unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;

	EBUG_ON(path->level);

	path->l[1].b = NULL;

	if (bch2_btree_node_relock_notrace(trans, path, 0)) {
		ck = (void *) path->l[0].b;
		goto fill;
	}
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck) {
		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
	} else {
		enum six_lock_type lock_want = __btree_lock_want(path, 0);

		ret = btree_node_lock(trans, path, (void *) ck, 0,
				      lock_want, _THIS_IP_);
		EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret)
			return ret;

		if (ck->key.btree_id != path->btree_id ||
		    !bpos_eq(ck->key.pos, path->pos)) {
			six_unlock_type(&ck->c.lock, lock_want);
			goto retry;
		}

		mark_btree_node_locked(trans, path, 0, lock_want);
	}

	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
fill:
	if (!ck->valid)
		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	path->uptodate = BTREE_ITER_UPTODATE;
	EBUG_ON(!ck->valid);
	EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

	return ret;
}

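/*
 * Flush a single dirty key from the key cache back to the btree, optionally
 * evicting the cached entry afterwards; called from journal reclaim and from
 * bch2_btree_key_cache_flush().
 */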
static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter c_iter, b_iter;
	struct bkey_cached *ck = NULL;
	int ret;

	bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
			     BTREE_ITER_SLOTS|
			     BTREE_ITER_INTENT|
			     BTREE_ITER_ALL_SNAPSHOTS);
	bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;

	ret = bch2_btree_iter_traverse(&c_iter);
	if (ret)
		goto out;

	ck = (void *) c_iter.path->l[0].b;
	if (!ck)
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (evict)
			goto evict;
		goto out;
	}

	BUG_ON(!ck->valid);

	if (journal_seq && ck->journal.seq != journal_seq)
		goto out;

	/*
	 * Since journal reclaim depends on us making progress here, and the
	 * allocator/copygc depend on journal reclaim making progress, we need
	 * to be using alloc reserves:
	 */
	ret   = bch2_btree_iter_traverse(&b_iter) ?:
		bch2_trans_update(trans, &b_iter, ck->k,
				  BTREE_UPDATE_KEY_CACHE_RECLAIM|
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				  BTREE_TRIGGER_NORUN) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_USE_RESERVE|
				  (ck->journal.seq == journal_last_seq(j)
				   ? JOURNAL_WATERMARK_reserved
				   : 0)|
				  commit_flags);

	bch2_fs_fatal_err_on(ret &&
			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
			     !bch2_journal_error(j), c,
			     "error flushing key cache: %s", bch2_err_str(ret));
	if (ret)
		goto out;

	bch2_journal_pin_drop(j, &ck->journal);
	bch2_journal_preres_put(j, &ck->res);

	BUG_ON(!btree_node_locked(c_iter.path, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
		struct btree_path *path2;
evict:
		trans_for_each_path(trans, path2)
			if (path2 != c_iter.path)
				__bch2_btree_path_unlock(trans, path2);

		bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
		bkey_cached_evict(&c->btree_key_cache, ck);
		bkey_cached_free_fast(&c->btree_key_cache, ck);
	}
out:
	bch2_trans_iter_exit(trans, &b_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	return ret;
}

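/*
 * Journal pin flush callback: flushes the cached key whose journal pin is
 * holding up reclaim at sequence number @seq.
 */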
int bch2_btree_key_cache_journal_flush(struct journal *j,
				       struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans trans;
	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	btree_node_lock_nopath_nofail(&trans, &ck->c, SIX_LOCK_read);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}

	if (ck->seq != seq) {
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	ret = commit_do(&trans, NULL, NULL, 0,
		btree_key_cache_flush_pos(&trans, key, seq,
				BTREE_INSERT_JOURNAL_RECLAIM, false));
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	bch2_trans_exit(&trans);
	return ret;
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
			       enum btree_id id, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached_key key = { id, pos };

	/* Fastpath - assume it won't be found: */
	if (!bch2_btree_key_cache_find(c, id, pos))
		return 0;

	return btree_key_cache_flush_pos(trans, key, 0, 0, true);
}

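/*
 * Update a cached key in place during a transaction commit: copy in the new
 * value, mark the entry dirty, and pin the journal sequence so that journal
 * reclaim will eventually flush it back to the btree.
 */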
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  unsigned flags,
				  struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) insert_entry->path->l[0].b;
	struct bkey_i *insert = insert_entry->k;
	bool kick_reclaim = false;

	BUG_ON(insert->k.u64s > ck->u64s);

	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		int difference;

		BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);

		difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
		if (difference > 0) {
			trans->journal_preres.u64s	-= difference;
			ck->res.u64s			+= difference;
		}
	}

	bkey_copy(ck->k, insert);
	ck->valid = true;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	/*
	 * To minimize lock contention, we only add the journal pin here and
	 * defer pin updates to the flush callback via ->seq. Be careful not to
	 * update ->seq on nojournal commits because we don't want to update the
	 * pin to a seq that doesn't include journal updates on disk. Otherwise
	 * we risk losing the update after a crash.
	 *
	 * The only exception is if the pin is not active in the first place. We
	 * have to add the pin because journal reclaim drives key cache
	 * flushing. The flush callback will not proceed unless ->seq matches
	 * the latest pin, so make sure it starts with a consistent value.
	 */
	if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
	    !journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
	}
	bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
			     &ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
			       struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;

	BUG_ON(!ck->valid);

	/*
	 * We just did an update to the btree, bypassing the key cache: the key
	 * cache key is now stale and must be dropped, even if dirty:
	 */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_dec(&c->btree_key_cache.nr_dirty);
		bch2_journal_pin_drop(&c->journal, &ck->journal);
	}

	ck->valid = false;
}

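/*
 * Shrinker scan callback: first reclaims entries on the freed lists whose
 * SRCU grace period has elapsed, then walks the hash table evicting clean,
 * unaccessed entries.
 */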
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct bch_fs *c = container_of(shrink, struct bch_fs,
					btree_key_cache.shrink);
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck, *t;
	size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned start, flags;
	int srcu_idx;

	mutex_lock(&bc->lock);
	srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	flags = memalloc_nofs_save();

	/*
	 * Newest freed entries are at the end of the list - once we hit one
	 * that's too new to be freed, we can bail out:
	 */
	list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
		if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
						 ck->btree_trans_barrier_seq))
			break;

		list_del(&ck->list);
		six_lock_exit(&ck->c.lock);
		kmem_cache_free(bch2_key_cache, ck);
		atomic_long_dec(&bc->nr_freed);
		scanned++;
		freed++;
	}

	if (scanned >= nr)
		goto out;

	list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
		if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
						 ck->btree_trans_barrier_seq))
			break;

		list_del(&ck->list);
		six_lock_exit(&ck->c.lock);
		kmem_cache_free(bch2_key_cache, ck);
		atomic_long_dec(&bc->nr_freed);
		scanned++;
		freed++;
	}

	if (scanned >= nr)
		goto out;

	rcu_read_lock();
	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
	if (bc->shrink_iter >= tbl->size)
		bc->shrink_iter = 0;
	start = bc->shrink_iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));

		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
			ck = container_of(pos, struct bkey_cached, hash);

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
				goto next;

			if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
				clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
			else if (bkey_cached_lock_for_evict(ck)) {
				bkey_cached_evict(bc, ck);
				bkey_cached_free(bc, ck);
			}

			scanned++;
			if (scanned >= nr)
				break;
next:
			pos = next;
		}

		bc->shrink_iter++;
		if (bc->shrink_iter >= tbl->size)
			bc->shrink_iter = 0;
	} while (scanned < nr && bc->shrink_iter != start);

	rcu_read_unlock();
out:
	memalloc_nofs_restore(flags);
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
	mutex_unlock(&bc->lock);

	return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
{
	struct bch_fs *c = container_of(shrink, struct bch_fs,
					btree_key_cache.shrink);
	struct btree_key_cache *bc = &c->btree_key_cache;
	long nr = atomic_long_read(&bc->nr_keys) -
		atomic_long_read(&bc->nr_dirty);

	return max(0L, nr);
}

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bucket_table *tbl;
	struct bkey_cached *ck, *n;
	struct rhash_head *pos;
	LIST_HEAD(items);
	unsigned i;
#ifdef __KERNEL__
	int cpu;
#endif

	unregister_shrinker(&bc->shrink);

	mutex_lock(&bc->lock);

	/*
	 * The loop is needed to guard against racing with rehash:
	 */
	while (atomic_long_read(&bc->nr_keys)) {
		rcu_read_lock();
		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
		if (tbl)
			for (i = 0; i < tbl->size; i++)
				rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
					bkey_cached_evict(bc, ck);
					list_add(&ck->list, &items);
				}
		rcu_read_unlock();
	}

#ifdef __KERNEL__
	for_each_possible_cpu(cpu) {
		struct btree_key_cache_freelist *f =
			per_cpu_ptr(bc->pcpu_freed, cpu);

		for (i = 0; i < f->nr; i++) {
			ck = f->objs[i];
			list_add(&ck->list, &items);
		}
	}
#endif

	list_splice(&bc->freed_pcpu,	&items);
	list_splice(&bc->freed_nonpcpu,	&items);

	mutex_unlock(&bc->lock);

	list_for_each_entry_safe(ck, n, &items, list) {
		cond_resched();

		bch2_journal_pin_drop(&c->journal, &ck->journal);
		bch2_journal_preres_put(&c->journal, &ck->res);

		list_del(&ck->list);
		kfree(ck->k);
		six_lock_exit(&ck->c.lock);
		kmem_cache_free(bch2_key_cache, ck);
	}

	if (atomic_long_read(&bc->nr_dirty) &&
	    !bch2_journal_error(&c->journal) &&
	    test_bit(BCH_FS_WAS_RW, &c->flags))
		panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
		      atomic_long_read(&bc->nr_dirty));

	if (atomic_long_read(&bc->nr_keys))
		panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
		      atomic_long_read(&bc->nr_keys));

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);

	free_percpu(bc->pcpu_freed);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->freed_pcpu);
	INIT_LIST_HEAD(&c->freed_nonpcpu);
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

#ifdef __KERNEL__
	bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
	if (!bc->pcpu_freed)
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
#endif

	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	bc->table_init_done = true;

	bc->shrink.seeks		= 0;
	bc->shrink.count_objects	= bch2_btree_key_cache_count;
	bc->shrink.scan_objects		= bch2_btree_key_cache_scan;
	if (register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
	return 0;
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
	prt_printf(out, "nr_freed:\t%zu", atomic_long_read(&c->nr_freed));
	prt_newline(out);
	prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
	prt_newline(out);
	prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
	prt_newline(out);
}

void bch2_btree_key_cache_exit(void)
{
	kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
	bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
	if (!bch2_key_cache)
		return -ENOMEM;

	return 0;
}