// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "trace.h"

#include <linux/sched/mm.h>

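/*
 * Which btrees get six locks with per-cpu reader counts for their key cache
 * entries: currently only the subvolumes btree, presumably because its keys
 * are read-locked far more often than they are written:
 */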
static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
	return id == BTREE_ID_subvolumes;
}

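/* Slab cache that all struct bkey_cached allocations come from: */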
static struct kmem_cache *bch2_key_cache;

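/*
 * The key cache is an rhashtable keyed by (btree_id, pos); the comparison
 * function and table parameters below describe that layout:
 */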
static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset	= offsetof(struct bkey_cached, hash),
	.key_offset	= offsetof(struct bkey_cached, key),
	.key_len	= sizeof(struct bkey_cached_key),
	.obj_cmpfn	= bch2_btree_key_cache_cmp_fn,
};

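/*
 * Lockless lookup of a cached key by (btree_id, pos);
 * rhashtable_lookup_fast() takes rcu_read_lock() internally:
 */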
__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

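/*
 * Try to take intent and write locks without blocking; fails for dirty
 * entries, which must be flushed before they can be evicted:
 */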
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

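/*
 * Remove an entry from the hashtable and poison its key so a racing lookup
 * through a stale pointer can't match it:
 */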
static void bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
				      bch2_btree_key_cache_params));
	memset(&ck->key, ~0, sizeof(ck->key));

	atomic_long_dec(&c->nr_keys);
}

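/*
 * Move a clean entry to the appropriate freed list; the memory can't be
 * reused until an SRCU grace period elapses, since lookups are done
 * locklessly under the btree_trans_barrier SRCU:
 */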
static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	if (ck->c.lock.readers) {
		list_move_tail(&ck->list, &bc->freed_pcpu);
		bc->nr_freed_pcpu++;
	} else {
		list_move_tail(&ck->list, &bc->freed_nonpcpu);
		bc->nr_freed_nonpcpu++;
	}
	atomic_long_inc(&bc->nr_freed);

	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);
}

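/*
 * Insert into the nonpcpu freed list, keeping it ordered by SRCU barrier
 * sequence number so the shrinker can stop scanning at the first entry whose
 * grace period hasn't elapsed yet:
 */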
#ifdef __KERNEL__
static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
						   struct bkey_cached *ck)
{
	struct bkey_cached *pos;

	bc->nr_freed_nonpcpu++;

	list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
		if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
				 pos->btree_trans_barrier_seq)) {
			list_move(&ck->list, &pos->list);
			return;
		}
	}

	list_move(&ck->list, &bc->freed_nonpcpu);
}
#endif

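/*
 * Free fast path: stash entries in this CPU's freelist and only take
 * bc->lock to spill half the per-cpu array to the shared (ordered) list when
 * it fills up; entries with per-cpu reader locks always go straight to the
 * shared freed_pcpu list:
 */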
static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
					 struct bkey_cached *ck)
{
	BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

	if (!ck->c.lock.readers) {
#ifdef __KERNEL__
		struct btree_key_cache_freelist *f;
		bool freed = false;

		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);

		if (f->nr < ARRAY_SIZE(f->objs)) {
			f->objs[f->nr++] = ck;
			freed = true;
		}
		preempt_enable();

		if (!freed) {
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);

			while (f->nr > ARRAY_SIZE(f->objs) / 2) {
				struct bkey_cached *ck2 = f->objs[--f->nr];

				__bkey_cached_move_to_freelist_ordered(bc, ck2);
			}
			preempt_enable();

			__bkey_cached_move_to_freelist_ordered(bc, ck);
			mutex_unlock(&bc->lock);
		}
#else
		mutex_lock(&bc->lock);
		list_move_tail(&ck->list, &bc->freed_nonpcpu);
		bc->nr_freed_nonpcpu++;
		mutex_unlock(&bc->lock);
#endif
	} else {
		mutex_lock(&bc->lock);
		list_move_tail(&ck->list, &bc->freed_pcpu);
		mutex_unlock(&bc->lock);
	}
}

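/*
 * Like bkey_cached_free(), but routes the entry through the per-cpu
 * freelists via bkey_cached_move_to_freelist():
 */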
static void bkey_cached_free_fast(struct btree_key_cache *bc,
				  struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	list_del_init(&ck->list);
	atomic_long_inc(&bc->nr_freed);

	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	bkey_cached_move_to_freelist(bc, ck);

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);
}

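/*
 * Allocate a bkey_cached, trying the cheapest source first: this CPU's
 * freelist, then the shared freed lists, and finally the slab allocator
 * (dropping locks if the allocation would block). Returns NULL if the slab
 * allocation fails, in which case the caller falls back to reusing an entry
 * already in the hashtable:
 */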
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
		  bool *was_new)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = NULL;
	bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
	int ret;

	if (!pcpu_readers) {
#ifdef __KERNEL__
		struct btree_key_cache_freelist *f;

		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);
		if (f->nr)
			ck = f->objs[--f->nr];
		preempt_enable();

		if (!ck) {
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);

			while (!list_empty(&bc->freed_nonpcpu) &&
			       f->nr < ARRAY_SIZE(f->objs) / 2) {
				ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
				list_del_init(&ck->list);
				bc->nr_freed_nonpcpu--;
				f->objs[f->nr++] = ck;
			}

			ck = f->nr ? f->objs[--f->nr] : NULL;
			preempt_enable();
			mutex_unlock(&bc->lock);
		}
#else
		mutex_lock(&bc->lock);
		if (!list_empty(&bc->freed_nonpcpu)) {
			ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
			list_del_init(&ck->list);
			bc->nr_freed_nonpcpu--;
		}
		mutex_unlock(&bc->lock);
#endif
	} else {
		mutex_lock(&bc->lock);
		if (!list_empty(&bc->freed_pcpu)) {
			ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
			list_del_init(&ck->list);
		}
		mutex_unlock(&bc->lock);
	}

	if (ck) {
		ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
		if (unlikely(ret)) {
			bkey_cached_move_to_freelist(bc, ck);
			return ERR_PTR(ret);
		}

		path->l[0].b = (void *) ck;
		path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
		mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);

		ret = bch2_btree_node_lock_write(trans, path, &ck->c);
		if (unlikely(ret)) {
			btree_node_unlock(trans, path, 0);
			bkey_cached_move_to_freelist(bc, ck);
			return ERR_PTR(ret);
		}

		return ck;
	}

	ck = allocate_dropping_locks(trans, ret,
				     kmem_cache_zalloc(bch2_key_cache, _gfp));
	if (ret) {
		kmem_cache_free(bch2_key_cache, ck);
		return ERR_PTR(ret);
	}

	if (!ck)
		return NULL;

	INIT_LIST_HEAD(&ck->list);
	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);

	ck->c.cached = true;
	BUG_ON(!six_trylock_intent(&ck->c.lock));
	BUG_ON(!six_trylock_write(&ck->c.lock));
	*was_new = true;
	return ck;
}

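/*
 * Allocation failed: walk the hashtable for a clean entry we can evict and
 * reuse in place:
 */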
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct bkey_cached *ck;
	unsigned i;

	mutex_lock(&c->lock);
	rcu_read_lock();
	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				bkey_cached_evict(c, ck);
				goto out;
			}
		}
	ck = NULL;
out:
	rcu_read_unlock();
	mutex_unlock(&c->lock);
	return ck;
}

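/*
 * Allocate a new cached key and insert it into the hashtable; returns NULL
 * if we raced with another thread inserting the same key, in which case the
 * caller retries the lookup:
 */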
static struct bkey_cached *
btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck;
	bool was_new = false;

	ck = bkey_cached_alloc(trans, path, &was_new);
	if (IS_ERR(ck))
		return ck;

	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(bc);
		if (unlikely(!ck)) {
			bch_err(c, "error allocating memory for key cache item, btree %s",
				bch2_btree_id_str(path->btree_id));
			return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
		}

		mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
	}

	ck->c.level		= 0;
	ck->c.btree_id		= path->btree_id;
	ck->key.btree_id	= path->btree_id;
	ck->key.pos		= path->pos;
	ck->valid		= false;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
						   &ck->hash,
						   bch2_btree_key_cache_params))) {
		/* We raced with another fill: */

		if (likely(was_new)) {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
			kfree(ck);
		} else {
			bkey_cached_free_fast(bc, ck);
		}

		mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
		return NULL;
	}

	atomic_long_inc(&bc->nr_keys);

	six_unlock_write(&ck->c.lock);

	return ck;
}

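/*
 * Read the key's current value from the btree into the cache entry, sizing
 * the buffer generously so the transaction commit path is unlikely to need
 * to reallocate it:
 */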
static int btree_key_cache_fill(struct btree_trans *trans,
				struct btree_path *ck_path,
				struct bkey_cached *ck)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	unsigned new_u64s = 0;
	struct bkey_i *new_k = NULL;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, ck->key.btree_id, ck->key.pos,
			       BTREE_ITER_KEY_CACHE_FILL|
			       BTREE_ITER_CACHED_NOFILL);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
		trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
		goto err;
	}

	/*
	 * bch2_varint_decode can read past the end of the buffer by at
	 * most 7 bytes (it won't be used):
	 */
	new_u64s = k.k->u64s + 1;

	/*
	 * Allocate some extra space so that the transaction commit path is less
	 * likely to have to reallocate, since that requires a transaction
	 * restart:
	 */
	new_u64s = min(256U, (new_u64s * 3) / 2);

	if (new_u64s > ck->u64s) {
		new_u64s = roundup_pow_of_two(new_u64s);
		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
		if (!new_k) {
			bch2_trans_unlock(trans);

			new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
			if (!new_k) {
				bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
					bch2_btree_id_str(ck->key.btree_id), new_u64s);
				ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
				goto err;
			}

			if (!bch2_btree_node_relock(trans, ck_path, 0)) {
				kfree(new_k);
				trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
				ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
				goto err;
			}

			ret = bch2_trans_relock(trans);
			if (ret) {
				kfree(new_k);
				goto err;
			}
		}
	}

	ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
	if (ret) {
		kfree(new_k);
		goto err;
	}

	if (new_k) {
		kfree(ck->k);
		ck->u64s = new_u64s;
		ck->k = new_k;
	}

	bkey_reassemble(ck->k, k);
	ck->valid = true;
	bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);

	/* We're not likely to need this iterator again: */
	set_btree_iter_dontneed(&iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

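/*
 * Slowpath for traversing to a cached key: creates the entry if it doesn't
 * exist, upgrades locks as needed, and fills the entry from the btree:
 */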
static noinline int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
					 unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;

	BUG_ON(path->level);

	path->l[1].b = NULL;

	if (bch2_btree_node_relock_notrace(trans, path, 0)) {
		ck = (void *) path->l[0].b;
		goto fill;
	}
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck) {
		ck = btree_key_cache_create(trans, path);
		ret = PTR_ERR_OR_ZERO(ck);
		if (ret)
			goto err;
		if (!ck)
			goto retry;

		mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
		path->locks_want = 1;
	} else {
		enum six_lock_type lock_want = __btree_lock_want(path, 0);

		ret = btree_node_lock(trans, path, (void *) ck, 0,
				      lock_want, _THIS_IP_);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto err;

		BUG_ON(ret);

		if (ck->key.btree_id != path->btree_id ||
		    !bpos_eq(ck->key.pos, path->pos)) {
			six_unlock_type(&ck->c.lock, lock_want);
			goto retry;
		}

		mark_btree_node_locked(trans, path, 0,
				       (enum btree_node_locked_type) lock_want);
	}

	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
fill:
	path->uptodate = BTREE_ITER_UPTODATE;

	if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
		/*
		 * Using the underscore version because we haven't set
		 * path->uptodate yet:
		 */
		if (!path->locks_want &&
		    !__bch2_btree_path_upgrade(trans, path, 1, NULL)) {
			trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
			goto err;
		}

		ret = btree_key_cache_fill(trans, path, ck);
		if (ret)
			goto err;

		ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
		if (ret)
			goto err;

		path->uptodate = BTREE_ITER_UPTODATE;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
	BUG_ON(path->uptodate);

	return ret;
err:
	path->uptodate = BTREE_ITER_NEED_TRAVERSE;
	if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(ret);
	}
	return ret;
}

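/*
 * Fastpath: only handles entries that already exist and are valid; anything
 * else goes through the slowpath above:
 */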
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
				    unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;

	EBUG_ON(path->level);

	path->l[1].b = NULL;

	if (bch2_btree_node_relock_notrace(trans, path, 0)) {
		ck = (void *) path->l[0].b;
		goto fill;
	}
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck) {
		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
	} else {
		enum six_lock_type lock_want = __btree_lock_want(path, 0);

		ret = btree_node_lock(trans, path, (void *) ck, 0,
				      lock_want, _THIS_IP_);
		EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret)
			return ret;

		if (ck->key.btree_id != path->btree_id ||
		    !bpos_eq(ck->key.pos, path->pos)) {
			six_unlock_type(&ck->c.lock, lock_want);
			goto retry;
		}

		mark_btree_node_locked(trans, path, 0,
				       (enum btree_node_locked_type) lock_want);
	}

	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
fill:
	if (!ck->valid)
		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	path->uptodate = BTREE_ITER_UPTODATE;
	EBUG_ON(!ck->valid);
	EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

	return ret;
}

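/*
 * Flush a single dirty key from the key cache back to the underlying btree,
 * optionally evicting it afterwards; the update through b_iter deliberately
 * bypasses the key cache so it goes to the btree proper:
 */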
static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter c_iter, b_iter;
	struct bkey_cached *ck = NULL;
	int ret;

	bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
			     BTREE_ITER_SLOTS|
			     BTREE_ITER_INTENT|
			     BTREE_ITER_ALL_SNAPSHOTS);
	bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;

	ret = bch2_btree_iter_traverse(&c_iter);
	if (ret)
		goto out;

	ck = (void *) c_iter.path->l[0].b;
	if (!ck)
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (evict)
			goto evict;
		goto out;
	}

	BUG_ON(!ck->valid);

	if (journal_seq && ck->journal.seq != journal_seq)
		goto out;

	/*
	 * Since journal reclaim depends on us making progress here, and the
	 * allocator/copygc depend on journal reclaim making progress, we need
	 * to be using alloc reserves:
	 */
	ret   = bch2_btree_iter_traverse(&b_iter) ?:
		bch2_trans_update(trans, &b_iter, ck->k,
				  BTREE_UPDATE_KEY_CACHE_RECLAIM|
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				  BTREE_TRIGGER_NORUN) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  (ck->journal.seq == journal_last_seq(j)
				   ? BCH_WATERMARK_reclaim
				   : 0)|
				  commit_flags);

	bch2_fs_fatal_err_on(ret &&
			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
			     !bch2_journal_error(j), c,
			     "error flushing key cache: %s", bch2_err_str(ret));
	if (ret)
		goto out;

	bch2_journal_pin_drop(j, &ck->journal);

	BUG_ON(!btree_node_locked(c_iter.path, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
		struct btree_path *path2;
evict:
		trans_for_each_path(trans, path2)
			if (path2 != c_iter.path)
				__bch2_btree_path_unlock(trans, path2);

		bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
		bkey_cached_evict(&c->btree_key_cache, ck);
		bkey_cached_free_fast(&c->btree_key_cache, ck);
	}
out:
	bch2_trans_iter_exit(trans, &b_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	return ret;
}

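/*
 * Journal pin flush callback, called from journal reclaim when this entry's
 * pin is holding up the journal tail:
 */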
int bch2_btree_key_cache_journal_flush(struct journal *j,
				       struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans *trans = bch2_trans_get(c);
	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	int ret = 0;

	btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}

	if (ck->seq != seq) {
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	ret = commit_do(trans, NULL, NULL, 0,
		btree_key_cache_flush_pos(trans, key, seq,
				BCH_TRANS_COMMIT_journal_reclaim, false));
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	bch2_trans_put(trans);
	return ret;
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
			       enum btree_id id, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached_key key = { id, pos };

	/* Fastpath - assume it won't be found: */
	if (!bch2_btree_key_cache_find(c, id, pos))
		return 0;

	return btree_key_cache_flush_pos(trans, key, 0, 0, true);
}

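/*
 * Called from the transaction commit path: update the cached key in place,
 * mark it dirty, and pin the journal sequence number of this commit:
 */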
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  unsigned flags,
				  struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) insert_entry->path->l[0].b;
	struct bkey_i *insert = insert_entry->k;
	bool kick_reclaim = false;

	BUG_ON(insert->k.u64s > ck->u64s);

	bkey_copy(ck->k, insert);
	ck->valid = true;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	/*
	 * To minimize lock contention, we only add the journal pin here and
	 * defer pin updates to the flush callback via ->seq. Be careful not to
	 * update ->seq on nojournal commits because we don't want to update the
	 * pin to a seq that doesn't include journal updates on disk. Otherwise
	 * we risk losing the update after a crash.
	 *
	 * The only exception is if the pin is not active in the first place. We
	 * have to add the pin because journal reclaim drives key cache
	 * flushing. The flush callback will not proceed unless ->seq matches
	 * the latest pin, so make sure it starts with a consistent value.
	 */
	if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
	    !journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
	}
	bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
			     &ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
			       struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;

	BUG_ON(!ck->valid);

	/*
	 * We just did an update to the btree, bypassing the key cache: the key
	 * cache key is now stale and must be dropped, even if dirty:
	 */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_dec(&c->btree_key_cache.nr_dirty);
		bch2_journal_pin_drop(&c->journal, &ck->journal);
	}

	ck->valid = false;
}

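/*
 * Shrinker callback: walk the key cache, freeing entries to relieve memory
 * pressure:
 */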
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					       struct shrink_control *sc)
{
|
|
|
struct bch_fs *c = shrink->private_data;
|
2020-11-13 06:19:47 +08:00
|
|
|
struct btree_key_cache *bc = &c->btree_key_cache;
|
2021-03-25 11:37:33 +08:00
|
|
|
struct bucket_table *tbl;
|
2020-11-13 06:19:47 +08:00
|
|
|
struct bkey_cached *ck, *t;
|
|
|
|
size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
|
2021-03-25 11:37:33 +08:00
|
|
|
unsigned start, flags;
|
|
|
|
int srcu_idx;
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2022-10-14 18:48:23 +08:00
|
|
|
mutex_lock(&bc->lock);
|
2021-03-25 11:37:33 +08:00
|
|
|
srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
|
2020-11-13 06:19:47 +08:00
|
|
|
flags = memalloc_nofs_save();
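	/*
	 * Roughly: bc->lock serializes the scan against other freelist
	 * users, the SRCU read section covers the freed-list and hash
	 * table walks below, and memalloc_nofs_save() keeps allocations
	 * on this reclaim path from recursing back into the filesystem.
	 */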
|
|
|
|
|
2020-11-20 04:38:27 +08:00
|
|
|
/*
|
|
|
|
* Newest freed entries are at the end of the list - once we hit one
|
|
|
|
* that's too new to be freed, we can bail out:
|
|
|
|
*/
|
2023-11-06 22:53:14 +08:00
|
|
|
scanned += bc->nr_freed_nonpcpu;
|
|
|
|
|
2022-09-04 10:07:31 +08:00
|
|
|
list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
|
|
|
|
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
|
|
|
|
ck->btree_trans_barrier_seq))
|
|
|
|
break;
|
|
|
|
|
|
|
|
list_del(&ck->list);
|
2023-05-21 08:57:55 +08:00
|
|
|
six_lock_exit(&ck->c.lock);
|
2022-09-04 10:07:31 +08:00
|
|
|
kmem_cache_free(bch2_key_cache, ck);
|
|
|
|
atomic_long_dec(&bc->nr_freed);
|
|
|
|
freed++;
|
2023-11-06 22:53:14 +08:00
|
|
|
bc->nr_freed_nonpcpu--;
|
2022-09-04 10:07:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (scanned >= nr)
|
|
|
|
goto out;
|
|
|
|
|
2023-11-06 22:53:14 +08:00
|
|
|
scanned += bc->nr_freed_pcpu;
|
|
|
|
|
2022-09-04 10:07:31 +08:00
|
|
|
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
|
2020-11-20 04:38:27 +08:00
|
|
|
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
|
|
|
|
ck->btree_trans_barrier_seq))
|
|
|
|
break;
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2020-11-20 04:38:27 +08:00
|
|
|
list_del(&ck->list);
|
2023-05-21 08:57:55 +08:00
|
|
|
six_lock_exit(&ck->c.lock);
|
2020-11-20 04:38:27 +08:00
|
|
|
kmem_cache_free(bch2_key_cache, ck);
|
2022-06-17 13:07:54 +08:00
|
|
|
atomic_long_dec(&bc->nr_freed);
|
2020-11-20 04:38:27 +08:00
|
|
|
freed++;
|
2023-11-06 22:53:14 +08:00
|
|
|
bc->nr_freed_pcpu--;
|
2020-11-13 06:19:47 +08:00
|
|
|
}
|
|
|
|
|
2020-11-20 04:38:27 +08:00
|
|
|
if (scanned >= nr)
|
|
|
|
goto out;
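	/*
	 * Both passes above only drain entries that were already evicted;
	 * they're kept on separate lists because six locks initialized
	 * with per-cpu reader counts (see btree_uses_pcpu_readers()) can
	 * presumably only be reused in the same mode. What follows walks
	 * the hash table itself for live entries to evict:
	 */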
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2021-03-25 11:37:33 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
|
|
|
|
if (bc->shrink_iter >= tbl->size)
|
|
|
|
bc->shrink_iter = 0;
|
|
|
|
start = bc->shrink_iter;
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2021-03-25 11:37:33 +08:00
|
|
|
do {
|
|
|
|
struct rhash_head *pos, *next;
|
|
|
|
|
|
|
|
pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
|
|
|
|
|
|
|
|
while (!rht_is_a_nulls(pos)) {
|
|
|
|
next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
|
|
|
|
ck = container_of(pos, struct bkey_cached, hash);
|
|
|
|
|
|
|
|
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
|
|
|
|
goto next;
|
|
|
|
|
|
|
|
if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
|
|
|
|
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
|
|
|
|
else if (bkey_cached_lock_for_evict(ck)) {
|
|
|
|
bkey_cached_evict(bc, ck);
|
|
|
|
bkey_cached_free(bc, ck);
|
|
|
|
}
|
|
|
|
|
|
|
|
scanned++;
|
|
|
|
if (scanned >= nr)
|
|
|
|
break;
|
|
|
|
next:
|
|
|
|
pos = next;
|
2020-11-13 06:19:47 +08:00
|
|
|
}
|
2021-03-25 11:37:33 +08:00
|
|
|
|
|
|
|
bc->shrink_iter++;
|
|
|
|
if (bc->shrink_iter >= tbl->size)
|
|
|
|
bc->shrink_iter = 0;
|
|
|
|
} while (scanned < nr && bc->shrink_iter != start);
|
|
|
|
|
|
|
|
rcu_read_unlock();
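	/*
	 * The table walk above is a clock-style second-chance scan: a set
	 * BKEY_CACHED_ACCESSED bit buys an entry one reprieve, dirty
	 * entries are skipped (journal reclaim has to flush them first),
	 * and bc->shrink_iter persists so the next scan resumes where
	 * this one stopped.
	 */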
|
2020-11-13 06:19:47 +08:00
|
|
|
out:
|
|
|
|
memalloc_nofs_restore(flags);
|
2021-03-25 11:37:33 +08:00
|
|
|
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
|
2020-11-13 06:19:47 +08:00
|
|
|
mutex_unlock(&bc->lock);
|
|
|
|
|
|
|
|
return freed;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
|
|
|
|
struct shrink_control *sc)
|
|
|
|
{
|
2023-11-03 13:38:47 +08:00
|
|
|
struct bch_fs *c = shrink->private_data;
|
2020-11-13 06:19:47 +08:00
|
|
|
struct btree_key_cache *bc = &c->btree_key_cache;
|
2021-04-28 02:02:00 +08:00
|
|
|
long nr = atomic_long_read(&bc->nr_keys) -
|
|
|
|
atomic_long_read(&bc->nr_dirty);
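	/*
	 * Report only clean keys as freeable: dirty ones are pinned until
	 * journal reclaim flushes them, and the scan above skips them.
	 */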
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2021-04-28 02:02:00 +08:00
|
|
|
return max(0L, nr);
|
2020-11-13 06:19:47 +08:00
|
|
|
}
|
|
|
|
|
2020-11-10 02:01:52 +08:00
|
|
|
void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
|
2019-03-08 08:46:10 +08:00
|
|
|
{
|
2020-11-10 02:01:52 +08:00
|
|
|
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
|
2021-03-25 11:37:33 +08:00
|
|
|
struct bucket_table *tbl;
|
2019-03-08 08:46:10 +08:00
|
|
|
struct bkey_cached *ck, *n;
|
2021-03-25 11:37:33 +08:00
|
|
|
struct rhash_head *pos;
|
2023-03-03 12:51:47 +08:00
|
|
|
LIST_HEAD(items);
|
2021-03-25 11:37:33 +08:00
|
|
|
unsigned i;
|
2022-10-15 12:47:21 +08:00
|
|
|
#ifdef __KERNEL__
|
2022-06-17 13:07:54 +08:00
|
|
|
int cpu;
|
2022-10-15 12:47:21 +08:00
|
|
|
#endif
|
2019-03-08 08:46:10 +08:00
|
|
|
|
2023-11-03 13:38:47 +08:00
|
|
|
shrinker_free(bc->shrink);
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2020-11-10 02:01:52 +08:00
|
|
|
mutex_lock(&bc->lock);
|
|
|
|
|
2022-10-15 12:47:21 +08:00
|
|
|
/*
|
|
|
|
* The loop is needed to guard against racing with rehash:
|
|
|
|
*/
|
|
|
|
while (atomic_long_read(&bc->nr_keys)) {
|
|
|
|
rcu_read_lock();
|
|
|
|
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
|
|
|
|
if (tbl)
|
|
|
|
for (i = 0; i < tbl->size; i++)
|
|
|
|
rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
|
|
|
|
bkey_cached_evict(bc, ck);
|
2023-03-03 12:51:47 +08:00
|
|
|
list_add(&ck->list, &items);
|
2022-10-15 12:47:21 +08:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
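	/*
	 * A single RCU walk can miss entries that are mid-move during a
	 * concurrent rehash, hence the outer loop: repeat until nr_keys
	 * (decremented by bkey_cached_evict()) hits zero.
	 */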
|
2021-03-25 11:37:33 +08:00
|
|
|
|
2022-10-15 12:47:21 +08:00
|
|
|
#ifdef __KERNEL__
|
2022-06-17 13:07:54 +08:00
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
struct btree_key_cache_freelist *f =
|
|
|
|
per_cpu_ptr(bc->pcpu_freed, cpu);
|
|
|
|
|
|
|
|
for (i = 0; i < f->nr; i++) {
|
|
|
|
ck = f->objs[i];
|
2023-03-03 12:51:47 +08:00
|
|
|
list_add(&ck->list, &items);
|
2022-06-17 13:07:54 +08:00
|
|
|
}
|
|
|
|
}
|
2022-10-15 12:47:21 +08:00
|
|
|
#endif
|
2022-06-17 13:07:54 +08:00
|
|
|
|
2023-11-06 22:53:14 +08:00
|
|
|
BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
|
|
|
|
BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
|
|
|
|
|
2023-03-03 12:51:47 +08:00
|
|
|
list_splice(&bc->freed_pcpu, &items);
|
|
|
|
list_splice(&bc->freed_nonpcpu, &items);
|
2022-09-04 10:07:31 +08:00
|
|
|
|
2023-03-03 12:51:47 +08:00
|
|
|
mutex_unlock(&bc->lock);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(ck, n, &items, list) {
|
2020-12-14 05:12:04 +08:00
|
|
|
cond_resched();
|
|
|
|
|
2020-11-19 03:09:33 +08:00
|
|
|
list_del(&ck->list);
|
2021-03-25 11:37:33 +08:00
|
|
|
kfree(ck->k);
|
2023-05-21 08:57:55 +08:00
|
|
|
six_lock_exit(&ck->c.lock);
|
2020-11-19 03:09:33 +08:00
|
|
|
kmem_cache_free(bch2_key_cache, ck);
|
2019-03-08 08:46:10 +08:00
|
|
|
}
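	/*
	 * The actual freeing runs outside bc->lock, with a cond_resched()
	 * per entry, since a large cache makes this loop long-running.
	 */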
|
2020-11-10 02:01:52 +08:00
|
|
|
|
2022-10-15 12:47:21 +08:00
|
|
|
if (atomic_long_read(&bc->nr_dirty) &&
|
|
|
|
!bch2_journal_error(&c->journal) &&
|
|
|
|
test_bit(BCH_FS_WAS_RW, &c->flags))
|
|
|
|
panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
|
|
|
|
atomic_long_read(&bc->nr_dirty));
|
|
|
|
|
|
|
|
if (atomic_long_read(&bc->nr_keys))
|
|
|
|
panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
|
|
|
|
atomic_long_read(&bc->nr_keys));
|
2020-12-14 05:12:04 +08:00
|
|
|
|
2020-11-30 12:48:20 +08:00
|
|
|
if (bc->table_init_done)
|
|
|
|
rhashtable_destroy(&bc->table);
|
2022-06-17 13:07:54 +08:00
|
|
|
|
|
|
|
free_percpu(bc->pcpu_freed);
|
2019-03-08 08:46:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
|
|
|
|
{
|
|
|
|
mutex_init(&c->lock);
|
2022-09-04 10:07:31 +08:00
|
|
|
INIT_LIST_HEAD(&c->freed_pcpu);
|
|
|
|
INIT_LIST_HEAD(&c->freed_nonpcpu);
|
2019-03-08 08:46:10 +08:00
|
|
|
}
|
|
|
|
|
2020-11-13 06:19:47 +08:00
|
|
|
int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
|
2019-03-08 08:46:10 +08:00
|
|
|
{
|
2020-11-13 06:19:47 +08:00
|
|
|
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
|
Many singleton patches against the MM code. The patch series which are
included in this merge do the following:
- Kemeng Shi has contributed some compation maintenance work in the
series "Fixes and cleanups to compaction".
- Joel Fernandes has a patchset ("Optimize mremap during mutual
alignment within PMD") which fixes an obscure issue with mremap()'s
pagetable handling during a subsequent exec(), based upon an
implementation which Linus suggested.
- More DAMON/DAMOS maintenance and feature work from SeongJae Park i the
following patch series:
mm/damon: misc fixups for documents, comments and its tracepoint
mm/damon: add a tracepoint for damos apply target regions
mm/damon: provide pseudo-moving sum based access rate
mm/damon: implement DAMOS apply intervals
mm/damon/core-test: Fix memory leaks in core-test
mm/damon/sysfs-schemes: Do DAMOS tried regions update for only one apply interval
- In the series "Do not try to access unaccepted memory" Adrian Hunter
provides some fixups for the recently-added "unaccepted memory' feature.
To increase the feature's checking coverage. "Plug a few gaps where
RAM is exposed without checking if it is unaccepted memory".
- In the series "cleanups for lockless slab shrink" Qi Zheng has done
some maintenance work which is preparation for the lockless slab
shrinking code.
- Qi Zheng has redone the earlier (and reverted) attempt to make slab
shrinking lockless in the series "use refcount+RCU method to implement
lockless slab shrink".
- David Hildenbrand contributes some maintenance work for the rmap code
in the series "Anon rmap cleanups".
- Kefeng Wang does more folio conversions and some maintenance work in
the migration code. Series "mm: migrate: more folio conversion and
unification".
- Matthew Wilcox has fixed an issue in the buffer_head code which was
causing long stalls under some heavy memory/IO loads. Some cleanups
were added on the way. Series "Add and use bdev_getblk()".
- In the series "Use nth_page() in place of direct struct page
manipulation" Zi Yan has fixed a potential issue with the direct
manipulation of hugetlb page frames.
- In the series "mm: hugetlb: Skip initialization of gigantic tail
struct pages if freed by HVO" has improved our handling of gigantic
pages in the hugetlb vmmemmep optimizaton code. This provides
significant boot time improvements when significant amounts of gigantic
pages are in use.
- Matthew Wilcox has sent the series "Small hugetlb cleanups" - code
rationalization and folio conversions in the hugetlb code.
- Yin Fengwei has improved mlock()'s handling of large folios in the
series "support large folio for mlock"
- In the series "Expose swapcache stat for memcg v1" Liu Shixin has
added statistics for memcg v1 users which are available (and useful)
under memcg v2.
- Florent Revest has enhanced the MDWE (Memory-Deny-Write-Executable)
prctl so that userspace may direct the kernel to not automatically
propagate the denial to child processes. The series is named "MDWE
without inheritance".
- Kefeng Wang has provided the series "mm: convert numa balancing
functions to use a folio" which does what it says.
- In the series "mm/ksm: add fork-exec support for prctl" Stefan Roesch
makes is possible for a process to propagate KSM treatment across
exec().
- Huang Ying has enhanced memory tiering's calculation of memory
distances. This is used to permit the dax/kmem driver to use "high
bandwidth memory" in addition to Optane Data Center Persistent Memory
Modules (DCPMM). The series is named "memory tiering: calculate
abstract distance based on ACPI HMAT"
- In the series "Smart scanning mode for KSM" Stefan Roesch has
optimized KSM by teaching it to retain and use some historical
information from previous scans.
- Yosry Ahmed has fixed some inconsistencies in memcg statistics in the
series "mm: memcg: fix tracking of pending stats updates values".
- In the series "Implement IOCTL to get and optionally clear info about
PTEs" Peter Xu has added an ioctl to /proc/<pid>/pagemap which permits
us to atomically read-then-clear page softdirty state. This is mainly
used by CRIU.
- Hugh Dickins contributed the series "shmem,tmpfs: general maintenance"
- a bunch of relatively minor maintenance tweaks to this code.
- Matthew Wilcox has increased the use of the VMA lock over file-backed
page faults in the series "Handle more faults under the VMA lock". Some
rationalizations of the fault path became possible as a result.
- In the series "mm/rmap: convert page_move_anon_rmap() to
folio_move_anon_rmap()" David Hildenbrand has implemented some cleanups
and folio conversions.
- In the series "various improvements to the GUP interface" Lorenzo
Stoakes has simplified and improved the GUP interface with an eye to
providing groundwork for future improvements.
- Andrey Konovalov has sent along the series "kasan: assorted fixes and
improvements" which does those things.
- Some page allocator maintenance work from Kemeng Shi in the series
"Two minor cleanups to break_down_buddy_pages".
- In thes series "New selftest for mm" Breno Leitao has developed
another MM self test which tickles a race we had between madvise() and
page faults.
- In the series "Add folio_end_read" Matthew Wilcox provides cleanups
and an optimization to the core pagecache code.
- Nhat Pham has added memcg accounting for hugetlb memory in the series
"hugetlb memcg accounting".
- Cleanups and rationalizations to the pagemap code from Lorenzo
Stoakes, in the series "Abstract vma_merge() and split_vma()".
- Audra Mitchell has fixed issues in the procfs page_owner code's new
timestamping feature which was causing some misbehaviours. In the
series "Fix page_owner's use of free timestamps".
- Lorenzo Stoakes has fixed the handling of new mappings of sealed files
in the series "permit write-sealed memfd read-only shared mappings".
- Mike Kravetz has optimized the hugetlb vmemmap optimization in the
series "Batch hugetlb vmemmap modification operations".
- Some buffer_head folio conversions and cleanups from Matthew Wilcox in
the series "Finish the create_empty_buffers() transition".
- As a page allocator performance optimization Huang Ying has added
automatic tuning to the allocator's per-cpu-pages feature, in the series
"mm: PCP high auto-tuning".
- Roman Gushchin has contributed the patchset "mm: improve performance
of accounted kernel memory allocations" which improves their performance
by ~30% as measured by a micro-benchmark.
- folio conversions from Kefeng Wang in the series "mm: convert page
cpupid functions to folios".
- Some kmemleak fixups in Liu Shixin's series "Some bugfix about
kmemleak".
- Qi Zheng has improved our handling of memoryless nodes by keeping them
off the allocation fallback list. This is done in the series "handle
memoryless nodes more appropriately".
- khugepaged conversions from Vishal Moola in the series "Some
khugepaged folio conversions".
-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZULEMwAKCRDdBJ7gKXxA
jhQHAQCYpD3g849x69DmHnHWHm/EHQLvQmRMDeYZI+nx/sCJOwEAw4AKg0Oemv9y
FgeUPAD1oasg6CP+INZvCj34waNxwAc=
=E+Y4
-----END PGP SIGNATURE-----
Merge tag 'mm-stable-2023-11-01-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Many singleton patches against the MM code. The patch series which are
included in this merge do the following:
- Kemeng Shi has contributed some compation maintenance work in the
series 'Fixes and cleanups to compaction'
- Joel Fernandes has a patchset ('Optimize mremap during mutual
alignment within PMD') which fixes an obscure issue with mremap()'s
pagetable handling during a subsequent exec(), based upon an
implementation which Linus suggested
- More DAMON/DAMOS maintenance and feature work from SeongJae Park i
the following patch series:
mm/damon: misc fixups for documents, comments and its tracepoint
mm/damon: add a tracepoint for damos apply target regions
mm/damon: provide pseudo-moving sum based access rate
mm/damon: implement DAMOS apply intervals
mm/damon/core-test: Fix memory leaks in core-test
mm/damon/sysfs-schemes: Do DAMOS tried regions update for only one apply interval
- In the series 'Do not try to access unaccepted memory' Adrian
Hunter provides some fixups for the recently-added 'unaccepted
memory' feature. To increase the feature's checking coverage. 'Plug
a few gaps where RAM is exposed without checking if it is
unaccepted memory'
- In the series 'cleanups for lockless slab shrink' Qi Zheng has done
some maintenance work which is preparation for the lockless slab
shrinking code
- Qi Zheng has redone the earlier (and reverted) attempt to make slab
shrinking lockless in the series 'use refcount+RCU method to
implement lockless slab shrink'
- David Hildenbrand contributes some maintenance work for the rmap
code in the series 'Anon rmap cleanups'
- Kefeng Wang does more folio conversions and some maintenance work
in the migration code. Series 'mm: migrate: more folio conversion
and unification'
- Matthew Wilcox has fixed an issue in the buffer_head code which was
causing long stalls under some heavy memory/IO loads. Some cleanups
were added on the way. Series 'Add and use bdev_getblk()'
- In the series 'Use nth_page() in place of direct struct page
manipulation' Zi Yan has fixed a potential issue with the direct
manipulation of hugetlb page frames
- In the series 'mm: hugetlb: Skip initialization of gigantic tail
struct pages if freed by HVO' has improved our handling of gigantic
pages in the hugetlb vmmemmep optimizaton code. This provides
significant boot time improvements when significant amounts of
gigantic pages are in use
- Matthew Wilcox has sent the series 'Small hugetlb cleanups' - code
rationalization and folio conversions in the hugetlb code
- Yin Fengwei has improved mlock()'s handling of large folios in the
series 'support large folio for mlock'
- In the series 'Expose swapcache stat for memcg v1' Liu Shixin has
added statistics for memcg v1 users which are available (and
useful) under memcg v2
- Florent Revest has enhanced the MDWE (Memory-Deny-Write-Executable)
prctl so that userspace may direct the kernel to not automatically
propagate the denial to child processes. The series is named 'MDWE
without inheritance'
- Kefeng Wang has provided the series 'mm: convert numa balancing
functions to use a folio' which does what it says
- In the series 'mm/ksm: add fork-exec support for prctl' Stefan
Roesch makes is possible for a process to propagate KSM treatment
across exec()
- Huang Ying has enhanced memory tiering's calculation of memory
distances. This is used to permit the dax/kmem driver to use 'high
bandwidth memory' in addition to Optane Data Center Persistent
Memory Modules (DCPMM). The series is named 'memory tiering:
calculate abstract distance based on ACPI HMAT'
- In the series 'Smart scanning mode for KSM' Stefan Roesch has
optimized KSM by teaching it to retain and use some historical
information from previous scans
- Yosry Ahmed has fixed some inconsistencies in memcg statistics in
the series 'mm: memcg: fix tracking of pending stats updates
values'
- In the series 'Implement IOCTL to get and optionally clear info
about PTEs' Peter Xu has added an ioctl to /proc/<pid>/pagemap
which permits us to atomically read-then-clear page softdirty
state. This is mainly used by CRIU
- Hugh Dickins contributed the series 'shmem,tmpfs: general
maintenance', a bunch of relatively minor maintenance tweaks to
this code
- Matthew Wilcox has increased the use of the VMA lock over
file-backed page faults in the series 'Handle more faults under the
VMA lock'. Some rationalizations of the fault path became possible
as a result
- In the series 'mm/rmap: convert page_move_anon_rmap() to
folio_move_anon_rmap()' David Hildenbrand has implemented some
cleanups and folio conversions
- In the series 'various improvements to the GUP interface' Lorenzo
Stoakes has simplified and improved the GUP interface with an eye
to providing groundwork for future improvements
- Andrey Konovalov has sent along the series 'kasan: assorted fixes
and improvements' which does those things
- Some page allocator maintenance work from Kemeng Shi in the series
'Two minor cleanups to break_down_buddy_pages'
- In thes series 'New selftest for mm' Breno Leitao has developed
another MM self test which tickles a race we had between madvise()
and page faults
- In the series 'Add folio_end_read' Matthew Wilcox provides cleanups
and an optimization to the core pagecache code
- Nhat Pham has added memcg accounting for hugetlb memory in the
series 'hugetlb memcg accounting'
- Cleanups and rationalizations to the pagemap code from Lorenzo
Stoakes, in the series 'Abstract vma_merge() and split_vma()'
- Audra Mitchell has fixed issues in the procfs page_owner code's new
timestamping feature which was causing some misbehaviours. In the
series 'Fix page_owner's use of free timestamps'
- Lorenzo Stoakes has fixed the handling of new mappings of sealed
files in the series 'permit write-sealed memfd read-only shared
mappings'
- Mike Kravetz has optimized the hugetlb vmemmap optimization in the
series 'Batch hugetlb vmemmap modification operations'
- Some buffer_head folio conversions and cleanups from Matthew Wilcox
in the series 'Finish the create_empty_buffers() transition'
- As a page allocator performance optimization Huang Ying has added
automatic tuning to the allocator's per-cpu-pages feature, in the
series 'mm: PCP high auto-tuning'
- Roman Gushchin has contributed the patchset 'mm: improve
performance of accounted kernel memory allocations' which improves
their performance by ~30% as measured by a micro-benchmark
- folio conversions from Kefeng Wang in the series 'mm: convert page
cpupid functions to folios'
- Some kmemleak fixups in Liu Shixin's series 'Some bugfix about
kmemleak'
- Qi Zheng has improved our handling of memoryless nodes by keeping
them off the allocation fallback list. This is done in the series
'handle memoryless nodes more appropriately'
- khugepaged conversions from Vishal Moola in the series 'Some
khugepaged folio conversions'"
[ bcachefs conflicts with the dynamically allocated shrinkers have been
resolved as per Stephen Rothwell in
https://lore.kernel.org/all/20230913093553.4290421e@canb.auug.org.au/
with help from Qi Zheng.
The clone3 test filtering conflict was half-arsed by yours truly ]
* tag 'mm-stable-2023-11-01-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (406 commits)
mm/damon/sysfs: update monitoring target regions for online input commit
mm/damon/sysfs: remove requested targets when online-commit inputs
selftests: add a sanity check for zswap
Documentation: maple_tree: fix word spelling error
mm/vmalloc: fix the unchecked dereference warning in vread_iter()
zswap: export compression failure stats
Documentation: ubsan: drop "the" from article title
mempolicy: migration attempt to match interleave nodes
mempolicy: mmap_lock is not needed while migrating folios
mempolicy: alloc_pages_mpol() for NUMA policy without vma
mm: add page_rmappable_folio() wrapper
mempolicy: remove confusing MPOL_MF_LAZY dead code
mempolicy: mpol_shared_policy_init() without pseudo-vma
mempolicy trivia: use pgoff_t in shared mempolicy tree
mempolicy trivia: slightly more consistent naming
mempolicy trivia: delete those ancient pr_debug()s
mempolicy: fix migrate_pages(2) syscall return nr_failed
kernfs: drop shared NUMA mempolicy hooks
hugetlbfs: drop shared NUMA mempolicy pretence
mm/damon/sysfs-test: add a unit test for damon_sysfs_set_targets()
...
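The pagemap item above is the one new userspace-visible API called out in this list, so a short illustration may help. This is a sketch only: the struct, flag and category names below are my reading of the 6.7 uapi headers and should be checked against <linux/fs.h>; the optional clear step (PM_SCAN_WP_MATCHING together with userfaultfd WP-async) is not shown.
/*
 * Hypothetical PAGEMAP_SCAN usage: report which pages in a range are
 * resident.  All uapi names assumed from the 6.7 <linux/fs.h>.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int scan_present(unsigned long start, unsigned long end)
{
	struct page_region regions[32];
	struct pm_scan_arg arg = {
		.size          = sizeof(arg),
		.start         = start,
		.end           = end,
		.vec           = (unsigned long)regions,
		.vec_len       = 32,
		.category_mask = PAGE_IS_PRESENT,	/* match resident pages */
		.return_mask   = PAGE_IS_PRESENT,
	};
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int nr;

	if (fd < 0)
		return -1;
	nr = ioctl(fd, PAGEMAP_SCAN, &arg);	/* >= 0: nr of regions filled */
	close(fd);
	return nr;
}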
2023-11-03 13:38:47 +08:00
|
|
|
struct shrinker *shrink;
|
2020-11-13 06:19:47 +08:00
|
|
|
|
2022-10-15 12:47:21 +08:00
|
|
|
#ifdef __KERNEL__
|
2022-06-17 13:07:54 +08:00
|
|
|
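/*
 * Per-cpu freelists for cached keys; kernel builds only, since this
 * file is also built as part of the userspace bcachefs-tools.
 */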
bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
|
|
|
|
if (!bc->pcpu_freed)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
|
2022-10-15 12:47:21 +08:00
|
|
|
#endif
|
2022-06-17 13:07:54 +08:00
|
|
|
|
2023-03-15 03:35:57 +08:00
|
|
|
if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
|
|
|
|
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
|
2020-11-30 12:48:20 +08:00
|
|
|
|
|
|
|
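/* mark the rhashtable as live so the exit path knows to destroy it */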
bc->table_init_done = true;
|
2021-04-05 13:23:55 +08:00
|
|
|
|
Second bcachefs pull request for 6.7-rc1
Here's the second big bcachefs pull request. This brings your tree up to
date with my master branch, which is what existing bcachefs users are
currently running.
All but the last few patches have been in linux-next, those being small
fixes. Test results from my dashboard:
https://evilpiepirate.org/~testdashboard/ci?commit=c7046ed0cf9bb33599aa7e72e7b67bba4be42d64
New features:
- rebalance_work btree (and metadata version 1.3): the rebalance thread
no longer has to scan to find extents that need processing - big
scalability improvement.
- sb_errors superblock section: this adds counters for each fsck error
type, since filesystem creation, along with the date of the most
recent error. It'll get us better bug reports (since users do not
typically report errors that fsck was able to fix), and I might add
telemetry for this in the future.
Fixes include:
- multiple snapshot deletion fixes
- members_v2 fixups
- deleted_inodes btree fixes
- copygc thread no longer spins when a device is full but has no
fragmented buckets (i.e. rebalance needs to move data around instead)
- a fix for a memory reclaim issue with the btree key cache: we're now
careful not to hold the srcu read lock that blocks key cache reclaim
for too long
- an early allocator locking fix, from Brian
- endianness fixes, from Brian
- CONFIG_BCACHEFS_DEBUG_TRANSACTIONS no longer defaults to y, a big
performance improvement on multithreaded workloads
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEKnAFLkS8Qha+jvQrE6szbY3KbnYFAmVH9xYACgkQE6szbY3K
bnahLRAAiNRZL73SQ+MW79o4yPqGwt0Eyy/mvoiGpZf1B8uXp0oZ55j2w3l887Uf
LeM03mInAYCPdyp/d4vxqIr96j9BODmRRl8sEkkGdJDzokLG+22F0ovOe45KWTxL
kBoNdng/O/oeOe/1K7taP3KzBvMx2nOF6oA+xfgyCjECMArAIXek0iocyEUR4Ywd
vGKhLNn1k2c+94wacnDYwjjdcLBxoqxsFXlpu6V0BcaY+DX4J3aBaGmj75KEoCI0
VbBOzxrOO4QzJrzW2+hxZZWgGyvReCkBJvqfORfuPxiSbFobTim10MdfZOAMQA1U
Xr1FTEpK1wMX0/pPVgZRqaOsttC+yc/SsfPNgSxybgHPbDlMLaakDHjvYssbKOYG
urDWSMG5yCsktSLj95SXsvUFKZaZFD72SKBNdgdt/nZjwTHuNQ7IkdrMwIrCQ/PT
Ifn50UrR/Ahd8RAd5tyNCPw6U9VfwnxACSNl2KA7ONKpvHb+gSt1JsJTDyz1+gN9
nFVrw1SHKQ6EIV6XhVon/5DEuRTzqoYGWoN08FHEUq9fBlvnVpmbJErCQMplOjz9
OQnAfpJH4YqkpXyjFAjP1V0An+RUn8QvDgXNqC9TyvCYuOliVFuil4y7/c+7oIQU
NEoz+jVLenqsGOGAbduI4/Q567COojRgwEvbebSIxSImXuhCNj4=
=Lo4N
-----END PGP SIGNATURE-----
Merge tag 'bcachefs-2023-11-5' of https://evilpiepirate.org/git/bcachefs
Pull more bcachefs updates from Kent Overstreet:
"Here's the second big bcachefs pull request. This brings your tree up
to date with my master branch, which is what existing bcachefs users
are currently running.
New features:
- rebalance_work btree (and metadata version 1.3): the rebalance
thread no longer has to scan to find extents that need processing -
big scalability improvement.
- sb_errors superblock section: this adds counters for each fsck
error type, since filesystem creation, along with the date of the
most recent error. It'll get us better bug reports (since users do
not typically report errors that fsck was able to fix), and I might
add telemetry for this in the future.
Fixes include:
- multiple snapshot deletion fixes
- members_v2 fixups
- deleted_inodes btree fixes
- copygc thread no longer spins when a device is full but has no
fragmented buckets (i.e. rebalance needs to move data around
instead)
- a fix for a memory reclaim issue with the btree key cache: we're
now careful not to hold the srcu read lock that blocks key cache
reclaim for too long (see the srcu sketch after this log)
- an early allocator locking fix, from Brian
- endianness fixes, from Brian
- CONFIG_BCACHEFS_DEBUG_TRANSACTIONS no longer defaults to y, a big
performance improvement on multithreaded workloads"
* tag 'bcachefs-2023-11-5' of https://evilpiepirate.org/git/bcachefs: (70 commits)
bcachefs: Improve stripe checksum error message
bcachefs: Simplify, fix bch2_backpointer_get_key()
bcachefs: kill thing_it_points_to arg to backpointer_not_found()
bcachefs: bch2_ec_read_extent() now takes btree_trans
bcachefs: bch2_stripe_to_text() now prints ptr gens
bcachefs: Don't iterate over journal entries just for btree roots
bcachefs: Break up bch2_journal_write()
bcachefs: Replace ERANGE with private error codes
bcachefs: bkey_copy() is no longer a macro
bcachefs: x-macro-ify inode flags enum
bcachefs: Convert bch2_fs_open() to darray
bcachefs: Move __bch2_members_v2_get_mut to sb-members.h
bcachefs: bch2_prt_datetime()
bcachefs: CONFIG_BCACHEFS_DEBUG_TRANSACTIONS no longer defaults to y
bcachefs: Add a comment for BTREE_INSERT_NOJOURNAL usage
bcachefs: rebalance_work btree is not a snapshots btree
bcachefs: Add missing printk newlines
bcachefs: Fix recovery when forced to use JSET_NO_FLUSH journal entry
bcachefs: .get_parent() should return an error pointer
bcachefs: Fix bch2_delete_dead_inodes()
...
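The srcu item in the log above concerns exactly this file, so a sketch of the pattern may be useful. This is not the actual bcachefs code; walk_keys() and process_one_key() are hypothetical stand-ins. The idea is simply that a long walk must drop and retake the SRCU read lock periodically, so that synchronize_srcu()-based key cache reclaim never waits on one long read-side critical section.

#include <linux/srcu.h>

static void process_one_key(unsigned i) { /* stand-in for real work */ }

static void walk_keys(struct srcu_struct *barrier, unsigned nr)
{
	int idx = srcu_read_lock(barrier);
	unsigned i;

	for (i = 0; i < nr; i++) {
		process_one_key(i);

		/*
		 * Periodically leave the read-side critical section so
		 * pending synchronize_srcu() callers can make progress.
		 */
		if (i % 128 == 127) {
			srcu_read_unlock(barrier, idx);
			idx = srcu_read_lock(barrier);
		}
	}

	srcu_read_unlock(barrier, idx);
}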
2023-11-08 03:38:38 +08:00
|
|
|
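/*
 * Dynamically allocated shrinker, using the shrinker_alloc() /
 * shrinker_register() API introduced in the MM merge above.
 */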
shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
|
2023-11-03 13:38:47 +08:00
|
|
|
if (!shrink)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
|
2023-11-03 13:38:47 +08:00
|
|
|
bc->shrink = shrink;
|
|
|
|
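/*
 * seeks == 0: cached keys can always be re-read from the btree, so
 * tell reclaim they are cheap to drop.
 */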
shrink->seeks = 0;
|
|
|
|
shrink->count_objects = bch2_btree_key_cache_count;
|
|
|
|
shrink->scan_objects = bch2_btree_key_cache_scan;
|
|
|
|
shrink->private_data = c;
|
|
|
|
shrinker_register(shrink);
|
2023-03-15 03:35:57 +08:00
|
|
|
return 0;
|
2019-03-08 08:46:10 +08:00
|
|
|
}
|
2020-06-16 07:53:46 +08:00
|
|
|
|
|
|
|
void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
|
|
|
|
{
|
2023-09-13 03:15:39 +08:00
|
|
|
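/* nr_freed: entries on the key cache freelist; nr_keys: live cached
 * keys; nr_dirty: cached keys not yet flushed back to the btree */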
prt_printf(out, "nr_freed:\t%lu", atomic_long_read(&c->nr_freed));
|
2022-10-17 14:08:07 +08:00
|
|
|
prt_newline(out);
|
|
|
|
prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
|
|
|
|
prt_newline(out);
|
|
|
|
prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
|
|
|
|
prt_newline(out);
|
2020-06-16 07:53:46 +08:00
|
|
|
}
|
2020-11-19 03:09:33 +08:00
|
|
|
|
|
|
|
void bch2_btree_key_cache_exit(void)
|
|
|
|
{
|
2022-10-20 06:31:33 +08:00
|
|
|
kmem_cache_destroy(bch2_key_cache);
|
2020-11-19 03:09:33 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int __init bch2_btree_key_cache_init(void)
|
|
|
|
{
|
2023-07-14 00:00:28 +08:00
|
|
|
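/* bkey_cached objects are shrinkable, so account the slab as reclaimable */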
bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
|
2020-11-19 03:09:33 +08:00
|
|
|
if (!bch2_key_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|