2017-03-17 14:18:50 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
|
|
|
|
* Copyright 2012 Google, Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "bcachefs.h"
|
2018-10-06 12:46:55 +08:00
|
|
|
#include "alloc_foreground.h"
|
2020-12-18 04:08:58 +08:00
|
|
|
#include "bkey_buf.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "bset.h"
|
|
|
|
#include "btree_update.h"
|
|
|
|
#include "buckets.h"
|
|
|
|
#include "checksum.h"
|
|
|
|
#include "clock.h"
|
2023-09-11 06:05:17 +08:00
|
|
|
#include "compress.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "debug.h"
|
2018-11-02 03:13:19 +08:00
|
|
|
#include "ec.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "error.h"
|
2019-11-16 04:52:28 +08:00
|
|
|
#include "extent_update.h"
|
2019-10-10 00:11:00 +08:00
|
|
|
#include "inode.h"
|
2023-09-11 06:05:17 +08:00
|
|
|
#include "io_write.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "journal.h"
|
|
|
|
#include "keylist.h"
|
|
|
|
#include "move.h"
|
2022-12-15 09:52:11 +08:00
|
|
|
#include "nocow_locking.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "rebalance.h"
|
2021-03-13 09:30:39 +08:00
|
|
|
#include "subvolume.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "super.h"
|
|
|
|
#include "super-io.h"
|
|
|
|
#include "trace.h"
|
|
|
|
|
|
|
|
#include <linux/blkdev.h>
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
#include <linux/prefetch.h>
|
2017-03-17 14:18:50 +08:00
|
|
|
#include <linux/random.h>
|
2020-07-21 01:00:15 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
|
|
|
|
|
|
|
|
/*
 * Congestion accounting: compare one IO's latency against what the device
 * has shown itself capable of, and raise or decay ca->congested accordingly.
 */
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 threshold = capable << (rw == READ ? 2 : 3);
	s64 over = io_latency - threshold;

	if (!threshold || over <= 0) {
		/* Latency acceptable: let congestion decay back toward zero. */
		if (atomic_read(&ca->congested) > 0)
			atomic_dec(&ca->congested);
		return;
	}

	/*
	 * bump up congested by approximately over * 4 / threshold - we
	 * don't need much accuracy here so don't bother with the divide:
	 */
	if (atomic_read(&ca->congested) < CONGESTED_MAX)
		atomic_add(over >> max_t(int, ilog2(threshold) - 2, 0),
			   &ca->congested);

	ca->congested_last = now;
}
|
|
|
|
|
|
|
|
/*
 * Record the latency of a completed IO for device @ca in direction @rw:
 * folds it into a lock-free exponentially-weighted moving average
 * (ca->cur_latency[rw]), feeds congestion accounting, and updates the
 * device's time_stats.
 */
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	/* Clamp to 0 if the clock read races the submit timestamp: */
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new, v = atomic64_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 *
		 * NOTE(review): "now & ~(~0U << 5)" tests the low 5 bits of
		 * the clock, so roughly 1 in 32 samples still forces an EWMA
		 * update even when close; the (int) cast in abs() presumably
		 * relies on latencies fitting in 32 bits - confirm.
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Allocate, free from mempool: */
|
|
|
|
|
|
|
|
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
|
|
|
|
{
|
|
|
|
struct bvec_iter_all iter;
|
|
|
|
struct bio_vec *bv;
|
|
|
|
|
|
|
|
bio_for_each_segment_all(bv, bio, iter)
|
|
|
|
if (bv->bv_page != ZERO_PAGE(0))
|
|
|
|
mempool_free(bv->bv_page, &c->bio_bounce_pages);
|
|
|
|
bio->bi_vcnt = 0;
|
|
|
|
}
|
|
|
|
|
2019-07-04 07:27:42 +08:00
|
|
|
/*
 * Allocate one bounce page: try the regular page allocator first; on
 * failure, take bio_bounce_pages_lock, flip *using_mempool, and fall back
 * to the mempool.  Once *using_mempool is set, all further allocations in
 * this batch come from the mempool while the lock stays held - the caller
 * is responsible for dropping bio_bounce_pages_lock when done.
 */
static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	if (likely(!*using_mempool)) {
		struct page *page = alloc_page(GFP_NOFS);

		if (likely(page))
			return page;

		/* Allocator failed: switch this batch over to the mempool. */
		mutex_lock(&c->bio_bounce_pages_lock);
		*using_mempool = true;
	}

	return mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
}
|
|
|
|
|
|
|
|
void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
|
2019-07-04 07:27:42 +08:00
|
|
|
size_t size)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
bool using_mempool = false;
|
|
|
|
|
2019-07-04 07:27:42 +08:00
|
|
|
while (size) {
|
|
|
|
struct page *page = __bio_alloc_page_pool(c, &using_mempool);
|
2020-11-06 01:16:05 +08:00
|
|
|
unsigned len = min_t(size_t, PAGE_SIZE, size);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2019-07-04 07:27:42 +08:00
|
|
|
BUG_ON(!bio_add_page(bio, page, len, 0));
|
|
|
|
size -= len;
|
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
if (using_mempool)
|
|
|
|
mutex_unlock(&c->bio_bounce_pages_lock);
|
|
|
|
}
|
|
|
|
|
2019-10-10 00:11:00 +08:00
|
|
|
/* Extent update path: */
|
|
|
|
|
bcachefs: Change when we allow overwrites
Originally, we'd check for -ENOSPC when getting a disk reservation
whenever the new extent took up more space on disk than the old extent.
Erasure coding screwed this up, because with erasure coding writes are
initially replicated, and then in the background the extra replicas are
dropped when the stripe is created. This means that with erasure coding
enabled, writes will always take up more space on disk than the data
they're overwriting - but, according to posix, overwrites aren't
supposed to return ENOSPC.
So, in this patch we fudge things: if the new extent has more replicas
than the _effective_ replicas of the old extent, or if the old extent is
compressed and the new one isn't, we check for ENOSPC when getting the
disk reservation - otherwise, we don't.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-12-15 10:59:33 +08:00
|
|
|
/*
 * Walk the existing extents that @new will overwrite and compute:
 *
 * @i_sectors_delta:	change in the inode's allocated-sector count
 * @disk_sectors_delta:	change in on-disk sector usage
 * @usage_increasing:	set if the overwrite can consume more space than it
 *			frees (more replicas than the old extent's effective
 *			replicas, old extent compressed and new one not, or
 *			differing snapshots) - per the blame context, callers
 *			use this to decide whether the disk reservation must
 *			check for ENOSPC.
 *
 * Returns 0 on success or a transaction-restart/IO error from iteration.
 */
int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing = false;
	*i_sectors_delta = 0;
	*disk_sectors_delta = 0;

	/* Iterate from the same position without disturbing the caller's iterator: */
	bch2_trans_copy_iter(&iter, extent_iter);

	for_each_btree_key_upto_continue_norestart(iter,
				new->k.p, BTREE_ITER_SLOTS, old, ret) {
		/* Number of sectors where @new and @old actually overlap: */
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		/* Old extent's sectors are only freed within the same snapshot: */
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		/* Done once we've covered the full range of @new: */
		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
2022-11-15 12:41:18 +08:00
|
|
|
/*
 * Update the inode owning the extent at @extent_iter as part of the same
 * btree transaction: bump bi_size up to @new_i_size (unless i_size is
 * marked dirty) and apply @i_sectors_delta to bi_sectors.  The inode is
 * converted to inode_v3 format first if needed.
 *
 * Returns 0 or an error from the btree update path.
 */
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	struct btree_iter iter;
	struct bkey_i *k;
	struct bkey_i_inode_v3 *inode;
	/*
	 * Crazy performance optimization:
	 * Every extent update needs to also update the inode: the inode trigger
	 * will set bi->journal_seq to the journal sequence number of this
	 * transaction - for fsync.
	 *
	 * But if that's the only reason we're updating the inode (we're not
	 * updating bi_size or bi_sectors), then we don't need the inode update
	 * to be journalled - if we crash, the bi_journal_seq update will be
	 * lost, but that's fine.
	 */
	unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_CACHED);
	ret = PTR_ERR_OR_ZERO(k);
	if (unlikely(ret))
		return ret;

	/* Older inode formats are upgraded to v3 before we modify them: */
	if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
		k = bch2_inode_to_v3(trans, k);
		ret = PTR_ERR_OR_ZERO(k);
		if (unlikely(ret))
			goto err;
	}

	inode = bkey_i_to_inode_v3(k);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		/* Size changed: this inode update must be journalled. */
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
2019-10-10 00:11:00 +08:00
|
|
|
/*
 * Insert extent @k at @iter and commit the transaction: trims the extent to
 * an atomically-updatable size, accounts sector/usage deltas, grows the
 * disk reservation if needed (honoring @check_enospc), updates the owning
 * inode's i_size/i_sectors in the same transaction, and commits.
 *
 * On success, advances @iter past the inserted extent and accumulates the
 * inode sector delta into @i_sectors_delta_total (if non-NULL).
 */
int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses us the iterator without changing iter->path->pos to
	 * search_key() (which is pos + 1 for extents): we want there to be a
	 * path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/* May shrink k so the update fits in one atomic btree transaction: */
	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	/* Remember the (possibly trimmed) end pos before k is consumed: */
	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		/*
		 * Only fail with -ENOSPC when the caller asked for it AND
		 * disk usage is actually increasing (overwrites shouldn't
		 * return ENOSPC):
		 */
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(iter, next_pos);
	return 0;
}
|
|
|
|
|
2022-10-29 11:57:01 +08:00
|
|
|
/*
 * Default index-update path for the write completion: insert each key on
 * op->insert_keys into the extents btree via bch2_extent_update(), retrying
 * on transaction restart and consuming keys (or fronts of keys, on partial
 * progress) until the keylist is empty or a hard error occurs.
 */
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum = k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		/*
		 * Work on a copy of the front key: on partial progress the
		 * original is only trimmed, and on restart we start over.
		 */
		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		/* Resolve the subvolume to the snapshot the key lands in: */
		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		ret =   bch2_bkey_set_needs_rebalance(c, sk.k,
					op->opts.background_target,
					op->opts.background_compression) ?:
			bch2_extent_update(trans, inum, &iter, sk.k,
					&op->res,
					op->new_i_size, &op->i_sectors_delta,
					op->flags & BCH_WRITE_CHECK_ENOSPC);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		/* Fully inserted -> drop the key; partial -> trim its front: */
		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/* Writes */
|
|
|
|
|
|
|
|
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
|
|
|
|
enum bch_data_type type,
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
const struct bkey_i *k,
|
|
|
|
bool nocow)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2018-11-02 03:10:01 +08:00
|
|
|
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
|
2017-03-17 14:18:50 +08:00
|
|
|
const struct bch_extent_ptr *ptr;
|
|
|
|
struct bch_write_bio *n;
|
|
|
|
struct bch_dev *ca;
|
|
|
|
|
|
|
|
BUG_ON(c->opts.nochanges);
|
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
bkey_for_each_ptr(ptrs, ptr) {
|
2017-03-17 14:18:50 +08:00
|
|
|
BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
|
|
|
|
!c->devs[ptr->dev]);
|
|
|
|
|
|
|
|
ca = bch_dev_bkey_exists(c, ptr->dev);
|
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
if (to_entry(ptr + 1) < ptrs.end) {
|
2017-03-17 14:18:50 +08:00
|
|
|
n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
|
2023-05-29 06:02:38 +08:00
|
|
|
GFP_NOFS, &ca->replica_set));
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
n->bio.bi_end_io = wbio->bio.bi_end_io;
|
|
|
|
n->bio.bi_private = wbio->bio.bi_private;
|
|
|
|
n->parent = wbio;
|
|
|
|
n->split = true;
|
|
|
|
n->bounce = false;
|
|
|
|
n->put_bio = true;
|
|
|
|
n->bio.bi_opf = wbio->bio.bi_opf;
|
|
|
|
bio_inc_remaining(&wbio->bio);
|
|
|
|
} else {
|
|
|
|
n = wbio;
|
|
|
|
n->split = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
n->c = c;
|
|
|
|
n->dev = ptr->dev;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
n->have_ioref = nocow || bch2_dev_get_ioref(ca,
|
2020-08-04 01:58:36 +08:00
|
|
|
type == BCH_DATA_btree ? READ : WRITE);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
n->nocow = nocow;
|
2017-03-17 14:18:50 +08:00
|
|
|
n->submit_time = local_clock();
|
2022-11-16 09:25:08 +08:00
|
|
|
n->inode_offset = bkey_start_offset(&k->k);
|
2017-03-17 14:18:50 +08:00
|
|
|
n->bio.bi_iter.bi_sector = ptr->offset;
|
|
|
|
|
|
|
|
if (likely(n->have_ioref)) {
|
|
|
|
this_cpu_add(ca->io_done->sectors[WRITE][type],
|
|
|
|
bio_sectors(&n->bio));
|
|
|
|
|
|
|
|
bio_set_dev(&n->bio, ca->disk_sb.bdev);
|
|
|
|
|
2020-07-10 06:28:11 +08:00
|
|
|
if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
|
2017-03-17 14:18:50 +08:00
|
|
|
bio_endio(&n->bio);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
submit_bio(&n->bio);
|
|
|
|
} else {
|
|
|
|
n->bio.bi_status = BLK_STS_REMOVED;
|
|
|
|
bio_endio(&n->bio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
static void __bch2_write(struct bch_write_op *);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2022-11-03 12:29:43 +08:00
|
|
|
/*
 * Final teardown for a write operation: release resources acquired for the
 * write, destroy the closure, and notify the caller via op->end_io.
 */
static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	/* All open buckets must already have been released by the index path: */
	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	/* Move writes take their filesystem write ref elsewhere: */
	if (!(op->flags & BCH_WRITE_MOVE))
		bch2_write_ref_put(c, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	/* The closure must be standalone at this point — no waiting parent: */
	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	if (op->end_io)
		op->end_io(op);
}
|
|
|
|
|
2022-09-28 05:17:23 +08:00
|
|
|
/*
 * After an IO error on one or more devices, drop the pointers to the failed
 * devices from every key we were about to insert, compacting the keylist
 * in place.
 *
 * Returns -EIO if any extent loses all of its pointers (the write is then
 * wholly failed); 0 otherwise.
 */
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct bch_extent_ptr *ptr;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		/* dropping ptrs can shrink src, so grab the next key first: */
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return -EIO;
		}

		/* Compact: slide the (possibly shrunk) key down over the gap: */
		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}
|
|
|
|
|
|
|
|
/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 *
 * On error the keylist is emptied, op->error is set and BCH_WRITE_DONE is
 * flagged so the write terminates; open buckets are always released.
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned dev;
	int ret = 0;

	/* Strip pointers to devices that errored before doing the update: */
	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_MOVE)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		/* Account whatever fraction of the keylist was inserted: */
		op->written += sectors_start - keylist_sectors(keys);

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_DONE;
	goto out;
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
|
|
|
|
{
|
|
|
|
if (state != wp->state) {
|
|
|
|
u64 now = ktime_get_ns();
|
|
|
|
|
|
|
|
if (wp->last_state_change &&
|
|
|
|
time_after64(now, wp->last_state_change))
|
|
|
|
wp->time[wp->state] += now - wp->last_state_change;
|
|
|
|
wp->state = state;
|
|
|
|
wp->last_state_change = now;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Derive the write point's state from whether a write op is currently
 * running and whether any ops remain queued, then apply the transition.
 */
static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state new_state;

	if (running)
		new_state = WRITE_POINT_running;
	else if (!list_empty(&wp->writes))
		new_state = WRITE_POINT_waiting_io;
	else
		new_state = WRITE_POINT_stopped;

	__wp_update_state(wp, new_state);
}
|
|
|
|
|
2023-11-18 08:13:27 +08:00
|
|
|
/*
 * Closure callback run when a write's IO completes: queue the op on its
 * write point's list and kick the index-update worker, which will perform
 * the btree update in process context.
 */
static CLOSURE_CALLBACK(bch2_write_index)
{
	closure_type(op, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	/* For completed move writes, the data pages can be freed now: */
	if ((op->flags & BCH_WRITE_DONE) &&
	    (op->flags & BCH_WRITE_MOVE))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	/* May be called from bio completion (irq context) — irqsave lock: */
	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}
|
|
|
|
|
2023-03-01 12:08:04 +08:00
|
|
|
/*
 * Associate a write op with the write point it allocated from, moving a
 * stopped write point to the waiting_io state.
 */
static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		/* Process context here, but writes_lock is also taken from
		 * bio completion — irq variant required: */
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
/*
 * Workqueue entry point: drain the write point's queue of completed write
 * ops, performing each op's index update. Ops that still have more data to
 * write are resubmitted via __bch2_write(); finished ops are torn down.
 */
void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
		if (op)
			list_del(&op->wp_list);
		/* Track running vs. waiting vs. stopped for statistics: */
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		/* Tell __bch2_write() it's already in worker context: */
		op->flags |= BCH_WRITE_IN_WORKER;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_DONE))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}
|
|
|
|
|
|
|
|
/*
 * Per-replica bio completion: record IO errors against the device, release
 * per-bio resources, and complete either the parent bio (for split bios)
 * or the write op's closure.
 */
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
				    op->pos.inode,
				    wbio->inode_offset << 9,
				    "data write error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		/* Mark device failed so the index update drops this replica: */
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_IO_ERROR;
	}

	/* Nocow (in-place) writes require a cache flush on fsync: */
	if (wbio->nocow)
		set_bit(wbio->dev, op->devs_need_flush->d);

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}
|
|
|
|
|
|
|
|
/*
 * Append a new extent key for the chunk just written to op->insert_keys,
 * advancing op->pos past it and recording the allocated pointers from @wp.
 */
static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.version	= version;

	/* Only append a crc entry if there's checksum/compression/nonce state: */
	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
				       op->flags & BCH_WRITE_CACHED);

	bch2_keylist_push(&op->insert_keys);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a bounce bio sized for what the write point can accept.
 *
 * If @buf is non-NULL (erasure coding buffer), the bio maps @buf directly.
 * Otherwise pages are allocated: first from the mempool (bounded by
 * encoded_extent_max), then best-effort from the page allocator;
 * *page_alloc_failed is set if the latter falls short.
 */
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	/* Account for @buf possibly starting mid-page: */
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}
|
|
|
|
|
|
|
|
/*
 * Recompute the checksum of the data in op's bio with @new_csum_type,
 * narrowing the bio to the live portion of the extent and replacing op->crc
 * on success. Returns nonzero if the existing checksum fails to verify.
 */
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	/* Trim the bio to the live range the new checksum covers: */
	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Decrypt the data in op's bio in place, first verifying the existing
 * checksum (which also serves as the MAC). Clears op->crc's checksum state
 * since it no longer matches the plaintext. Returns -EIO on checksum
 * mismatch, or the result of the decrypt.
 */
static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;
	int ret;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
		return -EIO;

	ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * For writes of already-encoded data (BCH_WRITE_DATA_ENCODED): decide
 * whether the extent can be written as-is (DO_WRITE), or must first be
 * decompressed/decrypted/rechecksummed to match the op's target encoding
 * (OK, meaning proceed down the normal encode path). CHECKSUM_ERR and ERR
 * report verification or decompression failure.
 */
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type) &&
		    !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type) &&
	    !c->opts.no_data_io)
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_opt ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}
|
|
|
|
|
2019-11-02 09:16:51 +08:00
|
|
|
/*
 * Core of the write path: carve the data in op's bio into one or more
 * extents that fit the write point, applying compression, encryption and
 * checksumming as configured, bouncing into a freshly allocated bio when
 * the data must be transformed or kept stable. Appends one key per extent
 * via init_append_extent().
 *
 * Returns > 0 if there is more data left to write (caller loops), 0 when
 * the source bio is fully consumed, or a negative error. *_dst is set to
 * the bio to submit.
 */
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		/* XXX look for bug here */
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	/* Bounce whenever the data must be transformed or kept stable: */
	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		/* If page allocation fell short, stop once the bounce bio runs out: */
		if (page_alloc_failed &&
		    dst->bi_iter.bi_size  < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc_is_compressed(crc) &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			u8 compression_type = crc.compression_type;
			u16 nonce = crc.nonce;
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight.
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
			/*
			 * rchecksum_bio sets compression_type on crc from op->crc,
			 * this isn't always correct as sometimes we're changing
			 * an extent from uncompressed to incompressible.
			 */
			crc.compression_type = compression_type;
			crc.nonce = nonce;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			ret = bch2_encrypt_bio(c, op->csum_type,
					       extent_nonce(version, crc), dst);
			if (ret)
				goto err;

			crc.csum = bch2_checksum_bio(c, op->csum_type,
						     extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				      op->inline_keys,
				      ARRAY_SIZE(op->inline_keys),
				      BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	/* Unbounced partial write: split off just the consumed portion: */
	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOFS, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;
do_write:
	*_dst = dst;
	return more;
csum_err:
	bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
	ret = -EIO;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}
|
|
|
|
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
static bool bch2_extent_is_writeable(struct bch_write_op *op,
|
|
|
|
struct bkey_s_c k)
|
|
|
|
{
|
|
|
|
struct bch_fs *c = op->c;
|
|
|
|
struct bkey_s_c_extent e;
|
|
|
|
struct extent_ptr_decoded p;
|
|
|
|
const union bch_extent_entry *entry;
|
|
|
|
unsigned replicas = 0;
|
|
|
|
|
|
|
|
if (k.k->type != KEY_TYPE_extent)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
e = bkey_s_c_to_extent(k);
|
|
|
|
extent_for_each_ptr_decode(e, p, entry) {
|
2023-10-22 23:33:02 +08:00
|
|
|
if (crc_is_encoded(p.crc) || p.has_ec)
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
replicas += bch2_extent_ptr_durability(c, &p);
|
|
|
|
}
|
|
|
|
|
|
|
|
return replicas >= op->opts.data_replicas;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drop the BUCKET_NOCOW_LOCK_UPDATE nocow lock on every bucket that this
 * write op's keys point to.
 *
 * These locks were taken when the nocow write was set up, to keep data
 * moves and bucket reuse from racing with the in-place write; each
 * pointer in op->insert_keys corresponds to one held bucket lock.
 */
static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	const struct bch_extent_ptr *ptr;
	struct bkey_i *k;

	for_each_keylist_key(&op->insert_keys, k) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

		/* one lock per pointer: unlock the bucket each ptr lives in */
		bkey_for_each_ptr(ptrs, ptr)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr),
						 BUCKET_NOCOW_LOCK_UPDATE);
	}
}
|
|
|
|
|
|
|
|
/*
 * Convert one unwritten extent to written, after a nocow write to it
 * has completed.
 *
 * @orig is the key we wrote; @k is the extent currently in the btree at
 * that position.  If the extent no longer matches @orig (it was
 * overwritten while our write was in flight), there is nothing to
 * convert and we return 0.  Otherwise we take a mutable copy of @k,
 * trim it to @orig's range, clear the unwritten flag on every pointer,
 * then update i_size/sector counts and queue the btree update.
 *
 * Returns 0 on success or a transaction error code, to be handled by
 * the commit loop in the caller.
 */
static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	struct bkey_i *new;
	struct bkey_ptrs ptrs;
	struct bch_extent_ptr *ptr;
	int ret;

	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
		/* extent was overwritten under us - nothing to do; trace this */
		return 0;
	}

	/* mutable copy of the live extent, not yet linked into the update */
	new = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	/* only convert the range we actually wrote */
	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return bch2_extent_update_i_size_sectors(trans, iter,
			min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}
|
|
|
|
|
|
|
|
/*
 * After a nocow write to unwritten extents completes, walk every key we
 * wrote and flip the corresponding extents from unwritten to written,
 * committing one btree transaction per key.
 *
 * Errors other than EROFS are logged (ratelimited, keyed by inode and
 * offset); any error is recorded in op->error and stops the conversion
 * of the remaining keys.
 */
static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i *orig;
	struct bkey_s_c k;
	int ret;

	for_each_keylist_key(&op->insert_keys, orig) {
		/*
		 * Iterate over all extents overlapping the range we wrote,
		 * converting each one; the commit loop retries on transaction
		 * restart and commits with BTREE_INSERT_NOFAIL.
		 */
		ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_INTENT, k,
				     NULL, NULL, BTREE_INSERT_NOFAIL, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret) {
			op->error = ret;
			break;
		}
	}

	bch2_trans_put(trans);
}
|
|
|
|
|
|
|
|
static void __bch2_nocow_write_done(struct bch_write_op *op)
|
|
|
|
{
|
|
|
|
bch2_nocow_write_unlock(op);
|
|
|
|
|
|
|
|
if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
|
|
|
|
op->error = -EIO;
|
|
|
|
} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
|
|
|
|
bch2_nocow_write_convert_unwritten(op);
|
|
|
|
}
|
|
|
|
|
2023-11-18 08:13:27 +08:00
|
|
|
/*
 * Closure callback: completion path for a nocow write.  Runs the
 * unlock/convert teardown, then finishes the write op.
 *
 * NOTE: must run in process context - __bch2_nocow_write_done() may do
 * btree transaction commits (see bch2_nocow_write_convert_unwritten()).
 */
static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
	closure_type(op, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}
|
|
|
|
|
|
|
|
static void bch2_nocow_write(struct bch_write_op *op)
|
|
|
|
{
|
|
|
|
struct bch_fs *c = op->c;
|
2023-09-13 05:16:02 +08:00
|
|
|
struct btree_trans *trans;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
struct btree_iter iter;
|
|
|
|
struct bkey_s_c k;
|
|
|
|
struct bkey_ptrs_c ptrs;
|
2023-03-20 00:50:05 +08:00
|
|
|
const struct bch_extent_ptr *ptr;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
struct {
|
|
|
|
struct bpos b;
|
|
|
|
unsigned gen;
|
2022-12-15 09:52:11 +08:00
|
|
|
struct nocow_lock_bucket *l;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
} buckets[BCH_REPLICAS_MAX];
|
|
|
|
unsigned nr_buckets = 0;
|
|
|
|
u32 snapshot;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
if (op->flags & BCH_WRITE_MOVE)
|
|
|
|
return;
|
|
|
|
|
2023-09-13 05:16:02 +08:00
|
|
|
trans = bch2_trans_get(c);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
retry:
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_trans_begin(trans);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
2023-09-13 05:16:02 +08:00
|
|
|
ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
if (unlikely(ret))
|
|
|
|
goto err;
|
|
|
|
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
SPOS(op->pos.inode, op->pos.offset, snapshot),
|
|
|
|
BTREE_ITER_SLOTS);
|
|
|
|
while (1) {
|
|
|
|
struct bio *bio = &op->wbio.bio;
|
|
|
|
|
|
|
|
nr_buckets = 0;
|
|
|
|
|
|
|
|
k = bch2_btree_iter_peek_slot(&iter);
|
|
|
|
ret = bkey_err(k);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* fall back to normal cow write path? */
|
|
|
|
if (unlikely(k.k->p.snapshot != snapshot ||
|
|
|
|
!bch2_extent_is_writeable(op, k)))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (bch2_keylist_realloc(&op->insert_keys,
|
|
|
|
op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
k.k->u64s))
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Get iorefs before dropping btree locks: */
|
|
|
|
ptrs = bch2_bkey_ptrs_c(k);
|
|
|
|
bkey_for_each_ptr(ptrs, ptr) {
|
|
|
|
buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
|
|
|
|
buckets[nr_buckets].gen = ptr->gen;
|
|
|
|
buckets[nr_buckets].l =
|
2022-12-15 09:52:11 +08:00
|
|
|
bucket_nocow_lock(&c->nocow_locks,
|
|
|
|
bucket_to_u64(buckets[nr_buckets].b));
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
prefetch(buckets[nr_buckets].l);
|
|
|
|
|
|
|
|
if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
|
|
|
|
goto err_get_ioref;
|
|
|
|
|
2023-03-20 00:50:05 +08:00
|
|
|
nr_buckets++;
|
|
|
|
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
if (ptr->unwritten)
|
|
|
|
op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlock before taking nocow locks, doing IO: */
|
|
|
|
bkey_reassemble(op->insert_keys.top, k);
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_trans_unlock(trans);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
bch2_cut_front(op->pos, op->insert_keys.top);
|
|
|
|
if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
|
|
|
|
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
|
|
|
|
|
|
|
|
for (i = 0; i < nr_buckets; i++) {
|
|
|
|
struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
|
2022-12-15 09:52:11 +08:00
|
|
|
struct nocow_lock_bucket *l = buckets[i].l;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
bool stale;
|
|
|
|
|
2022-12-15 09:52:11 +08:00
|
|
|
__bch2_bucket_nocow_lock(&c->nocow_locks, l,
|
|
|
|
bucket_to_u64(buckets[i].b),
|
|
|
|
BUCKET_NOCOW_LOCK_UPDATE);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
if (unlikely(stale))
|
|
|
|
goto err_bucket_stale;
|
|
|
|
}
|
|
|
|
|
|
|
|
bio = &op->wbio.bio;
|
|
|
|
if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
|
|
|
|
bio = bio_split(bio, k.k->p.offset - op->pos.offset,
|
|
|
|
GFP_KERNEL, &c->bio_write);
|
|
|
|
wbio_init(bio)->put_bio = true;
|
|
|
|
bio->bi_opf = op->wbio.bio.bi_opf;
|
|
|
|
} else {
|
|
|
|
op->flags |= BCH_WRITE_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
op->pos.offset += bio_sectors(bio);
|
|
|
|
op->written += bio_sectors(bio);
|
|
|
|
|
|
|
|
bio->bi_end_io = bch2_write_endio;
|
|
|
|
bio->bi_private = &op->cl;
|
|
|
|
bio->bi_opf |= REQ_OP_WRITE;
|
|
|
|
closure_get(&op->cl);
|
|
|
|
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
|
|
|
|
op->insert_keys.top, true);
|
|
|
|
|
|
|
|
bch2_keylist_push(&op->insert_keys);
|
|
|
|
if (op->flags & BCH_WRITE_DONE)
|
|
|
|
break;
|
|
|
|
bch2_btree_iter_advance(&iter);
|
|
|
|
}
|
|
|
|
out:
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_trans_iter_exit(trans, &iter);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
err:
|
|
|
|
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
bch_err_inum_offset_ratelimited(c,
|
|
|
|
op->pos.inode,
|
|
|
|
op->pos.offset << 9,
|
|
|
|
"%s: btree lookup error %s",
|
|
|
|
__func__, bch2_err_str(ret));
|
|
|
|
op->error = ret;
|
|
|
|
op->flags |= BCH_WRITE_DONE;
|
|
|
|
}
|
|
|
|
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_trans_put(trans);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
/* fallback to cow write path? */
|
|
|
|
if (!(op->flags & BCH_WRITE_DONE)) {
|
|
|
|
closure_sync(&op->cl);
|
|
|
|
__bch2_nocow_write_done(op);
|
|
|
|
op->insert_keys.top = op->insert_keys.keys;
|
|
|
|
} else if (op->flags & BCH_WRITE_SYNC) {
|
|
|
|
closure_sync(&op->cl);
|
2023-11-18 08:13:27 +08:00
|
|
|
bch2_nocow_write_done(&op->cl.work);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* XXX
|
|
|
|
* needs to run out of process context because ei_quota_lock is
|
|
|
|
* a mutex
|
|
|
|
*/
|
|
|
|
continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
err_get_ioref:
|
2023-03-20 00:50:05 +08:00
|
|
|
for (i = 0; i < nr_buckets; i++)
|
|
|
|
percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
/* Fall back to COW path: */
|
|
|
|
goto out;
|
|
|
|
err_bucket_stale:
|
2023-09-25 09:05:50 +08:00
|
|
|
while (i >= 0) {
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
bch2_bucket_nocow_unlock(&c->nocow_locks,
|
|
|
|
buckets[i].b,
|
|
|
|
BUCKET_NOCOW_LOCK_UPDATE);
|
2023-09-25 09:05:50 +08:00
|
|
|
--i;
|
|
|
|
}
|
2023-03-20 00:50:05 +08:00
|
|
|
for (i = 0; i < nr_buckets; i++)
|
|
|
|
percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
|
|
|
/* We can retry this: */
|
2023-06-21 18:44:44 +08:00
|
|
|
ret = -BCH_ERR_transaction_restart;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
static void __bch2_write(struct bch_write_op *op)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
struct bch_fs *c = op->c;
|
2022-11-01 04:13:05 +08:00
|
|
|
struct write_point *wp = NULL;
|
2022-03-22 07:34:48 +08:00
|
|
|
struct bio *bio = NULL;
|
2020-07-21 01:00:15 +08:00
|
|
|
unsigned nofs_flags;
|
2017-03-17 14:18:50 +08:00
|
|
|
int ret;
|
2020-07-21 01:00:15 +08:00
|
|
|
|
|
|
|
nofs_flags = memalloc_nofs_save();
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
|
2023-02-25 08:07:21 +08:00
|
|
|
if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
bch2_nocow_write(op);
|
|
|
|
if (op->flags & BCH_WRITE_DONE)
|
|
|
|
goto out_nofs_restore;
|
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
again:
|
2018-11-02 03:13:19 +08:00
|
|
|
memset(&op->failed, 0, sizeof(op->failed));
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
do {
|
2019-11-02 09:16:51 +08:00
|
|
|
struct bkey_i *key_to_write;
|
|
|
|
unsigned key_to_write_offset = op->insert_keys.top_p -
|
|
|
|
op->insert_keys.keys_p;
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/* +1 for possible cache device: */
|
2018-10-06 16:12:42 +08:00
|
|
|
if (op->open_buckets.nr + op->nr_replicas + 1 >
|
|
|
|
ARRAY_SIZE(op->open_buckets.v))
|
2022-11-01 04:13:05 +08:00
|
|
|
break;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
if (bch2_keylist_realloc(&op->insert_keys,
|
|
|
|
op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
BKEY_EXTENT_U64s_MAX))
|
2022-11-01 04:13:05 +08:00
|
|
|
break;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2020-08-13 01:48:02 +08:00
|
|
|
/*
|
|
|
|
* The copygc thread is now global, which means it's no longer
|
|
|
|
* freeing up space on specific disks, which means that
|
|
|
|
* allocations for specific disks may hang arbitrarily long:
|
|
|
|
*/
|
2022-11-03 03:41:32 +08:00
|
|
|
ret = bch2_trans_do(c, NULL, NULL, 0,
|
2023-09-13 05:16:02 +08:00
|
|
|
bch2_alloc_sectors_start_trans(trans,
|
2022-11-03 03:41:32 +08:00
|
|
|
op->target,
|
|
|
|
op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
|
|
|
|
op->write_point,
|
|
|
|
&op->devs_have,
|
|
|
|
op->nr_replicas,
|
|
|
|
op->nr_replicas_required,
|
2023-06-25 07:30:10 +08:00
|
|
|
op->watermark,
|
2022-11-03 03:41:32 +08:00
|
|
|
op->flags,
|
|
|
|
(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
|
|
|
|
BCH_WRITE_ONLY_SPECIFIED_DEVS))
|
|
|
|
? NULL : &op->cl, &wp));
|
2022-11-01 04:13:05 +08:00
|
|
|
if (unlikely(ret)) {
|
2022-12-14 04:17:40 +08:00
|
|
|
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
|
2022-10-30 03:54:17 +08:00
|
|
|
break;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2022-10-30 03:54:17 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
EBUG_ON(!wp);
|
2020-07-23 10:40:32 +08:00
|
|
|
|
2018-10-06 16:12:42 +08:00
|
|
|
bch2_open_bucket_get(c, wp, &op->open_buckets);
|
2019-11-02 09:16:51 +08:00
|
|
|
ret = bch2_write_extent(op, wp, &bio);
|
2022-11-01 04:13:05 +08:00
|
|
|
|
2022-11-25 07:03:55 +08:00
|
|
|
bch2_alloc_sectors_done_inlined(c, wp);
|
2022-10-30 03:54:17 +08:00
|
|
|
err:
|
|
|
|
if (ret <= 0) {
|
2020-05-13 12:15:28 +08:00
|
|
|
op->flags |= BCH_WRITE_DONE;
|
2019-11-02 09:16:51 +08:00
|
|
|
|
2022-10-30 03:54:17 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
op->error = ret;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-02 09:16:51 +08:00
|
|
|
bio->bi_end_io = bch2_write_endio;
|
|
|
|
bio->bi_private = &op->cl;
|
2019-11-10 05:43:16 +08:00
|
|
|
bio->bi_opf |= REQ_OP_WRITE;
|
2019-11-02 09:16:51 +08:00
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
closure_get(bio->bi_private);
|
2019-11-02 09:16:51 +08:00
|
|
|
|
|
|
|
key_to_write = (void *) (op->insert_keys.keys_p +
|
|
|
|
key_to_write_offset);
|
|
|
|
|
2020-07-10 06:28:11 +08:00
|
|
|
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
key_to_write, false);
|
2017-03-17 14:18:50 +08:00
|
|
|
} while (ret);
|
2022-10-30 03:54:17 +08:00
|
|
|
|
2020-05-13 12:15:28 +08:00
|
|
|
/*
|
2022-10-30 03:54:17 +08:00
|
|
|
* Sync or no?
|
|
|
|
*
|
|
|
|
* If we're running asynchronously, we may still want to block
|
|
|
|
* synchronously here if we weren't able to submit all of the IO at
|
|
|
|
* once, as that signals backpressure to the caller.
|
2020-05-13 12:15:28 +08:00
|
|
|
*/
|
2022-10-30 03:54:17 +08:00
|
|
|
if ((op->flags & BCH_WRITE_SYNC) ||
|
|
|
|
(!(op->flags & BCH_WRITE_DONE) &&
|
|
|
|
!(op->flags & BCH_WRITE_IN_WORKER))) {
|
2022-11-01 04:13:05 +08:00
|
|
|
closure_sync(&op->cl);
|
2017-03-17 14:18:50 +08:00
|
|
|
__bch2_write_index(op);
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
if (!(op->flags & BCH_WRITE_DONE))
|
|
|
|
goto again;
|
2022-11-03 12:29:43 +08:00
|
|
|
bch2_write_done(&op->cl);
|
2022-11-01 04:13:05 +08:00
|
|
|
} else {
|
2023-03-01 12:08:04 +08:00
|
|
|
bch2_write_queue(op, wp);
|
2022-11-01 04:13:05 +08:00
|
|
|
continue_at(&op->cl, bch2_write_index, NULL);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-03 05:12:00 +08:00
|
|
|
out_nofs_restore:
|
2022-11-01 04:13:05 +08:00
|
|
|
memalloc_nofs_restore(nofs_flags);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2019-11-10 05:43:16 +08:00
|
|
|
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
|
|
|
|
{
|
|
|
|
struct bio *bio = &op->wbio.bio;
|
|
|
|
struct bvec_iter iter;
|
|
|
|
struct bkey_i_inline_data *id;
|
|
|
|
unsigned sectors;
|
|
|
|
int ret;
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
|
|
|
|
op->flags |= BCH_WRITE_DONE;
|
|
|
|
|
2019-12-29 09:17:06 +08:00
|
|
|
bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
|
2019-11-30 02:47:42 +08:00
|
|
|
|
2019-11-10 05:43:16 +08:00
|
|
|
ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
BKEY_U64s + DIV_ROUND_UP(data_len, 8));
|
|
|
|
if (ret) {
|
|
|
|
op->error = ret;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
sectors = bio_sectors(bio);
|
|
|
|
op->pos.offset += sectors;
|
|
|
|
|
|
|
|
id = bkey_inline_data_init(op->insert_keys.top);
|
|
|
|
id->k.p = op->pos;
|
|
|
|
id->k.version = op->version;
|
|
|
|
id->k.size = sectors;
|
|
|
|
|
|
|
|
iter = bio->bi_iter;
|
|
|
|
iter.bi_size = data_len;
|
|
|
|
memcpy_from_bio(id->v.data, bio, iter);
|
|
|
|
|
|
|
|
while (data_len & 7)
|
|
|
|
id->v.data[data_len++] = '\0';
|
|
|
|
set_bkey_val_bytes(&id->k, data_len);
|
|
|
|
bch2_keylist_push(&op->insert_keys);
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
__bch2_write_index(op);
|
2019-11-10 05:43:16 +08:00
|
|
|
err:
|
2022-11-03 12:29:43 +08:00
|
|
|
bch2_write_done(&op->cl);
|
2019-11-10 05:43:16 +08:00
|
|
|
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/**
|
2023-09-13 06:41:22 +08:00
|
|
|
* bch2_write() - handle a write to a cache device or flash only volume
|
|
|
|
* @cl: &bch_write_op->cl
|
2017-03-17 14:18:50 +08:00
|
|
|
*
|
|
|
|
* This is the starting point for any data to end up in a cache device; it could
|
|
|
|
* be from a normal write, or a writeback write, or a write to a flash only
|
|
|
|
* volume - it's also used by the moving garbage collector to compact data in
|
|
|
|
* mostly empty buckets.
|
|
|
|
*
|
|
|
|
* It first writes the data to the cache, creating a list of keys to be inserted
|
|
|
|
* (if the data won't fit in a single open bucket, there will be multiple keys);
|
|
|
|
* after the data is written it calls bch_journal, and after the keys have been
|
|
|
|
* added to the next journal write they're inserted into the btree.
|
|
|
|
*
|
|
|
|
* If op->discard is true, instead of inserting the data it invalidates the
|
|
|
|
* region of the cache represented by op->bio and op->inode.
|
|
|
|
*/
|
2023-11-18 08:13:27 +08:00
|
|
|
CLOSURE_CALLBACK(bch2_write)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2023-11-18 08:13:27 +08:00
|
|
|
closure_type(op, struct bch_write_op, cl);
|
2019-08-22 08:16:42 +08:00
|
|
|
struct bio *bio = &op->wbio.bio;
|
2017-03-17 14:18:50 +08:00
|
|
|
struct bch_fs *c = op->c;
|
2019-11-10 05:43:16 +08:00
|
|
|
unsigned data_len;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
EBUG_ON(op->cl.parent);
|
2017-03-17 14:18:50 +08:00
|
|
|
BUG_ON(!op->nr_replicas);
|
|
|
|
BUG_ON(!op->write_point.v);
|
2022-11-24 16:12:22 +08:00
|
|
|
BUG_ON(bkey_eq(op->pos, POS_MAX));
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2019-11-10 05:43:16 +08:00
|
|
|
op->start_time = local_clock();
|
|
|
|
bch2_keylist_init(&op->insert_keys, op->inline_keys);
|
|
|
|
wbio_init(bio)->put_bio = false;
|
|
|
|
|
2021-12-15 03:24:41 +08:00
|
|
|
if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
|
2022-11-16 09:25:08 +08:00
|
|
|
bch_err_inum_offset_ratelimited(c,
|
|
|
|
op->pos.inode,
|
|
|
|
op->pos.offset << 9,
|
|
|
|
"misaligned write");
|
2019-08-22 08:16:42 +08:00
|
|
|
op->error = -EIO;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2023-03-14 10:01:47 +08:00
|
|
|
if (c->opts.nochanges) {
|
|
|
|
op->error = -BCH_ERR_erofs_no_writes;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(op->flags & BCH_WRITE_MOVE) &&
|
2023-02-10 01:21:45 +08:00
|
|
|
!bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
|
2022-12-12 09:37:11 +08:00
|
|
|
op->error = -BCH_ERR_erofs_no_writes;
|
2019-08-22 08:16:42 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2022-03-15 16:36:33 +08:00
|
|
|
this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
|
2019-08-22 08:16:42 +08:00
|
|
|
bch2_increment_clock(c, bio_sectors(bio), WRITE);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2019-11-10 05:43:16 +08:00
|
|
|
data_len = min_t(u64, bio->bi_iter.bi_size,
|
|
|
|
op->new_i_size - (op->pos.offset << 9));
|
|
|
|
|
2019-11-30 02:48:09 +08:00
|
|
|
if (c->opts.inline_data &&
|
|
|
|
data_len <= min(block_bytes(c) / 2, 1024U)) {
|
2019-11-10 05:43:16 +08:00
|
|
|
bch2_write_data_inline(op, data_len);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
__bch2_write(op);
|
2019-08-22 08:16:42 +08:00
|
|
|
return;
|
|
|
|
err:
|
2020-06-30 06:22:06 +08:00
|
|
|
bch2_disk_reservation_put(c, &op->res);
|
2019-12-28 02:44:03 +08:00
|
|
|
|
2022-11-01 04:13:05 +08:00
|
|
|
closure_debug_destroy(&op->cl);
|
|
|
|
if (op->end_io)
|
2019-11-12 02:42:10 +08:00
|
|
|
op->end_io(op);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2023-07-07 10:47:42 +08:00
|
|
|
static const char * const bch2_write_flags[] = {
|
2023-03-12 09:38:46 +08:00
|
|
|
#define x(f) #f,
|
|
|
|
BCH_WRITE_FLAGS()
|
|
|
|
#undef x
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
|
|
|
|
{
|
|
|
|
prt_str(out, "pos: ");
|
|
|
|
bch2_bpos_to_text(out, op->pos);
|
|
|
|
prt_newline(out);
|
|
|
|
printbuf_indent_add(out, 2);
|
|
|
|
|
|
|
|
prt_str(out, "started: ");
|
|
|
|
bch2_pr_time_units(out, local_clock() - op->start_time);
|
|
|
|
prt_newline(out);
|
|
|
|
|
|
|
|
prt_str(out, "flags: ");
|
|
|
|
prt_bitflags(out, bch2_write_flags, op->flags);
|
|
|
|
prt_newline(out);
|
|
|
|
|
|
|
|
prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
|
|
|
|
prt_newline(out);
|
|
|
|
|
|
|
|
printbuf_indent_sub(out, 2);
|
|
|
|
}
|
|
|
|
|
2023-09-11 06:05:17 +08:00
|
|
|
void bch2_fs_io_write_exit(struct bch_fs *c)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2023-09-11 06:05:17 +08:00
|
|
|
mempool_exit(&c->bio_bounce_pages);
|
|
|
|
bioset_exit(&c->bio_write);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2023-09-11 06:05:17 +08:00
|
|
|
int bch2_fs_io_write_init(struct bch_fs *c)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2023-09-11 06:05:17 +08:00
|
|
|
if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
|
|
|
|
BIOSET_NEED_BVECS))
|
|
|
|
return -BCH_ERR_ENOMEM_bio_write_init;
|
2023-03-15 03:35:57 +08:00
|
|
|
|
|
|
|
if (mempool_init_page_pool(&c->bio_bounce_pages,
|
2017-03-17 14:18:50 +08:00
|
|
|
max_t(unsigned,
|
2021-12-15 03:34:03 +08:00
|
|
|
c->opts.btree_node_size,
|
|
|
|
c->opts.encoded_extent_max) /
|
2023-03-15 03:35:57 +08:00
|
|
|
PAGE_SIZE, 0))
|
|
|
|
return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
return 0;
|
|
|
|
}
|