// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_TRANS_H__
#define __XFS_TRANS_H__

/* kernel only transaction subsystem defines */

struct xlog;
struct xfs_buf;
struct xfs_buftarg;
struct xfs_efd_log_item;
struct xfs_efi_log_item;
struct xfs_inode;
struct xfs_item_ops;
struct xfs_log_iovec;
struct xfs_mount;
struct xfs_trans;
struct xfs_trans_res;
struct xfs_dquot_acct;
struct xfs_rud_log_item;
struct xfs_rui_log_item;
struct xfs_btree_cur;
struct xfs_cui_log_item;
struct xfs_cud_log_item;
struct xfs_bui_log_item;
struct xfs_bud_log_item;

struct xfs_log_item {
	struct list_head		li_ail;		/* AIL pointers */
	struct list_head		li_trans;	/* transaction list */
	xfs_lsn_t			li_lsn;		/* last on-disk lsn */
	struct xlog			*li_log;
	struct xfs_ail			*li_ailp;	/* ptr to AIL */
	uint				li_type;	/* item type */
	unsigned long			li_flags;	/* misc flags */
	struct xfs_buf			*li_buf;	/* real buffer pointer */
	struct list_head		li_bio_list;	/* buffer item list */
	const struct xfs_item_ops	*li_ops;	/* function list */

	/* delayed logging */
	struct list_head		li_cil;		/* CIL pointers */
	struct xfs_log_vec		*li_lv;		/* active log vector */
	struct xfs_log_vec		*li_lv_shadow;	/* standby vector */
	xfs_csn_t			li_seq;		/* CIL commit seq */
};

/*
 * li_flags use the (set/test/clear)_bit atomic interfaces because updates can
 * race with each other and we don't want to have to use the AIL lock to
 * serialise all updates.
 */
#define	XFS_LI_IN_AIL	0
#define	XFS_LI_ABORTED	1
#define	XFS_LI_FAILED	2
#define	XFS_LI_DIRTY	3	/* log item dirty in transaction */

#define XFS_LI_FLAGS \
	{ (1u << XFS_LI_IN_AIL),	"IN_AIL" }, \
	{ (1u << XFS_LI_ABORTED),	"ABORTED" }, \
	{ (1u << XFS_LI_FAILED),	"FAILED" }, \
	{ (1u << XFS_LI_DIRTY),		"DIRTY" }

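/*
 * For example, these bits are manipulated with the generic atomic bitops
 * rather than under the AIL lock (a minimal sketch; "lip" is any
 * struct xfs_log_item pointer):
 *
 *	set_bit(XFS_LI_IN_AIL, &lip->li_flags);
 *	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
 *		clear_bit(XFS_LI_FAILED, &lip->li_flags);
 */
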
struct xfs_item_ops {
	unsigned flags;
	void (*iop_size)(struct xfs_log_item *, int *, int *);
	void (*iop_format)(struct xfs_log_item *, struct xfs_log_vec *);
	void (*iop_pin)(struct xfs_log_item *);
	void (*iop_unpin)(struct xfs_log_item *, int remove);
	uint (*iop_push)(struct xfs_log_item *, struct list_head *);
	void (*iop_committing)(struct xfs_log_item *lip, xfs_csn_t seq);
	void (*iop_release)(struct xfs_log_item *);
	xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
	int (*iop_recover)(struct xfs_log_item *lip,
			   struct list_head *capture_list);
	bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
	struct xfs_log_item *(*iop_relog)(struct xfs_log_item *intent,
			struct xfs_trans *tp);
};

/*
 * Log item ops flags
 */
/*
 * Release the log item when the journal commits instead of inserting into the
 * AIL for writeback tracking and/or log tail pinning.
 */
#define XFS_ITEM_RELEASE_WHEN_COMMITTED	(1 << 0)
#define XFS_ITEM_INTENT			(1 << 1)
#define XFS_ITEM_INTENT_DONE		(1 << 2)

static inline bool
xlog_item_is_intent(struct xfs_log_item *lip)
{
	return lip->li_ops->flags & XFS_ITEM_INTENT;
}

static inline bool
xlog_item_is_intent_done(struct xfs_log_item *lip)
{
	return lip->li_ops->flags & XFS_ITEM_INTENT_DONE;
}

void	xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
			  int type, const struct xfs_item_ops *ops);

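/*
 * Sketch of how a log item type is typically wired up: the type supplies a
 * static ops table and registers its embedded struct xfs_log_item with
 * xfs_log_item_init().  The xfs_foo_* names and the XFS_LI_FOO type code
 * below are hypothetical and purely illustrative.
 *
 *	static const struct xfs_item_ops xfs_foo_item_ops = {
 *		.iop_size	= xfs_foo_item_size,
 *		.iop_format	= xfs_foo_item_format,
 *		.iop_pin	= xfs_foo_item_pin,
 *		.iop_unpin	= xfs_foo_item_unpin,
 *		.iop_release	= xfs_foo_item_release,
 *	};
 *
 *	xfs_log_item_init(mp, &foo->foo_item, XFS_LI_FOO, &xfs_foo_item_ops);
 */
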
/*
 * Return values for the iop_push() routines.
 */
#define XFS_ITEM_SUCCESS	0
#define XFS_ITEM_PINNED		1
#define XFS_ITEM_LOCKED		2
#define XFS_ITEM_FLUSHING	3

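/*
 * Sketch of the shape of an ->iop_push handler built around these return
 * codes; the xfs_foo_* helpers are hypothetical and locking details are
 * elided.
 *
 *	static uint
 *	xfs_foo_item_push(
 *		struct xfs_log_item	*lip,
 *		struct list_head	*buffer_list)
 *	{
 *		if (xfs_foo_item_pinned(lip))
 *			return XFS_ITEM_PINNED;
 *		if (!xfs_foo_item_trylock(lip))
 *			return XFS_ITEM_LOCKED;
 *		... queue the backing buffer on buffer_list, then ...
 *		return XFS_ITEM_SUCCESS;
 *	}
 */
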
/*
 * This is the structure maintained for every active transaction.
 */
typedef struct xfs_trans {
	unsigned int		t_magic;	/* magic number */
	unsigned int		t_log_res;	/* amt of log space resvd */
	unsigned int		t_log_count;	/* count for perm log res */
	unsigned int		t_blk_res;	/* # of blocks resvd */
	unsigned int		t_blk_res_used;	/* # of resvd blocks used */
	unsigned int		t_rtx_res;	/* # of rt extents resvd */
	unsigned int		t_rtx_res_used;	/* # of resvd rt extents used */
	unsigned int		t_flags;	/* misc flags */
	xfs_fsblock_t		t_firstblock;	/* first block allocated */
	struct xlog_ticket	*t_ticket;	/* log mgr ticket */
	struct xfs_mount	*t_mountp;	/* ptr to fs mount struct */
	struct xfs_dquot_acct	*t_dqinfo;	/* acctg info for dquots */
	int64_t			t_icount_delta;	/* superblock icount change */
	int64_t			t_ifree_delta;	/* superblock ifree change */
	int64_t			t_fdblocks_delta; /* superblock fdblocks chg */
	int64_t			t_res_fdblocks_delta; /* on-disk only chg */
	int64_t			t_frextents_delta;/* superblock freextents chg*/
	int64_t			t_res_frextents_delta; /* on-disk only chg */
	int64_t			t_dblocks_delta;/* superblock dblocks change */
	int64_t			t_agcount_delta;/* superblock agcount change */
	int64_t			t_imaxpct_delta;/* superblock imaxpct change */
	int64_t			t_rextsize_delta;/* superblock rextsize chg */
	int64_t			t_rbmblocks_delta;/* superblock rbmblocks chg */
	int64_t			t_rblocks_delta;/* superblock rblocks change */
	int64_t			t_rextents_delta;/* superblocks rextents chg */
	int64_t			t_rextslog_delta;/* superblocks rextslog chg */
	struct list_head	t_items;	/* log item descriptors */
	struct list_head	t_busy;		/* list of busy extents */
	struct list_head	t_dfops;	/* deferred operations */
	unsigned long		t_pflags;	/* saved process flags state */
} xfs_trans_t;

/*
 * XFS transaction mechanism exported interfaces that are
 * actually macros.
 */
#define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)

/*
 * XFS transaction mechanism exported interfaces.
 */
int		xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
			uint blocks, uint rtextents, uint flags,
			struct xfs_trans **tpp);
int		xfs_trans_alloc_empty(struct xfs_mount *mp,
			struct xfs_trans **tpp);
void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);

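/*
 * A minimal sketch of the common transaction lifecycle using the interfaces
 * declared in this header.  M_RES(), the tr_ichange reservation and the
 * XFS_ILOCK_EXCL/XFS_ILOG_CORE flags come from other XFS headers and are
 * shown for illustration only; on error paths xfs_trans_cancel() is called
 * instead of xfs_trans_commit().
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);
 */
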
int	xfs_trans_get_buf_map(struct xfs_trans *tp, struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags,
		struct xfs_buf **bpp);

static inline int
xfs_trans_get_buf(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_trans_get_buf_map(tp, target, &map, 1, flags, bpp);
}

int	xfs_trans_read_buf_map(struct xfs_mount *mp,
			       struct xfs_trans *tp,
			       struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       struct xfs_buf **bpp,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_trans_read_buf(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_trans_read_buf_map(mp, tp, target, &map, 1,
				      flags, bpp, ops);
}

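/*
 * Usage sketch for the single-mapping wrappers above; the buffer target,
 * block number, length and (NULL) verifier are illustrative and error
 * handling is elided:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *				   0, &bp, NULL);
 *	if (!error)
 *		xfs_trans_brelse(tp, bp);
 */
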
struct xfs_buf	*xfs_trans_getsb(struct xfs_trans *);

void		xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
bool		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void		xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void		xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
				  uint);
void		xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
bool		xfs_trans_buf_is_dirty(struct xfs_buf *bp);
void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);

int		xfs_trans_commit(struct xfs_trans *);
int		xfs_trans_roll(struct xfs_trans **);
int		xfs_trans_roll_inode(struct xfs_trans **, struct xfs_inode *);
void		xfs_trans_cancel(xfs_trans_t *);

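/*
 * Sketch: long-running updates periodically regenerate their permanent
 * reservation with xfs_trans_roll_inode() so the joined inode stays locked
 * and attached across the roll (error handling elided):
 *
 *	error = xfs_trans_roll_inode(&tp, ip);
 */
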
int		xfs_trans_ail_init(struct xfs_mount *);
void		xfs_trans_ail_destroy(struct xfs_mount *);

void	xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
			       enum xfs_blft);
void	xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
				struct xfs_buf *src_bp);

extern struct kmem_cache	*xfs_trans_cache;

static inline struct xfs_log_item *
xfs_trans_item_relog(
	struct xfs_log_item	*lip,
	struct xfs_trans	*tp)
{
	return lip->li_ops->iop_relog(lip, tp);
}

struct xfs_dquot;

int xfs_trans_alloc_inode(struct xfs_inode *ip, struct xfs_trans_res *resv,
		unsigned int dblocks, unsigned int rblocks, bool force,
		struct xfs_trans **tpp);
int xfs_trans_alloc_icreate(struct xfs_mount *mp, struct xfs_trans_res *resv,
		struct xfs_dquot *udqp, struct xfs_dquot *gdqp,
		struct xfs_dquot *pdqp, unsigned int dblocks,
		struct xfs_trans **tpp);
int xfs_trans_alloc_ichange(struct xfs_inode *ip, struct xfs_dquot *udqp,
		struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, bool force,
		struct xfs_trans **tpp);
int xfs_trans_alloc_dir(struct xfs_inode *dp, struct xfs_trans_res *resv,
		struct xfs_inode *ip, unsigned int *dblocks,
		struct xfs_trans **tpp, int *nospace_error);

static inline void
xfs_trans_set_context(
	struct xfs_trans	*tp)
{
	ASSERT(current->journal_info == NULL);
	tp->t_pflags = memalloc_nofs_save();
	current->journal_info = tp;
}

static inline void
xfs_trans_clear_context(
	struct xfs_trans	*tp)
{
	if (current->journal_info == tp) {
		memalloc_nofs_restore(tp->t_pflags);
		current->journal_info = NULL;
	}
}

static inline void
xfs_trans_switch_context(
	struct xfs_trans	*old_tp,
	struct xfs_trans	*new_tp)
{
	ASSERT(current->journal_info == old_tp);
	new_tp->t_pflags = old_tp->t_pflags;
	old_tp->t_pflags = 0;
	current->journal_info = new_tp;
}

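/*
 * The helpers above scope a transaction's memory allocations; a sketch of
 * the pattern they implement (memalloc_nofs_save/restore are the generic
 * PF_MEMALLOC_NOFS interfaces from <linux/sched/mm.h>):
 *
 *	xfs_trans_set_context(tp);
 *	... allocations here behave as GFP_NOFS ...
 *	xfs_trans_clear_context(tp);
 */
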
#endif	/* __XFS_TRANS_H__ */