/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int      xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH     32

STATIC int
xfs_qm_dquot_walk(
        struct xfs_mount        *mp,
        int                     type,
        int                     (*execute)(struct xfs_dquot *dqp, void *data),
        void                    *data)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        uint32_t                next_index;
        int                     last_error = 0;
        int                     skipped;
        int                     nr_found;

restart:
        skipped = 0;
        next_index = 0;
        nr_found = 0;

        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                mutex_lock(&qi->qi_tree_lock);
                nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                                        next_index, XFS_DQ_LOOKUP_BATCH);
                if (!nr_found) {
                        mutex_unlock(&qi->qi_tree_lock);
                        break;
                }

                for (i = 0; i < nr_found; i++) {
                        struct xfs_dquot *dqp = batch[i];

                        next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

                        error = execute(batch[i], data);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                mutex_unlock(&qi->qi_tree_lock);

                /* bail out if the filesystem is corrupted.  */
                if (last_error == EFSCORRUPTED) {
                        skipped = 0;
                        break;
                }
        }

        if (skipped) {
                delay(1);
                goto restart;
        }

        return last_error;
}
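
/*
 * Note on the walk callback (summarising the loop above): execute() is
 * called for each dquot returned by the batched radix-tree lookup.  A
 * return of 0 means the dquot was handled, EAGAIN means it was skipped
 * and the whole walk is retried after a short delay, and any other error
 * is recorded as the walk's result (EFSCORRUPTED aborts the walk at once).
 */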

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct xfs_dquot        *gdqp = NULL;
        struct xfs_dquot        *pdqp = NULL;

        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        /*
         * If this quota has a hint attached, prepare for releasing it now.
         */
        gdqp = dqp->q_gdquot;
        if (gdqp) {
                xfs_dqlock(gdqp);
                dqp->q_gdquot = NULL;
        }

        pdqp = dqp->q_pdquot;
        if (pdqp) {
                xfs_dqlock(pdqp);
                dqp->q_pdquot = NULL;
        }

        dqp->dq_flags |= XFS_DQ_FREEING;

        xfs_dqflock(dqp);

        /*
         * If we are turning this type of quotas off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                __func__, dqp);
                } else {
                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                }
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));
        qi->qi_dquots--;

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        mutex_lock(&qi->qi_lru_lock);
        ASSERT(!list_empty(&dqp->q_lru));
        list_del_init(&dqp->q_lru);
        qi->qi_lru_count--;
        XFS_STATS_DEC(xs_qm_dquot_unused);
        mutex_unlock(&qi->qi_lru_lock);

        xfs_qm_dqdestroy(dqp);

        if (gdqp)
                xfs_qm_dqput(gdqp);
        if (pdqp)
                xfs_qm_dqput(pdqp);
        return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
{
        if (flags & XFS_QMOPT_UQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount        *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
        }
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        xfs_mount_t     *mp)
{
        int             error = 0;
        uint            sbf;

        /*
         * If quotas on realtime volumes are not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!XFS_IS_GQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_GQUOTA_CHKD;
        if (!XFS_IS_PQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on-disk superblock doesn't know that!
                         */
                        ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
{
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        IRELE(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        IRELE(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_pquotaip) {
                        IRELE(mp->m_quotainfo->qi_pquotaip);
                        mp->m_quotainfo->qi_pquotaip = NULL;
                }
        }
}
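
/*
 * Attach one dquot of the given type to the inode: use the copy already
 * cached in the inode if there is one, then try the group/project hint
 * hanging off the user dquot (udqhint), and only fall back to
 * xfs_qm_dqget() when neither shortcut applies.
 */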
STATIC int
xfs_qm_dqattach_one(
        xfs_inode_t     *ip,
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
{
        xfs_dquot_t     *dqp;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * udqhint is the i_udquot field in inode, and is non-NULL only
         * when the type arg is group/project. Its purpose is to save a
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
        if (udqhint) {
                ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);

                /*
                 * No need to take dqlock to look at the id.
                 *
                 * The ID can't change until it gets reclaimed, and it won't
                 * be reclaimed as long as we have a ref from inode and we
                 * hold the ilock.
                 */
                if (type == XFS_DQ_GROUP)
                        dqp = udqhint->q_gdquot;
                else
                        dqp = udqhint->q_pdquot;
                if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
                        ASSERT(*IO_idqpp == NULL);

                        *IO_idqpp = xfs_qm_dqhold(dqp);
                        xfs_dqunlock(udqhint);
                        return 0;
                }

                /*
                 * We can't hold a dquot lock when we call the dqget code.
                 * We'll deadlock in no time, because of (not conforming to)
                 * lock ordering - the inodelock comes before any dquot lock,
                 * and we may drop and reacquire the ilock in xfs_qm_dqget().
                 */
                xfs_dqunlock(udqhint);
        }

        /*
         * Find the dquot from somewhere. This bumps the
         * reference count of dquot and returns it locked.
         * This can return ENOENT if dquot didn't exist on
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
        error = xfs_qm_dqget(ip->i_mount, ip, id, type,
                             doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}

/*
 * Given a udquot and group/project type, attach the group/project
 * dquot pointer to the udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_hint(
        struct xfs_inode        *ip,
        int                     type)
{
        struct xfs_dquot **dqhintp;
        struct xfs_dquot *dqp;
        struct xfs_dquot *udq = ip->i_udquot;

        ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);

        xfs_dqlock(udq);

        if (type == XFS_DQ_GROUP) {
                dqp = ip->i_gdquot;
                dqhintp = &udq->q_gdquot;
        } else {
                dqp = ip->i_pdquot;
                dqhintp = &udq->q_pdquot;
        }

        if (*dqhintp) {
                struct xfs_dquot *tmp;

                if (*dqhintp == dqp)
                        goto done;

                tmp = *dqhintp;
                *dqhintp = NULL;
                xfs_qm_dqrele(tmp);
        }

        *dqhintp = xfs_qm_dqhold(dqp);
done:
        xfs_dqunlock(udq);
}
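
/*
 * Cheap checks that let callers skip the dquot attach path entirely: quota
 * must be running and on for this mount, the inode must not already have
 * its dquots attached, and the quota inodes themselves never get dquots.
 */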
static bool
xfs_qm_need_dqattach(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return false;
        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        uint            nquotas = 0;
        int             error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
                 * attached above. It'll get detached, if not already.
                 */
                if (error)
                        goto done;
                nquotas++;
        }

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_pdquot);
                /*
                 * Don't worry about the udquot that we may have
                 * attached above. It'll get detached, if not already.
                 */
                if (error)
                        goto done;
                nquotas++;
        }

        /*
         * Attach this group/project quota to the user quota as a hint.
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas > 1 && ip->i_udquot) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
                ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));

                /*
                 * We do not have i_udquot locked at this point, but this check
                 * is OK since we don't depend on the i_gdquot to be accurate
                 * 100% all the time. It is just a hint, and this will
                 * succeed in general.
                 */
                if (ip->i_udquot->q_gdquot != ip->i_gdquot)
                        xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);

                if (ip->i_udquot->q_pdquot != ip->i_pdquot)
                        xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
        }

 done:
#ifdef DEBUG
        if (!error) {
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_GQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
                if (XFS_IS_PQUOTA_ON(mp))
                        ASSERT(ip->i_pdquot);
        }
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
        return error;
}
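
/*
 * Wrapper around xfs_qm_dqattach_locked() that takes and drops the inode's
 * ILOCK_EXCL for callers that do not already hold it.
 */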
int
xfs_qm_dqattach(
        struct xfs_inode        *ip,
        uint                    flags)
{
        int                     error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, flags);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        xfs_inode_t     *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        if (ip->i_pdquot) {
                xfs_qm_dqrele(ip->i_pdquot);
                ip->i_pdquot = NULL;
        }
}
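
/*
 * Work out how many on-disk dquot records fit in a buffer of the given
 * size: nbblks is in 512-byte basic block units, so convert it to bytes
 * and divide by the size of one xfs_dqblk_t record.
 */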
int
xfs_qm_calc_dquots_per_chunk(
        struct xfs_mount        *mp,
        unsigned int            nbblks) /* basic block units */
{
        unsigned int    ndquots;

        ASSERT(nbblks > 0);
        ndquots = BBTOB(nbblks);
        do_div(ndquots, sizeof(xfs_dqblk_t));

        return ndquots;
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qinf;
        int             error;
        xfs_dquot_t     *dqp;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
                kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        INIT_LIST_HEAD(&qinf->qi_lru_list);
        qinf->qi_lru_count = 0;
        mutex_init(&qinf->qi_lru_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
                                                        qinf->qi_dqchunklen);

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * We look at the USR dquot with id == 0 first, but if user quotas
         * are not enabled we goto the GRP dquot with id == 0.
         * We don't really care to keep separate default limits for user
         * and group quotas, at least not at this point.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;

                /*
                 * The warnings and timers set the grace period given to
                 * a user or group before he or she can not perform any
                 * more writing. If it is zero, a default is used.
                 */
                qinf->qi_btimelimit = ddqp->d_btimer ?
                        be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = ddqp->d_itimer ?
                        be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
                        be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = ddqp->d_bwarns ?
                        be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = ddqp->d_iwarns ?
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
                qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
                qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
                qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }

        qinf->qi_shrinker.shrink = xfs_qm_shake;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        unregister_shrinker(&qi->qi_shrinker);

        if (qi->qi_uquotaip) {
                IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        if (qi->qi_pquotaip) {
                IRELE(qi->qi_pquotaip);
                qi->qi_pquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create an inode and return with a reference already taken, but unlocked
|
|
|
|
* This is how we create quota inodes
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_qm_qino_alloc(
|
|
|
|
xfs_mount_t *mp,
|
|
|
|
xfs_inode_t **ip,
|
|
|
|
__int64_t sbfields,
|
|
|
|
uint flags)
|
|
|
|
{
|
|
|
|
xfs_trans_t *tp;
|
|
|
|
int error;
|
|
|
|
int committed;
|
|
|
|
|
2013-07-20 06:36:02 +08:00
|
|
|
*ip = NULL;
|
|
|
|
/*
|
|
|
|
* With superblock that doesn't have separate pquotino, we
|
|
|
|
* share an inode between gquota and pquota. If the on-disk
|
|
|
|
* superblock has GQUOTA and the filesystem is now mounted
|
|
|
|
* with PQUOTA, just use sb_gquotino for sb_pquotino and
|
|
|
|
* vice-versa.
|
|
|
|
*/
|
|
|
|
if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
|
|
|
|
(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
|
|
|
|
xfs_ino_t ino = NULLFSINO;
|
|
|
|
|
|
|
|
if ((flags & XFS_QMOPT_PQUOTA) &&
|
|
|
|
(mp->m_sb.sb_gquotino != NULLFSINO)) {
|
|
|
|
ino = mp->m_sb.sb_gquotino;
|
|
|
|
ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
|
|
|
|
} else if ((flags & XFS_QMOPT_GQUOTA) &&
|
|
|
|
(mp->m_sb.sb_pquotino != NULLFSINO)) {
|
|
|
|
ino = mp->m_sb.sb_pquotino;
|
|
|
|
ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
|
|
|
|
}
|
|
|
|
if (ino != NULLFSINO) {
|
|
|
|
error = xfs_iget(mp, NULL, ino, 0, 0, ip);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
mp->m_sb.sb_gquotino = NULLFSINO;
|
|
|
|
mp->m_sb.sb_pquotino = NULLFSINO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-01-11 12:27:50 +08:00
|
|
|
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if (!*ip) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
					     XFS_TRANS_ABORT);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
				 XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
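
/*
 * Reset the counters, timers and warning counts of every dquot in a dquot
 * buffer so that quotacheck can rebuild the usage figures from scratch.
 */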
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
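
		/*
		 * The on-disk dquot contents were just modified, so on
		 * CRC-enabled filesystems recompute the checksum now; the
		 * buffer must always carry a valid CRC when it is written
		 * back.
		 */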
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
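
/*
 * Reset all the dquots in the given range of filesystem blocks belonging to
 * a quota inode, queueing each buffer on the caller's delwri list.
 */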
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, resetting
 * the counters in every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
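
	/*
	 * Walk the quota inode's data fork, mapping up to XFS_DQITER_MAP_SIZE
	 * extents at a time and resetting the dquot buffers backing each
	 * mapped extent.
	 */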
	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done, we
 * can just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}
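
	/*
	 * Mark the dquot dirty; the quotacheck flush pass will write the
	 * adjusted counts back to the dquot buffers.
	 */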
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
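
/*
 * Count the realtime blocks referenced by a realtime inode's data fork.
 * Quotacheck uses this to split di_nblocks into data device and realtime
 * block counts.
 */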
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}
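
	/*
	 * di_nblocks counts both data device and realtime blocks, so subtract
	 * the realtime blocks to get the data device usage.
	 */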
	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
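
/*
 * Flush a single dirty dquot back to its backing buffer, skipping dquots
 * that are currently being freed or are already clean.
 */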
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;
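
	/*
	 * Take the dquot flush lock and write the incore dquot back into its
	 * backing buffer.
	 */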
|
|
|
|
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
xfs_dqflock(dqp);
|
2012-04-23 13:58:37 +08:00
|
|
|
error = xfs_qm_dqflush(dqp, &bp);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
2012-03-15 00:53:34 +08:00
|
|
|
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
xfs_buf_delwri_queue(bp, buffer_list);
|
2012-04-23 13:58:37 +08:00
|
|
|
xfs_buf_relse(bp);
|
2012-03-15 00:53:34 +08:00
|
|
|
out_unlock:
|
|
|
|
xfs_dqunlock(dqp);
|
|
|
|
return error;
|
|
|
|
}
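The flush callback above never writes anything itself: it only queues the dquot's backing buffer on the list handed in through its data pointer, so the caller must eventually submit that list. Below is a minimal sketch of that caller contract, assuming only calls that appear elsewhere in this file; the helper name example_flush_user_dquots is hypothetical.
/*
 * Illustrative sketch, not part of xfs_qm.c: drive the flush callback over
 * all user dquots and then write the queued buffers in a single pass.
 */
STATIC int
example_flush_user_dquots(
	struct xfs_mount	*mp)
{
	LIST_HEAD		(buffer_list);	/* on-stack delwri list */
	int			error;
	int			error2;

	/* Queue each dirty user dquot's backing buffer; nothing is written yet. */
	error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
				  &buffer_list);

	/* Write the whole list in one go and keep the first error seen. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;
	return error;
}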
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Walk thru all the filesystem inodes and construct a consistent view
|
|
|
|
* of the disk quota world. If the quotacheck fails, disable quotas.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_qm_quotacheck(
|
|
|
|
xfs_mount_t *mp)
|
|
|
|
{
|
2013-06-28 06:25:07 +08:00
|
|
|
int done, count, error, error2;
|
|
|
|
xfs_ino_t lastino;
|
|
|
|
size_t structsz;
|
|
|
|
uint flags;
|
|
|
|
LIST_HEAD (buffer_list);
|
|
|
|
struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
|
|
|
|
struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
count = INT_MAX;
|
|
|
|
structsz = 1;
|
|
|
|
lastino = 0;
|
|
|
|
flags = 0;
|
|
|
|
|
2013-07-11 13:00:40 +08:00
|
|
|
ASSERT(uip || gip || pip);
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
|
|
|
|
|
2011-03-07 07:08:35 +08:00
|
|
|
xfs_notice(mp, "Quotacheck needed: Please wait.");
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2005-06-21 13:38:48 +08:00
|
|
|
* First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
|
2005-04-17 06:20:36 +08:00
|
|
|
* their counters to zero. We need a clean slate.
|
|
|
|
* We don't log our changes till later.
|
|
|
|
*/
|
2010-04-20 15:01:30 +08:00
|
|
|
if (uip) {
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
|
|
|
|
&buffer_list);
|
2010-04-20 15:01:30 +08:00
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto error_return;
|
|
|
|
flags |= XFS_UQUOTA_CHKD;
|
|
|
|
}
|
|
|
|
|
2010-04-20 15:01:30 +08:00
|
|
|
if (gip) {
|
2013-07-11 13:00:40 +08:00
|
|
|
error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
&buffer_list);
|
2010-04-20 15:01:30 +08:00
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto error_return;
|
2013-07-11 13:00:40 +08:00
|
|
|
flags |= XFS_GQUOTA_CHKD;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pip) {
|
|
|
|
error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
|
|
|
|
&buffer_list);
|
|
|
|
if (error)
|
|
|
|
goto error_return;
|
|
|
|
flags |= XFS_PQUOTA_CHKD;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
do {
|
|
|
|
/*
|
|
|
|
* Iterate thru all the inodes in the file system,
|
|
|
|
* adjusting the corresponding dquot counters in core.
|
|
|
|
*/
|
2010-06-23 16:11:11 +08:00
|
|
|
error = xfs_bulkstat(mp, &lastino, &count,
|
|
|
|
xfs_qm_dqusage_adjust,
|
|
|
|
structsz, NULL, &done);
|
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
2010-06-23 16:11:11 +08:00
|
|
|
} while (!done);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-10 10:20:17 +08:00
|
|
|
/*
|
2012-03-15 00:53:34 +08:00
|
|
|
* We've made all the changes that we need to make incore. Flush them
|
|
|
|
* down to disk buffers if everything was updated successfully.
|
2008-04-10 10:20:17 +08:00
|
|
|
*/
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
if (XFS_IS_UQUOTA_ON(mp)) {
|
|
|
|
error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
|
|
|
|
&buffer_list);
|
|
|
|
}
|
2012-03-15 00:53:34 +08:00
|
|
|
if (XFS_IS_GQUOTA_ON(mp)) {
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
|
|
|
|
&buffer_list);
|
2012-03-15 00:53:34 +08:00
|
|
|
if (!error)
|
|
|
|
error = error2;
|
|
|
|
}
|
|
|
|
if (XFS_IS_PQUOTA_ON(mp)) {
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
|
|
|
|
&buffer_list);
|
2012-03-15 00:53:34 +08:00
|
|
|
if (!error)
|
|
|
|
error = error2;
|
|
|
|
}
|
2008-04-10 10:20:17 +08:00
|
|
|
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
error2 = xfs_buf_delwri_submit(&buffer_list);
|
|
|
|
if (!error)
|
|
|
|
error = error2;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* We can get this error if we couldn't do a dquot allocation inside
|
|
|
|
* xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
|
|
|
|
* dirty dquots that might be cached, we just want to get rid of them
|
|
|
|
* and turn quotaoff. The dquots won't be attached to any of the inodes
|
|
|
|
* at this point (because we intentionally didn't in dqget_noattach).
|
|
|
|
*/
|
|
|
|
if (error) {
|
2010-04-20 15:02:29 +08:00
|
|
|
xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If one type of quotas is off, then it will lose its
|
|
|
|
* quotachecked status, since we won't be doing accounting for
|
|
|
|
* that type anymore.
|
|
|
|
*/
|
2012-01-24 01:31:43 +08:00
|
|
|
mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
|
2005-04-17 06:20:36 +08:00
|
|
|
mp->m_qflags |= flags;
|
|
|
|
|
|
|
|
error_return:
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
while (!list_empty(&buffer_list)) {
|
|
|
|
struct xfs_buf *bp =
|
|
|
|
list_first_entry(&buffer_list, struct xfs_buf, b_list);
|
|
|
|
list_del_init(&bp->b_list);
|
|
|
|
xfs_buf_relse(bp);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error) {
|
2011-03-07 07:08:35 +08:00
|
|
|
xfs_warn(mp,
|
|
|
|
"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
|
|
|
|
error);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* We must turn off quotas.
|
|
|
|
*/
|
|
|
|
ASSERT(mp->m_quotainfo != NULL);
|
|
|
|
xfs_qm_destroy_quotainfo(mp);
|
2008-04-10 10:20:38 +08:00
|
|
|
if (xfs_mount_reset_sbqflags(mp)) {
|
2011-03-07 07:08:35 +08:00
|
|
|
xfs_warn(mp,
|
|
|
|
"Quotacheck: Failed to reset quota flags.");
|
2008-04-10 10:20:38 +08:00
|
|
|
}
|
2011-03-07 07:08:35 +08:00
|
|
|
} else
|
|
|
|
xfs_notice(mp, "Quotacheck: Done.");
|
2005-04-17 06:20:36 +08:00
|
|
|
return (error);
|
|
|
|
}
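The do/while loop above treats xfs_bulkstat() as a resumable iterator: lastino is the cursor the call advances, count caps how many inodes a single call may visit, and done is set once every allocated inode has been seen. A compact sketch of just that contract, using the same argument values as quotacheck; the helper name is hypothetical.
/*
 * Illustrative sketch: walk every allocated inode with xfs_bulkstat(),
 * calling xfs_qm_dqusage_adjust for each, as quotacheck does above.
 */
STATIC int
example_walk_all_inodes(
	struct xfs_mount	*mp)
{
	xfs_ino_t		lastino = 0;	/* resume cursor */
	int			count = INT_MAX;/* no batch limit */
	int			done = 0;
	int			error;

	do {
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     1, NULL, &done);	/* structsz of 1, as above */
		if (error)
			break;
	} while (!done);

	return error;
}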
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is called after the superblock has been read in and we're ready to
|
|
|
|
* iget the quota inodes.
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_qm_init_quotainos(
|
|
|
|
xfs_mount_t *mp)
|
|
|
|
{
|
2013-06-28 06:25:07 +08:00
|
|
|
struct xfs_inode *uip = NULL;
|
|
|
|
struct xfs_inode *gip = NULL;
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_inode *pip = NULL;
|
2013-06-28 06:25:07 +08:00
|
|
|
int error;
|
|
|
|
__int64_t sbflags = 0;
|
|
|
|
uint flags = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ASSERT(mp->m_quotainfo);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the uquota, gquota and pquota inodes
|
|
|
|
*/
|
2008-03-06 10:44:28 +08:00
|
|
|
if (xfs_sb_version_hasquota(&mp->m_sb)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (XFS_IS_UQUOTA_ON(mp) &&
|
|
|
|
mp->m_sb.sb_uquotino != NULLFSINO) {
|
|
|
|
ASSERT(mp->m_sb.sb_uquotino > 0);
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
|
|
|
|
0, 0, &uip);
|
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
return XFS_ERROR(error);
|
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
if (XFS_IS_GQUOTA_ON(mp) &&
|
2005-04-17 06:20:36 +08:00
|
|
|
mp->m_sb.sb_gquotino != NULLFSINO) {
|
|
|
|
ASSERT(mp->m_sb.sb_gquotino > 0);
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
|
|
|
|
0, 0, &gip);
|
|
|
|
if (error)
|
|
|
|
goto error_rele;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
if (XFS_IS_PQUOTA_ON(mp) &&
|
2013-07-20 06:36:02 +08:00
|
|
|
mp->m_sb.sb_pquotino != NULLFSINO) {
|
|
|
|
ASSERT(mp->m_sb.sb_pquotino > 0);
|
|
|
|
error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
|
2013-07-11 13:00:40 +08:00
|
|
|
0, 0, &pip);
|
|
|
|
if (error)
|
|
|
|
goto error_rele;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
|
|
|
flags |= XFS_QMOPT_SBVERSION;
|
|
|
|
sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
|
2013-07-20 06:36:02 +08:00
|
|
|
XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
|
|
|
|
XFS_SB_QFLAGS);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2013-07-11 13:00:40 +08:00
|
|
|
* Create the three inodes, if they don't exist already. The changes
|
2005-04-17 06:20:36 +08:00
|
|
|
* made above will get added to a transaction and logged in one of
|
|
|
|
* the qino_alloc calls below. If the device is readonly,
|
|
|
|
* temporarily switch to read-write to do this.
|
|
|
|
*/
|
|
|
|
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_qm_qino_alloc(mp, &uip,
|
2005-04-17 06:20:36 +08:00
|
|
|
sbflags | XFS_SB_UQUOTINO,
|
2013-06-28 06:25:07 +08:00
|
|
|
flags | XFS_QMOPT_UQUOTA);
|
|
|
|
if (error)
|
|
|
|
goto error_rele;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
flags &= ~XFS_QMOPT_SBVERSION;
|
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
|
2005-06-21 13:38:48 +08:00
|
|
|
error = xfs_qm_qino_alloc(mp, &gip,
|
2013-07-11 13:00:40 +08:00
|
|
|
sbflags | XFS_SB_GQUOTINO,
|
|
|
|
flags | XFS_QMOPT_GQUOTA);
|
|
|
|
if (error)
|
|
|
|
goto error_rele;
|
|
|
|
|
|
|
|
flags &= ~XFS_QMOPT_SBVERSION;
|
|
|
|
}
|
|
|
|
if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
|
|
|
|
error = xfs_qm_qino_alloc(mp, &pip,
|
2013-07-20 06:36:02 +08:00
|
|
|
sbflags | XFS_SB_PQUOTINO,
|
2013-07-11 13:00:40 +08:00
|
|
|
flags | XFS_QMOPT_PQUOTA);
|
2013-06-28 06:25:07 +08:00
|
|
|
if (error)
|
|
|
|
goto error_rele;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2010-04-20 15:01:30 +08:00
|
|
|
mp->m_quotainfo->qi_uquotaip = uip;
|
|
|
|
mp->m_quotainfo->qi_gquotaip = gip;
|
2013-07-11 13:00:40 +08:00
|
|
|
mp->m_quotainfo->qi_pquotaip = pip;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-15 09:37:08 +08:00
|
|
|
return 0;
|
2013-06-28 06:25:07 +08:00
|
|
|
|
|
|
|
error_rele:
|
|
|
|
if (uip)
|
|
|
|
IRELE(uip);
|
|
|
|
if (gip)
|
|
|
|
IRELE(gip);
|
2013-07-11 13:00:40 +08:00
|
|
|
if (pip)
|
|
|
|
IRELE(pip);
|
2013-06-28 06:25:07 +08:00
|
|
|
return XFS_ERROR(error);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
STATIC void
|
|
|
|
xfs_qm_dqfree_one(
|
|
|
|
struct xfs_dquot *dqp)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = dqp->q_mount;
|
|
|
|
struct xfs_quotainfo *qi = mp->m_quotainfo;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-03-13 16:52:35 +08:00
|
|
|
mutex_lock(&qi->qi_tree_lock);
|
2013-06-28 06:25:05 +08:00
|
|
|
radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
|
2012-03-13 16:52:35 +08:00
|
|
|
be32_to_cpu(dqp->q_core.d_id));
|
2010-04-13 13:06:50 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
qi->qi_dquots--;
|
2012-03-15 00:53:34 +08:00
|
|
|
mutex_unlock(&qi->qi_tree_lock);
|
2012-02-01 21:57:20 +08:00
|
|
|
|
|
|
|
xfs_qm_dqdestroy(dqp);
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
|
|
|
xfs_qm_dqreclaim_one(
|
|
|
|
struct xfs_dquot *dqp,
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
struct list_head *buffer_list,
|
2012-02-01 21:57:20 +08:00
|
|
|
struct list_head *dispose_list)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2012-02-01 21:57:20 +08:00
|
|
|
struct xfs_mount *mp = dqp->q_mount;
|
2012-03-13 16:52:34 +08:00
|
|
|
struct xfs_quotainfo *qi = mp->m_quotainfo;
|
2012-02-01 21:57:20 +08:00
|
|
|
int error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
if (!xfs_dqlock_nowait(dqp))
|
2012-11-28 10:01:02 +08:00
|
|
|
goto out_move_tail;
|
2011-12-07 05:58:19 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
/*
|
|
|
|
* This dquot has acquired a reference in the meantime; remove it from
|
|
|
|
* the freelist and try again.
|
|
|
|
*/
|
|
|
|
if (dqp->q_nrefs) {
|
|
|
|
xfs_dqunlock(dqp);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
trace_xfs_dqreclaim_want(dqp);
|
2012-03-13 16:52:33 +08:00
|
|
|
XFS_STATS_INC(xs_qm_dqwants);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-03-13 16:52:34 +08:00
|
|
|
list_del_init(&dqp->q_lru);
|
|
|
|
qi->qi_lru_count--;
|
2012-03-13 16:52:33 +08:00
|
|
|
XFS_STATS_DEC(xs_qm_dquot_unused);
|
2012-02-01 21:57:20 +08:00
|
|
|
return;
|
|
|
|
}
|
2010-04-13 13:06:50 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
/*
|
|
|
|
* Try to grab the flush lock. If this dquot is in the process of
|
|
|
|
* getting flushed to disk, we don't want to reclaim it.
|
|
|
|
*/
|
|
|
|
if (!xfs_dqflock_nowait(dqp))
|
2012-11-28 10:01:02 +08:00
|
|
|
goto out_unlock_move_tail;
|
2009-12-15 07:14:59 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
if (XFS_DQ_IS_DIRTY(dqp)) {
|
2012-04-23 13:58:37 +08:00
|
|
|
struct xfs_buf *bp = NULL;
|
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
trace_xfs_dqreclaim_dirty(dqp);
|
2009-12-15 07:14:59 +08:00
|
|
|
|
2012-04-23 13:58:37 +08:00
|
|
|
error = xfs_qm_dqflush(dqp, &bp);
|
2012-02-01 21:57:20 +08:00
|
|
|
if (error) {
|
|
|
|
xfs_warn(mp, "%s: dquot %p flush failed",
|
|
|
|
__func__, dqp);
|
2012-11-28 10:01:02 +08:00
|
|
|
goto out_unlock_move_tail;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-04-13 13:06:50 +08:00
|
|
|
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
xfs_buf_delwri_queue(bp, buffer_list);
|
2012-04-23 13:58:37 +08:00
|
|
|
xfs_buf_relse(bp);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2012-02-01 21:57:20 +08:00
|
|
|
* Give the dquot another try on the freelist, as the
|
|
|
|
* flushing will take some time.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2012-11-28 10:01:02 +08:00
|
|
|
goto out_unlock_move_tail;
|
2012-02-01 21:57:20 +08:00
|
|
|
}
|
|
|
|
xfs_dqfunlock(dqp);
|
2011-12-07 05:58:18 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
/*
|
|
|
|
* Prevent lookups now that we are past the point of no return.
|
|
|
|
*/
|
|
|
|
dqp->dq_flags |= XFS_DQ_FREEING;
|
|
|
|
xfs_dqunlock(dqp);
|
2011-12-07 05:58:18 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
ASSERT(dqp->q_nrefs == 0);
|
2012-03-13 16:52:34 +08:00
|
|
|
list_move_tail(&dqp->q_lru, dispose_list);
|
|
|
|
qi->qi_lru_count--;
|
2012-03-13 16:52:33 +08:00
|
|
|
XFS_STATS_DEC(xs_qm_dquot_unused);
|
2011-12-07 05:58:18 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
trace_xfs_dqreclaim_done(dqp);
|
2012-03-13 16:52:33 +08:00
|
|
|
XFS_STATS_INC(xs_qm_dqreclaims);
|
2012-02-01 21:57:20 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
/*
|
|
|
|
* Move the dquot to the tail of the list so that we don't spin on it.
|
|
|
|
*/
|
2012-11-28 10:01:02 +08:00
|
|
|
out_unlock_move_tail:
|
|
|
|
xfs_dqunlock(dqp);
|
|
|
|
out_move_tail:
|
2012-03-13 16:52:34 +08:00
|
|
|
list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
|
2012-02-01 21:57:20 +08:00
|
|
|
trace_xfs_dqreclaim_busy(dqp);
|
2012-03-13 16:52:33 +08:00
|
|
|
XFS_STATS_INC(xs_qm_dqreclaim_misses);
|
2010-04-13 13:06:50 +08:00
|
|
|
}
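Once reclaim has set XFS_DQ_FREEING under the dquot lock, the dquot is past the point of no return, and every other path that finds it (for example the flush callback near the top of this section) must test the flag under the same lock and back off. A minimal sketch of that lookup-side check; the helper is hypothetical and only restates the protocol used above.
/*
 * Illustrative sketch (hypothetical helper): honour XFS_DQ_FREEING the way
 * the flush callback earlier in this file does.
 */
STATIC bool
example_dquot_is_usable(
	struct xfs_dquot	*dqp)
{
	bool			usable;

	xfs_dqlock(dqp);
	/* A dquot marked for freeing must be treated as already gone. */
	usable = !(dqp->dq_flags & XFS_DQ_FREEING);
	xfs_dqunlock(dqp);

	return usable;
}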
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
STATIC int
|
2010-07-19 12:56:17 +08:00
|
|
|
xfs_qm_shake(
|
2012-02-01 21:57:20 +08:00
|
|
|
struct shrinker *shrink,
|
|
|
|
struct shrink_control *sc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2012-03-13 16:52:34 +08:00
|
|
|
struct xfs_quotainfo *qi =
|
|
|
|
container_of(shrink, struct xfs_quotainfo, qi_shrinker);
|
2012-02-01 21:57:20 +08:00
|
|
|
int nr_to_scan = sc->nr_to_scan;
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
LIST_HEAD (buffer_list);
|
2012-02-01 21:57:20 +08:00
|
|
|
LIST_HEAD (dispose_list);
|
|
|
|
struct xfs_dquot *dqp;
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
int error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
|
2006-01-15 09:37:08 +08:00
|
|
|
return 0;
|
2012-02-01 21:57:20 +08:00
|
|
|
if (!nr_to_scan)
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-03-13 16:52:34 +08:00
|
|
|
mutex_lock(&qi->qi_lru_lock);
|
|
|
|
while (!list_empty(&qi->qi_lru_list)) {
|
2012-02-01 21:57:20 +08:00
|
|
|
if (nr_to_scan-- <= 0)
|
|
|
|
break;
|
2012-03-13 16:52:34 +08:00
|
|
|
dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
|
|
|
|
q_lru);
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2012-03-13 16:52:34 +08:00
|
|
|
mutex_unlock(&qi->qi_lru_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
error = xfs_buf_delwri_submit(&buffer_list);
|
|
|
|
if (error)
|
|
|
|
xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
|
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
while (!list_empty(&dispose_list)) {
|
2012-03-13 16:52:34 +08:00
|
|
|
dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
|
|
|
|
list_del_init(&dqp->q_lru);
|
2012-02-01 21:57:20 +08:00
|
|
|
xfs_qm_dqfree_one(dqp);
|
|
|
|
}
|
xfs: on-stack delayed write buffer lists
2012-04-23 13:58:39 +08:00
|
|
|
|
2012-02-01 21:57:20 +08:00
|
|
|
out:
|
2012-03-13 16:52:34 +08:00
|
|
|
return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start a transaction and write the incore superblock changes to
|
|
|
|
* disk. flags parameter indicates which fields have changed.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_qm_write_sb_changes(
|
|
|
|
xfs_mount_t *mp,
|
|
|
|
__int64_t flags)
|
|
|
|
{
|
|
|
|
xfs_trans_t *tp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
|
2013-08-12 18:49:59 +08:00
|
|
|
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
|
2013-01-28 21:26:34 +08:00
|
|
|
if (error) {
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_trans_cancel(tp, 0);
|
2006-01-15 09:37:08 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
xfs_mod_sb(tp, flags);
|
2008-04-10 10:21:18 +08:00
|
|
|
error = xfs_trans_commit(tp, 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-10 10:21:18 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
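As a usage sketch (loosely modeled on the quota mount path; the wrapper
function is hypothetical, while the fields and flags are the existing ones):
update the incore superblock under m_sb_lock, then log and commit just the
changed fields:

STATIC int
xfs_example_update_qflags(
	struct xfs_mount	*mp)
{
	/* Update the incore copy of the quota flags under the sb lock. */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	/* Write only the quota flags field back to the on-disk superblock. */
	return xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
}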
|
|
|
|
|
|
|
|
|
|
|
|
/* --------------- utility functions for vnodeops ---------------- */
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2010-10-07 02:41:17 +08:00
|
|
|
 * Given an inode, a uid, gid and prid, make sure that we have
|
2005-04-17 06:20:36 +08:00
|
|
|
* allocated relevant dquot(s) on disk, and that we won't exceed inode
|
|
|
|
* quotas by creating this file.
|
|
|
|
* This also attaches dquot(s) to the given inode after locking it,
|
|
|
|
* and returns the dquots corresponding to the uid and/or gid.
|
|
|
|
*
|
|
|
|
* in : inode (unlocked)
|
|
|
|
 * out	: udquot, gdquot, pdquot with references taken and unlocked
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_qm_vop_dqalloc(
|
2009-06-08 21:33:32 +08:00
|
|
|
struct xfs_inode *ip,
|
2013-08-16 02:08:01 +08:00
|
|
|
xfs_dqid_t uid,
|
|
|
|
xfs_dqid_t gid,
|
2009-06-08 21:33:32 +08:00
|
|
|
prid_t prid,
|
|
|
|
uint flags,
|
|
|
|
struct xfs_dquot **O_udqpp,
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot **O_gdqpp,
|
|
|
|
struct xfs_dquot **O_pdqpp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-06-08 21:33:32 +08:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
2013-06-28 06:25:07 +08:00
|
|
|
struct xfs_dquot *uq = NULL;
|
|
|
|
struct xfs_dquot *gq = NULL;
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot *pq = NULL;
|
2009-06-08 21:33:32 +08:00
|
|
|
int error;
|
|
|
|
uint lockflags;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-06-08 21:33:32 +08:00
|
|
|
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
lockflags = XFS_ILOCK_EXCL;
|
|
|
|
xfs_ilock(ip, lockflags);
|
|
|
|
|
2007-08-30 15:21:12 +08:00
|
|
|
if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
|
2005-04-17 06:20:36 +08:00
|
|
|
gid = ip->i_d.di_gid;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attach the dquot(s) to this inode, doing a dquot allocation
|
|
|
|
* if necessary. The dquot(s) will not be locked.
|
|
|
|
*/
|
|
|
|
if (XFS_NOT_DQATTACHED(mp, ip)) {
|
2009-06-08 21:33:32 +08:00
|
|
|
error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
|
|
|
|
if (error) {
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_iunlock(ip, lockflags);
|
2006-01-15 09:37:08 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-06-21 13:38:48 +08:00
|
|
|
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (ip->i_d.di_uid != uid) {
|
|
|
|
/*
|
|
|
|
* What we need is the dquot that has this uid, and
|
|
|
|
* if we send the inode to dqget, the uid of the inode
|
|
|
|
* takes priority over what's sent in the uid argument.
|
|
|
|
			 * We must unlock the inode here before calling dqget if
|
|
|
|
* we're not sending the inode, because otherwise
|
|
|
|
* we'll deadlock by doing trans_reserve while
|
|
|
|
* holding ilock.
|
|
|
|
*/
|
|
|
|
xfs_iunlock(ip, lockflags);
|
2013-08-16 02:08:01 +08:00
|
|
|
error = xfs_qm_dqget(mp, NULL, uid,
|
2005-04-17 06:20:36 +08:00
|
|
|
XFS_DQ_USER,
|
|
|
|
XFS_QMOPT_DQALLOC |
|
|
|
|
XFS_QMOPT_DOWARN,
|
2013-06-28 06:25:07 +08:00
|
|
|
&uq);
|
|
|
|
if (error) {
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(error != ENOENT);
|
2006-01-15 09:37:08 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Get the ilock in the right order.
|
|
|
|
*/
|
|
|
|
xfs_dqunlock(uq);
|
|
|
|
lockflags = XFS_ILOCK_SHARED;
|
|
|
|
xfs_ilock(ip, lockflags);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Take an extra reference, because we'll return
|
|
|
|
			 * this to the caller
|
|
|
|
*/
|
|
|
|
ASSERT(ip->i_udquot);
|
2011-12-07 05:58:22 +08:00
|
|
|
uq = xfs_qm_dqhold(ip->i_udquot);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
2005-06-21 13:38:48 +08:00
|
|
|
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (ip->i_d.di_gid != gid) {
|
|
|
|
xfs_iunlock(ip, lockflags);
|
2013-08-16 02:08:01 +08:00
|
|
|
error = xfs_qm_dqget(mp, NULL, gid,
|
2005-04-17 06:20:36 +08:00
|
|
|
XFS_DQ_GROUP,
|
|
|
|
XFS_QMOPT_DQALLOC |
|
|
|
|
XFS_QMOPT_DOWARN,
|
2013-06-28 06:25:07 +08:00
|
|
|
&gq);
|
|
|
|
if (error) {
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(error != ENOENT);
|
2013-06-28 06:25:07 +08:00
|
|
|
goto error_rele;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
xfs_dqunlock(gq);
|
|
|
|
lockflags = XFS_ILOCK_SHARED;
|
|
|
|
xfs_ilock(ip, lockflags);
|
|
|
|
} else {
|
|
|
|
ASSERT(ip->i_gdquot);
|
2011-12-07 05:58:22 +08:00
|
|
|
gq = xfs_qm_dqhold(ip->i_gdquot);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
}
|
|
|
|
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
|
2010-09-26 14:10:18 +08:00
|
|
|
if (xfs_get_projid(ip) != prid) {
|
2005-06-21 13:38:48 +08:00
|
|
|
xfs_iunlock(ip, lockflags);
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
|
2005-06-21 13:38:48 +08:00
|
|
|
XFS_DQ_PROJ,
|
|
|
|
XFS_QMOPT_DQALLOC |
|
|
|
|
XFS_QMOPT_DOWARN,
|
2013-07-11 13:00:40 +08:00
|
|
|
&pq);
|
2013-06-28 06:25:07 +08:00
|
|
|
if (error) {
|
2005-06-21 13:38:48 +08:00
|
|
|
ASSERT(error != ENOENT);
|
2013-06-28 06:25:07 +08:00
|
|
|
goto error_rele;
|
2005-06-21 13:38:48 +08:00
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
xfs_dqunlock(pq);
|
2005-06-21 13:38:48 +08:00
|
|
|
lockflags = XFS_ILOCK_SHARED;
|
|
|
|
xfs_ilock(ip, lockflags);
|
|
|
|
} else {
|
2013-07-11 13:00:40 +08:00
|
|
|
ASSERT(ip->i_pdquot);
|
|
|
|
pq = xfs_qm_dqhold(ip->i_pdquot);
|
2005-06-21 13:38:48 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
if (uq)
|
2009-12-15 07:14:59 +08:00
|
|
|
trace_xfs_dquot_dqalloc(ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
xfs_iunlock(ip, lockflags);
|
|
|
|
if (O_udqpp)
|
|
|
|
*O_udqpp = uq;
|
|
|
|
else if (uq)
|
|
|
|
xfs_qm_dqrele(uq);
|
|
|
|
if (O_gdqpp)
|
|
|
|
*O_gdqpp = gq;
|
|
|
|
else if (gq)
|
|
|
|
xfs_qm_dqrele(gq);
|
2013-07-11 13:00:40 +08:00
|
|
|
if (O_pdqpp)
|
|
|
|
*O_pdqpp = pq;
|
|
|
|
else if (pq)
|
|
|
|
xfs_qm_dqrele(pq);
|
2006-01-15 09:37:08 +08:00
|
|
|
return 0;
|
2013-06-28 06:25:07 +08:00
|
|
|
|
|
|
|
error_rele:
|
2013-07-11 13:00:40 +08:00
|
|
|
if (gq)
|
|
|
|
xfs_qm_dqrele(gq);
|
2013-06-28 06:25:07 +08:00
|
|
|
if (uq)
|
|
|
|
xfs_qm_dqrele(uq);
|
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
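A hedged caller sketch, loosely modeled on inode creation (the wrapper
function, and dropping the references immediately, are illustrative only;
a real caller would reserve quota and attach the dquots to the new inode
via xfs_qm_vop_create_dqattach() inside its transaction):

STATIC int
xfs_example_dqalloc(
	struct xfs_inode	*dp,	/* parent directory, unlocked */
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid)
{
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	int			error;

	/* Take references on (allocating if necessary) all three dquots. */
	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				   &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/* xfs_qm_dqrele() is a no-op on NULL pointers. */
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
	return 0;
}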
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Actually transfer ownership, and do dquot modifications.
|
|
|
|
* These were already reserved.
|
|
|
|
*/
|
|
|
|
xfs_dquot_t *
|
|
|
|
xfs_qm_vop_chown(
|
|
|
|
xfs_trans_t *tp,
|
|
|
|
xfs_inode_t *ip,
|
|
|
|
xfs_dquot_t **IO_olddq,
|
|
|
|
xfs_dquot_t *newdq)
|
|
|
|
{
|
|
|
|
xfs_dquot_t *prevdq;
|
2005-06-21 13:48:47 +08:00
|
|
|
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
|
|
|
|
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
|
|
|
|
|
2009-06-08 21:33:32 +08:00
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
|
|
|
|
|
|
|
|
/* old dquot */
|
|
|
|
prevdq = *IO_olddq;
|
|
|
|
ASSERT(prevdq);
|
|
|
|
ASSERT(prevdq != newdq);
|
|
|
|
|
2005-06-21 13:48:47 +08:00
|
|
|
xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
|
|
|
|
xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* the sparkling new dquot */
|
2005-06-21 13:48:47 +08:00
|
|
|
xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
|
|
|
|
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2011-12-07 05:58:22 +08:00
|
|
|
* Take an extra reference, because the inode is going to keep
|
|
|
|
* this dquot pointer even after the trans_commit.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2011-12-07 05:58:22 +08:00
|
|
|
*IO_olddq = xfs_qm_dqhold(newdq);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-15 09:37:08 +08:00
|
|
|
return prevdq;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-06-21 13:38:48 +08:00
|
|
|
* Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_qm_vop_chown_reserve(
|
2013-06-28 06:25:07 +08:00
|
|
|
struct xfs_trans *tp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dquot *udqp,
|
|
|
|
struct xfs_dquot *gdqp,
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot *pdqp,
|
2013-06-28 06:25:07 +08:00
|
|
|
uint flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-06-28 06:25:07 +08:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
uint delblks, blkflags, prjflags = 0;
|
|
|
|
struct xfs_dquot *udq_unres = NULL;
|
|
|
|
struct xfs_dquot *gdq_unres = NULL;
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot *pdq_unres = NULL;
|
2013-06-28 06:25:07 +08:00
|
|
|
struct xfs_dquot *udq_delblks = NULL;
|
|
|
|
struct xfs_dquot *gdq_delblks = NULL;
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot *pdq_delblks = NULL;
|
2013-06-28 06:25:07 +08:00
|
|
|
int error;
|
2009-06-08 21:33:32 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
|
|
|
|
|
|
|
|
delblks = ip->i_delayed_blks;
|
2005-06-21 13:48:47 +08:00
|
|
|
blkflags = XFS_IS_REALTIME_INODE(ip) ?
|
|
|
|
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (XFS_IS_UQUOTA_ON(mp) && udqp &&
|
2013-08-16 02:08:01 +08:00
|
|
|
ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
|
2013-06-28 06:25:07 +08:00
|
|
|
udq_delblks = udqp;
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* If there are delayed allocation blocks, then we have to
|
|
|
|
* unreserve those from the old dquot, and add them to the
|
|
|
|
* new dquot.
|
|
|
|
*/
|
|
|
|
if (delblks) {
|
|
|
|
ASSERT(ip->i_udquot);
|
2013-06-28 06:25:07 +08:00
|
|
|
udq_unres = ip->i_udquot;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
|
|
|
|
ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
|
|
|
|
gdq_delblks = gdqp;
|
|
|
|
if (delblks) {
|
|
|
|
ASSERT(ip->i_gdquot);
|
|
|
|
gdq_unres = ip->i_gdquot;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
|
|
|
|
xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
|
|
|
|
prjflags = XFS_QMOPT_ENOSPC;
|
|
|
|
pdq_delblks = pdqp;
|
|
|
|
if (delblks) {
|
|
|
|
ASSERT(ip->i_pdquot);
|
|
|
|
pdq_unres = ip->i_pdquot;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
|
2013-07-11 13:00:40 +08:00
|
|
|
udq_delblks, gdq_delblks, pdq_delblks,
|
|
|
|
ip->i_d.di_nblocks, 1,
|
2013-06-28 06:25:07 +08:00
|
|
|
flags | blkflags | prjflags);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
	 * Do the delayed blks reservations/unreservations now. Since these
|
|
|
|
* are done without the help of a transaction, if a reservation fails
|
|
|
|
* its previous reservations won't be automatically undone by trans
|
|
|
|
	 * code. So we have to do it manually here.
|
|
|
|
*/
|
|
|
|
if (delblks) {
|
|
|
|
/*
|
|
|
|
* Do the reservations first. Unreservation can't fail.
|
|
|
|
*/
|
2013-07-11 13:00:40 +08:00
|
|
|
ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
|
|
|
|
ASSERT(udq_unres || gdq_unres || pdq_unres);
|
2013-06-28 06:25:07 +08:00
|
|
|
error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
|
2013-07-11 13:00:40 +08:00
|
|
|
udq_delblks, gdq_delblks, pdq_delblks,
|
|
|
|
(xfs_qcnt_t)delblks, 0,
|
2013-06-28 06:25:07 +08:00
|
|
|
flags | blkflags | prjflags);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
|
2013-07-11 13:00:40 +08:00
|
|
|
udq_unres, gdq_unres, pdq_unres,
|
|
|
|
-((xfs_qcnt_t)delblks), 0, blkflags);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
	return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
xfs_qm_vop_rename_dqattach(
|
2009-06-08 21:33:32 +08:00
|
|
|
struct xfs_inode **i_tab)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-06-08 21:33:32 +08:00
|
|
|
struct xfs_mount *mp = i_tab[0]->i_mount;
|
|
|
|
int i;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-06-08 21:33:32 +08:00
|
|
|
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
2006-01-15 09:37:08 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-06-08 21:33:32 +08:00
|
|
|
for (i = 0; (i < 4 && i_tab[i]); i++) {
|
|
|
|
struct xfs_inode *ip = i_tab[i];
|
|
|
|
int error;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Watch out for duplicate entries in the table.
|
|
|
|
*/
|
2009-06-08 21:33:32 +08:00
|
|
|
if (i == 0 || ip != i_tab[i-1]) {
|
|
|
|
if (XFS_NOT_DQATTACHED(mp, ip)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
error = xfs_qm_dqattach(ip, 0);
|
|
|
|
if (error)
|
2006-01-15 09:37:08 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-01-15 09:37:08 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2009-06-08 21:33:32 +08:00
|
|
|
xfs_qm_vop_create_dqattach(
|
|
|
|
struct xfs_trans *tp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dquot *udqp,
|
2013-07-11 13:00:40 +08:00
|
|
|
struct xfs_dquot *gdqp,
|
|
|
|
struct xfs_dquot *pdqp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-06-08 21:33:32 +08:00
|
|
|
struct xfs_mount *mp = tp->t_mountp;
|
|
|
|
|
|
|
|
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
2009-06-08 21:33:32 +08:00
|
|
|
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (udqp) {
|
|
|
|
ASSERT(ip->i_udquot == NULL);
|
2009-06-08 21:33:32 +08:00
|
|
|
ASSERT(XFS_IS_UQUOTA_ON(mp));
|
2005-11-02 12:01:12 +08:00
|
|
|
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
|
2011-12-07 05:58:22 +08:00
|
|
|
|
|
|
|
ip->i_udquot = xfs_qm_dqhold(udqp);
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
|
|
|
|
}
|
|
|
|
if (gdqp) {
|
|
|
|
ASSERT(ip->i_gdquot == NULL);
|
2013-07-11 13:00:40 +08:00
|
|
|
ASSERT(XFS_IS_GQUOTA_ON(mp));
|
|
|
|
ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
|
2011-12-07 05:58:22 +08:00
|
|
|
ip->i_gdquot = xfs_qm_dqhold(gdqp);
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
|
|
|
|
}
|
2013-07-11 13:00:40 +08:00
|
|
|
if (pdqp) {
|
|
|
|
ASSERT(ip->i_pdquot == NULL);
|
|
|
|
ASSERT(XFS_IS_PQUOTA_ON(mp));
|
|
|
|
ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
|
|
|
|
|
|
|
|
ip->i_pdquot = xfs_qm_dqhold(pdqp);
|
|
|
|
xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|