/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
|
2012-10-08 18:56:10 +08:00
|
|
|
/*
|
|
|
|
* This is a wrapper routine around the xfs_ilock() routine used to centralize
|
|
|
|
* some grungy code. It is used in places that wish to lock the inode solely
|
|
|
|
* for reading the extents. The reason these places can't just call
|
|
|
|
* xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
|
|
|
|
* extents from disk for a file in b-tree format. If the inode is in b-tree
|
|
|
|
* format, then we need to lock the inode exclusively until the extents are read
|
|
|
|
* in. Locking it exclusively all the time would limit our parallelism
|
|
|
|
* unnecessarily, though. What we do instead is check to see if the extents
|
|
|
|
* have been read in yet, and only lock the inode exclusively if they have not.
|
|
|
|
*
|
|
|
|
* The function returns a value which should be given to the corresponding
|
|
|
|
* xfs_iunlock_map_shared(). This value is the mode in which the lock was
|
|
|
|
* actually taken.
|
|
|
|
*/
|
|
|
|
uint
|
|
|
|
xfs_ilock_map_shared(
|
|
|
|
xfs_inode_t *ip)
|
|
|
|
{
|
|
|
|
uint lock_mode;
|
|
|
|
|
|
|
|
if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
|
|
|
|
((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
|
|
|
|
lock_mode = XFS_ILOCK_EXCL;
|
|
|
|
} else {
|
|
|
|
lock_mode = XFS_ILOCK_SHARED;
|
|
|
|
}
|
|
|
|
|
|
|
|
xfs_ilock(ip, lock_mode);
|
|
|
|
|
|
|
|
return lock_mode;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is simply the unlock routine to go with xfs_ilock_map_shared().
|
|
|
|
* All it does is call xfs_iunlock() with the given lock_mode.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_iunlock_map_shared(
|
|
|
|
xfs_inode_t *ip,
|
|
|
|
unsigned int lock_mode)
|
|
|
|
{
|
|
|
|
xfs_iunlock(ip, lock_mode);
|
|
|
|
}
|
|
|
|
|
|
|
|
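/*
 * Illustrative usage of the pair above (a sketch only, not lifted from any
 * particular caller): the mode returned by xfs_ilock_map_shared() must be
 * passed back to xfs_iunlock_map_shared() so the lock actually taken is the
 * one that gets dropped.
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */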
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
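/*
 * Illustrative usage (a sketch only): lock flags must be paired exactly with
 * the matching xfs_iunlock() call, and when both locks are taken the iolock
 * is acquired before the ilock, as the ordering rule above requires.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */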
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
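/*
 * Illustrative trylock pattern (a sketch only, not from any specific
 * caller): fall back to the blocking variant when xfs_ilock_nowait()
 * cannot take the locks without sleeping.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 */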
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
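/*
 * Illustrative usage (a sketch only): xfs_isilocked() is intended for lock
 * assertions in debug builds, e.g.
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */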
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}
uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}
uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
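/*
 * Illustrative caller sketch of the two-phase protocol described above
 * (simplified, not copied from any in-tree caller; transaction setup and
 * error handling are omitted).  If xfs_ialloc() returns with
 * *ialloc_context set, the caller commits and retries with a fresh
 * transaction:
 *
 *	error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ialloc_context) {
 *		... commit tp while holding ialloc_context, start a new
 *		... transaction, then:
 *		error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *				   &ialloc_context, &ip);
 *	}
 */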
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}


	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
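/*
 * Illustrative caller sketch (simplified, not a copy of any in-tree caller;
 * transaction reservation and cleanup are omitted).  It shows the contract
 * above: the inode is joined to a permanent transaction with both locks held,
 * the data fork is truncated, and the possibly-rolled transaction handed back
 * in 'tp' is then committed:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	if (!error)
 *		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */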
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
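/*
 * Illustrative view of the on-disk structure manipulated above and by
 * xfs_iunlink_remove() below (diagram added for clarity, not part of the
 * original source): each AGI bucket heads a singly linked list of unlinked
 * inodes, chained through di_next_unlinked and terminated by NULLAGINO.
 *
 *	agi_unlinked[bucket] --> inode C --> inode B --> inode A --> NULLAGINO
 *
 * xfs_iunlink() pushes the new inode onto the head of its bucket's list;
 * xfs_iunlink_remove() unlinks it from wherever it sits in the chain.
 */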
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					 __func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}
/*
|
|
|
|
* A big issue when freeing the inode cluster is is that we _cannot_ skip any
|
|
|
|
* inodes that are in memory - they all must be marked stale and attached to
|
|
|
|
* the cluster buffer.
|
|
|
|
*/
|
2011-09-20 21:56:55 +08:00
|
|
|
STATIC int
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_ifree_cluster(
|
|
|
|
xfs_inode_t *free_ip,
|
|
|
|
xfs_trans_t *tp,
|
|
|
|
xfs_ino_t inum)
|
|
|
|
{
|
|
|
|
xfs_mount_t *mp = free_ip->i_mount;
|
|
|
|
int blks_per_cluster;
|
|
|
|
int nbufs;
|
|
|
|
int ninodes;
|
xfs: fix race in inode cluster freeing failing to stale inodes
When an inode cluster is freed, it needs to mark all inodes in memory as
XFS_ISTALE before marking the buffer as stale. This is eeded because the inodes
have a different life cycle to the buffer, and once the buffer is torn down
during transaction completion, we must ensure none of the inodes get written
back (which is what XFS_ISTALE does).
Unfortunately, xfs_ifree_cluster() has some bugs that lead to inodes not being
marked with XFS_ISTALE. This shows up when xfs_iflush() is called on these
inodes either during inode reclaim or tail pushing on the AIL. The buffer is
read back, but no longer contains inodes and so triggers assert failures and
shutdowns. This was reproducable with at run.dbench10 invocation from xfstests.
There are two main causes of xfs_ifree_cluster() failing. The first is simple -
it checks in-memory inodes it finds in the per-ag icache to see if they are
clean without holding the flush lock. if they are clean it skips them
completely. However, If an inode is flushed delwri, it will
appear clean, but is not guaranteed to be written back until the flush lock has
been dropped. Hence we may have raced on the clean check and the inode may
actually be dirty. Hence always mark inodes found in memory stale before we
check properly if they are clean.
The second is more complex, and makes the first problem easier to hit.
Basically the in-memory inode scan is done with full knowledge it can be racing
with inode flushing and AIl tail pushing, which means that inodes that it can't
get the flush lock on might not be attached to the buffer after then in-memory
inode scan due to IO completion occurring. This is actually documented in the
code as "needs better interlocking". i.e. this is a zero-day bug.
Effectively, the in-memory scan must be done while the inode buffer is locked
and Io cannot be issued on it while we do the in-memory inode scan. This
ensures that inodes we couldn't get the flush lock on are guaranteed to be
attached to the cluster buffer, so we can then catch all in-memory inodes and
mark them stale.
Now that the inode cluster buffer is locked before the in-memory scan is done,
there is no need for the two-phase update of the in-memory inodes, so simplify
the code into two loops and remove the allocation of the temporary buffer used
to hold locked inodes across the phases.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
2010-06-03 14:22:29 +08:00
|
|
|
int i, j;
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_daddr_t blkno;
|
|
|
|
xfs_buf_t *bp;
|
xfs: fix race in inode cluster freeing failing to stale inodes
When an inode cluster is freed, it needs to mark all inodes in memory as
XFS_ISTALE before marking the buffer as stale. This is eeded because the inodes
have a different life cycle to the buffer, and once the buffer is torn down
during transaction completion, we must ensure none of the inodes get written
back (which is what XFS_ISTALE does).
Unfortunately, xfs_ifree_cluster() has some bugs that lead to inodes not being
marked with XFS_ISTALE. This shows up when xfs_iflush() is called on these
inodes either during inode reclaim or tail pushing on the AIL. The buffer is
read back, but no longer contains inodes and so triggers assert failures and
shutdowns. This was reproducable with at run.dbench10 invocation from xfstests.
There are two main causes of xfs_ifree_cluster() failing. The first is simple -
it checks in-memory inodes it finds in the per-ag icache to see if they are
clean without holding the flush lock. if they are clean it skips them
completely. However, If an inode is flushed delwri, it will
appear clean, but is not guaranteed to be written back until the flush lock has
been dropped. Hence we may have raced on the clean check and the inode may
actually be dirty. Hence always mark inodes found in memory stale before we
check properly if they are clean.
The second is more complex, and makes the first problem easier to hit.
Basically the in-memory inode scan is done with full knowledge it can be racing
with inode flushing and AIl tail pushing, which means that inodes that it can't
get the flush lock on might not be attached to the buffer after then in-memory
inode scan due to IO completion occurring. This is actually documented in the
code as "needs better interlocking". i.e. this is a zero-day bug.
Effectively, the in-memory scan must be done while the inode buffer is locked
and Io cannot be issued on it while we do the in-memory inode scan. This
ensures that inodes we couldn't get the flush lock on are guaranteed to be
attached to the cluster buffer, so we can then catch all in-memory inodes and
mark them stale.
Now that the inode cluster buffer is locked before the in-memory scan is done,
there is no need for the two-phase update of the in-memory inodes, so simplify
the code into two loops and remove the allocation of the temporary buffer used
to hold locked inodes across the phases.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
2010-06-03 14:22:29 +08:00
|
|
|
xfs_inode_t *ip;
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_inode_log_item_t *iip;
|
|
|
|
xfs_log_item_t *lip;
|
2010-01-11 19:47:40 +08:00
|
|
|
struct xfs_perag *pag;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-01-11 19:47:40 +08:00
|
|
|
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
|
2005-04-17 06:20:36 +08:00
|
|
|
if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
|
|
|
|
blks_per_cluster = 1;
|
|
|
|
ninodes = mp->m_sb.sb_inopblock;
|
|
|
|
nbufs = XFS_IALLOC_BLOCKS(mp);
|
|
|
|
} else {
|
|
|
|
blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
|
|
|
|
mp->m_sb.sb_blocksize;
|
|
|
|
ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
|
|
|
|
nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < nbufs; j++, inum += ninodes) {
|
|
|
|
blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
|
|
|
|
XFS_INO_TO_AGBNO(mp, inum));
|
|
|
|
|
xfs: fix race in inode cluster freeing failing to stale inodes
When an inode cluster is freed, it needs to mark all inodes in memory as
XFS_ISTALE before marking the buffer as stale. This is eeded because the inodes
have a different life cycle to the buffer, and once the buffer is torn down
during transaction completion, we must ensure none of the inodes get written
back (which is what XFS_ISTALE does).
Unfortunately, xfs_ifree_cluster() has some bugs that lead to inodes not being
marked with XFS_ISTALE. This shows up when xfs_iflush() is called on these
inodes either during inode reclaim or tail pushing on the AIL. The buffer is
read back, but no longer contains inodes and so triggers assert failures and
shutdowns. This was reproducable with at run.dbench10 invocation from xfstests.
There are two main causes of xfs_ifree_cluster() failing. The first is simple -
it checks in-memory inodes it finds in the per-ag icache to see if they are
clean without holding the flush lock. if they are clean it skips them
completely. However, If an inode is flushed delwri, it will
appear clean, but is not guaranteed to be written back until the flush lock has
been dropped. Hence we may have raced on the clean check and the inode may
actually be dirty. Hence always mark inodes found in memory stale before we
check properly if they are clean.
The second is more complex, and makes the first problem easier to hit.
Basically the in-memory inode scan is done with full knowledge it can be racing
with inode flushing and AIl tail pushing, which means that inodes that it can't
get the flush lock on might not be attached to the buffer after then in-memory
inode scan due to IO completion occurring. This is actually documented in the
code as "needs better interlocking". i.e. this is a zero-day bug.
Effectively, the in-memory scan must be done while the inode buffer is locked
and Io cannot be issued on it while we do the in-memory inode scan. This
ensures that inodes we couldn't get the flush lock on are guaranteed to be
attached to the cluster buffer, so we can then catch all in-memory inodes and
mark them stale.
Now that the inode cluster buffer is locked before the in-memory scan is done,
there is no need for the two-phase update of the in-memory inodes, so simplify
the code into two loops and remove the allocation of the temporary buffer used
to hold locked inodes across the phases.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
2010-06-03 14:22:29 +08:00
|
|
|
/*
|
|
|
|
* We obtain and lock the backing buffer first in the process
|
|
|
|
* here, as we have to ensure that any dirty inode that we
|
|
|
|
* can't get the flush lock on is attached to the buffer.
|
|
|
|
* If we scan the in-memory inodes first, then buffer IO can
|
|
|
|
* complete before we get a lock on it, and hence we may fail
|
|
|
|
* to mark all the active inodes on the buffer stale.
|
|
|
|
*/
|
|
|
|
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
|
2012-11-02 08:38:42 +08:00
|
|
|
mp->m_bsize * blks_per_cluster,
|
|
|
|
XBF_UNMAPPED);
|
xfs: fix race in inode cluster freeing failing to stale inodes
When an inode cluster is freed, it needs to mark all inodes in memory as
XFS_ISTALE before marking the buffer as stale. This is eeded because the inodes
have a different life cycle to the buffer, and once the buffer is torn down
during transaction completion, we must ensure none of the inodes get written
back (which is what XFS_ISTALE does).
Unfortunately, xfs_ifree_cluster() has some bugs that lead to inodes not being
marked with XFS_ISTALE. This shows up when xfs_iflush() is called on these
inodes either during inode reclaim or tail pushing on the AIL. The buffer is
read back, but no longer contains inodes and so triggers assert failures and
shutdowns. This was reproducable with at run.dbench10 invocation from xfstests.
There are two main causes of xfs_ifree_cluster() failing. The first is simple -
it checks in-memory inodes it finds in the per-ag icache to see if they are
clean without holding the flush lock. if they are clean it skips them
completely. However, If an inode is flushed delwri, it will
appear clean, but is not guaranteed to be written back until the flush lock has
been dropped. Hence we may have raced on the clean check and the inode may
actually be dirty. Hence always mark inodes found in memory stale before we
check properly if they are clean.
The second is more complex, and makes the first problem easier to hit.
Basically the in-memory inode scan is done with full knowledge it can be racing
with inode flushing and AIl tail pushing, which means that inodes that it can't
get the flush lock on might not be attached to the buffer after then in-memory
inode scan due to IO completion occurring. This is actually documented in the
code as "needs better interlocking". i.e. this is a zero-day bug.
Effectively, the in-memory scan must be done while the inode buffer is locked
and Io cannot be issued on it while we do the in-memory inode scan. This
ensures that inodes we couldn't get the flush lock on are guaranteed to be
attached to the cluster buffer, so we can then catch all in-memory inodes and
mark them stale.
Now that the inode cluster buffer is locked before the in-memory scan is done,
there is no need for the two-phase update of the in-memory inodes, so simplify
the code into two loops and remove the allocation of the temporary buffer used
to hold locked inodes across the phases.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
2010-06-03 14:22:29 +08:00
|
|
|
|
2011-09-20 21:56:55 +08:00
|
|
|
if (!bp)
|
|
|
|
return ENOMEM;
|
2012-11-14 14:53:49 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This buffer may not have been correctly initialised as we
|
|
|
|
* didn't read it from disk. That's not important because we are
|
|
|
|
* only using to mark the buffer as stale in the log, and to
|
|
|
|
* attach stale cached inodes on it. That means it will never be
|
|
|
|
* dispatched for IO. If it is, we want to know about it, and we
|
|
|
|
* want it to fail. We can acheive this by adding a write
|
|
|
|
* verifier to the buffer.
|
|
|
|
*/
|
2012-11-14 14:54:40 +08:00
|
|
|
bp->b_ops = &xfs_inode_buf_ops;
|
2012-11-14 14:53:49 +08:00
|
|
|
|
2010-06-03 14:22:29 +08:00
|
|
|
/*
|
|
|
|
* Walk the inodes already attached to the buffer and mark them
|
|
|
|
* stale. These will all have the flush locks held, so an
|
2010-08-24 09:42:41 +08:00
|
|
|
* in-memory inode walk can't lock them. By marking them all
|
|
|
|
* stale first, we will not attempt to lock them in the loop
|
|
|
|
* below as the XFS_ISTALE flag will be set.
|
2010-06-03 14:22:29 +08:00
|
|
|
*/
|
2011-07-13 19:43:49 +08:00
|
|
|
lip = bp->b_fspriv;
|
2010-06-03 14:22:29 +08:00
|
|
|
while (lip) {
|
|
|
|
if (lip->li_type == XFS_LI_INODE) {
|
|
|
|
iip = (xfs_inode_log_item_t *)lip;
|
|
|
|
ASSERT(iip->ili_logged == 1);
|
2010-06-23 16:11:15 +08:00
|
|
|
lip->li_cb = xfs_istale_done;
|
2010-06-03 14:22:29 +08:00
|
|
|
xfs_trans_ail_copy_lsn(mp->m_ail,
|
|
|
|
&iip->ili_flush_lsn,
|
|
|
|
&iip->ili_item.li_lsn);
|
|
|
|
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
|
|
|
|
}
|
|
|
|
lip = lip->li_bio_list;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-08-24 09:42:41 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2010-06-03 14:22:29 +08:00
|
|
|
* For each inode in memory attempt to add it to the inode
|
|
|
|
* buffer and set it up for being staled on buffer IO
|
|
|
|
* completion. This is safe as we've locked out tail pushing
|
|
|
|
* and flushing by locking the buffer.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2010-06-03 14:22:29 +08:00
|
|
|
* We have already marked every inode that was part of a
|
|
|
|
* transaction stale above, which means there is no point in
|
|
|
|
* even trying to lock them.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
for (i = 0; i < ninodes; i++) {
|
2010-08-24 09:42:41 +08:00
|
|
|
retry:
|
2010-12-17 14:29:43 +08:00
|
|
|
rcu_read_lock();
|
2007-08-28 12:00:13 +08:00
|
|
|
ip = radix_tree_lookup(&pag->pag_ici_root,
|
|
|
|
XFS_INO_TO_AGINO(mp, (inum + i)));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-12-17 14:29:43 +08:00
|
|
|
/* Inode not in memory, nothing to do */
|
|
|
|
if (!ip) {
|
|
|
|
rcu_read_unlock();
|
2005-04-17 06:20:36 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2010-12-17 14:29:43 +08:00
|
|
|
/*
|
|
|
|
* because this is an RCU protected lookup, we could
|
|
|
|
* find a recently freed or even reallocated inode
|
|
|
|
* during the lookup. We need to check under the
|
|
|
|
* i_flags_lock for a valid inode here. Skip it if it
|
|
|
|
* is not valid, the wrong inode or stale.
|
|
|
|
*/
|
|
|
|
spin_lock(&ip->i_flags_lock);
|
|
|
|
if (ip->i_ino != inum + i ||
|
|
|
|
__xfs_iflags_test(ip, XFS_ISTALE)) {
|
|
|
|
spin_unlock(&ip->i_flags_lock);
|
|
|
|
rcu_read_unlock();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
spin_unlock(&ip->i_flags_lock);
|
|
|
|
|
2010-08-24 09:42:41 +08:00
|
|
|
/*
|
|
|
|
* Don't try to lock/unlock the current inode, but we
|
|
|
|
* _cannot_ skip the other inodes that we did not find
|
|
|
|
* in the list attached to the buffer and are not
|
|
|
|
* already marked stale. If we can't lock it, back off
|
|
|
|
* and retry.
|
|
|
|
*/
|
2010-06-03 14:22:29 +08:00
|
|
|
if (ip != free_ip &&
|
|
|
|
!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
|
2010-12-17 14:29:43 +08:00
|
|
|
rcu_read_unlock();
|
2010-08-24 09:42:41 +08:00
|
|
|
delay(1);
|
|
|
|
goto retry;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-12-17 14:29:43 +08:00
|
|
|
rcu_read_unlock();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-08-24 09:42:41 +08:00
|
|
|
xfs_iflock(ip);
|
2010-06-03 14:22:29 +08:00
|
|
|
xfs_iflags_set(ip, XFS_ISTALE);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-08-24 09:42:41 +08:00
|
|
|
/*
|
|
|
|
* we don't need to attach clean inodes or those only
|
|
|
|
* with unlogged changes (which we throw away, anyway).
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
iip = ip->i_itemp;
|
2010-08-24 09:42:41 +08:00
|
|
|
if (!iip || xfs_inode_clean(ip)) {
|
2010-06-03 14:22:29 +08:00
|
|
|
ASSERT(ip != free_ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_ifunlock(ip);
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2012-02-29 17:53:54 +08:00
|
|
|
iip->ili_last_fields = iip->ili_fields;
|
|
|
|
iip->ili_fields = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
iip->ili_logged = 1;
|
2008-10-30 14:39:12 +08:00
|
|
|
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
|
|
|
|
&iip->ili_item.li_lsn);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-06-23 16:11:15 +08:00
|
|
|
xfs_buf_attach_iodone(bp, xfs_istale_done,
|
|
|
|
&iip->ili_item);
|
2010-06-03 14:22:29 +08:00
|
|
|
|
|
|
|
if (ip != free_ip)
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
}
|
|
|
|
|
2010-08-24 09:42:41 +08:00
|
|
|
xfs_trans_stale_inode_buf(tp, bp);
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_trans_binval(tp, bp);
|
|
|
|
}
|
|
|
|
|
2010-01-11 19:47:40 +08:00
|
|
|
xfs_perag_put(pag);
|
2011-09-20 21:56:55 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is called to return an inode to the inode free list.
|
|
|
|
* The inode should already be truncated to 0 length and have
|
|
|
|
* no pages associated with it. This routine also assumes that
|
|
|
|
* the inode is already a part of the transaction.
|
|
|
|
*
|
|
|
|
* The on-disk copy of the inode will have been added to the list
|
|
|
|
* of unlinked inodes in the AGI. We need to remove the inode from
|
|
|
|
* that list atomically with respect to freeing it here.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_ifree(
|
|
|
|
xfs_trans_t *tp,
|
|
|
|
xfs_inode_t *ip,
|
|
|
|
xfs_bmap_free_t *flist)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int delete;
|
|
|
|
xfs_ino_t first_ino;
|
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(ip->i_d.di_nlink == 0);
|
|
|
|
ASSERT(ip->i_d.di_nextents == 0);
|
|
|
|
ASSERT(ip->i_d.di_anextents == 0);
|
2011-12-19 04:00:11 +08:00
|
|
|
ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(ip->i_d.di_nblocks == 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pull the on-disk inode from the AGI unlinked list.
|
|
|
|
*/
|
|
|
|
error = xfs_iunlink_remove(tp, ip);
|
2013-06-27 14:04:50 +08:00
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
return error;
|
|
|
|
|
|
|
|
error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
|
2013-06-27 14:04:50 +08:00
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
return error;
|
2013-06-27 14:04:50 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
ip->i_d.di_mode = 0; /* mark incore inode as free */
|
|
|
|
ip->i_d.di_flags = 0;
|
|
|
|
ip->i_d.di_dmevmask = 0;
|
|
|
|
ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
|
|
|
|
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
|
|
|
|
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
|
|
|
|
/*
|
|
|
|
* Bump the generation count so no one will be confused
|
|
|
|
* by reincarnations of this inode.
|
|
|
|
*/
|
|
|
|
ip->i_d.di_gen++;
|
|
|
|
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
|
|
|
|
2013-06-27 14:04:50 +08:00
|
|
|
if (delete)
|
2011-09-20 21:56:55 +08:00
|
|
|
error = xfs_ifree_cluster(ip, tp, first_ino);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-09-20 21:56:55 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-02-18 03:43:56 +08:00
|
|
|
* This is called to unpin an inode. The caller must have the inode locked
|
|
|
|
* in at least shared mode so that the buffer cannot be subsequently pinned
|
|
|
|
* once someone is waiting for it to be unpinned.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2010-02-18 03:43:56 +08:00
|
|
|
static void
|
2011-12-19 04:00:10 +08:00
|
|
|
xfs_iunpin(
|
2010-02-18 03:43:56 +08:00
|
|
|
struct xfs_inode *ip)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-03-08 08:24:07 +08:00
|
|
|
trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
|
|
|
|
|
2008-03-06 10:43:42 +08:00
|
|
|
/* Give the log a push to start the unpinning I/O */
|
2010-02-18 03:43:56 +08:00
|
|
|
xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
|
2010-01-19 17:56:46 +08:00
|
|
|
|
2008-03-06 10:43:42 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-12-19 04:00:10 +08:00
|
|
|
static void
|
|
|
|
__xfs_iunpin_wait(
|
|
|
|
struct xfs_inode *ip)
|
|
|
|
{
|
|
|
|
wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
|
|
|
|
DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
|
|
|
|
|
|
|
|
xfs_iunpin(ip);
|
|
|
|
|
|
|
|
do {
|
|
|
|
prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
|
|
|
|
if (xfs_ipincount(ip))
|
|
|
|
io_schedule();
|
|
|
|
} while (xfs_ipincount(ip));
|
|
|
|
finish_wait(wq, &wait.wait);
|
|
|
|
}
|
|
|
|
|
2010-02-06 09:37:26 +08:00
|
|
|
void
|
2008-03-06 10:43:42 +08:00
|
|
|
xfs_iunpin_wait(
|
2010-02-18 03:43:56 +08:00
|
|
|
struct xfs_inode *ip)
|
2008-03-06 10:43:42 +08:00
|
|
|
{
|
2011-12-19 04:00:10 +08:00
|
|
|
if (xfs_ipincount(ip))
|
|
|
|
__xfs_iunpin_wait(ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2013-08-12 18:49:33 +08:00
|
|
|
STATIC int
|
|
|
|
xfs_iflush_cluster(
|
|
|
|
xfs_inode_t *ip,
|
|
|
|
xfs_buf_t *bp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-08-12 18:49:33 +08:00
|
|
|
xfs_mount_t *mp = ip->i_mount;
|
|
|
|
struct xfs_perag *pag;
|
|
|
|
unsigned long first_index, mask;
|
|
|
|
unsigned long inodes_per_cluster;
|
|
|
|
int ilist_size;
|
|
|
|
xfs_inode_t **ilist;
|
|
|
|
xfs_inode_t *iq;
|
|
|
|
int nr_found;
|
|
|
|
int clcount = 0;
|
|
|
|
int bufwasdelwri;
|
2005-04-17 06:20:36 +08:00
|
|
|
int i;
|
|
|
|
|
2013-08-12 18:49:33 +08:00
|
|
|
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-12 18:49:33 +08:00
|
|
|
inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
|
|
|
|
ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
|
|
|
|
ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
|
|
|
|
if (!ilist)
|
|
|
|
goto out_put;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-12 18:49:33 +08:00
|
|
|
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
|
|
|
|
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
|
|
|
|
rcu_read_lock();
|
|
|
|
/* really need a gang lookup range call here */
|
|
|
|
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
|
|
|
|
first_index, inodes_per_cluster);
|
|
|
|
if (nr_found == 0)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_found; i++) {
|
|
|
|
iq = ilist[i];
|
|
|
|
if (iq == ip)
|
2008-03-06 10:43:49 +08:00
|
|
|
continue;
|
2010-12-17 14:29:43 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* because this is an RCU protected lookup, we could find a
|
|
|
|
* recently freed or even reallocated inode during the lookup.
|
|
|
|
* We need to check under the i_flags_lock for a valid inode
|
|
|
|
* here. Skip it if it is not valid or the wrong inode.
|
|
|
|
*/
|
|
|
|
spin_lock(&iq->i_flags_lock);
|
|
|
|
if (!iq->i_ino ||
|
|
|
|
(XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
|
|
|
|
spin_unlock(&iq->i_flags_lock);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
spin_unlock(&iq->i_flags_lock);
|
|
|
|
|
2008-03-06 10:43:49 +08:00
|
|
|
/*
|
|
|
|
* Do an un-protected check to see if the inode is dirty and
|
|
|
|
* is a candidate for flushing. These checks will be repeated
|
|
|
|
* later after the appropriate locks are acquired.
|
|
|
|
*/
|
2008-03-06 10:43:59 +08:00
|
|
|
if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
|
2008-03-06 10:43:49 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to get locks. If any are unavailable or it is pinned,
|
|
|
|
* then this inode cannot be flushed and is skipped.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
|
|
|
|
continue;
|
|
|
|
if (!xfs_iflock_nowait(iq)) {
|
|
|
|
xfs_iunlock(iq, XFS_ILOCK_SHARED);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (xfs_ipincount(iq)) {
|
|
|
|
xfs_ifunlock(iq);
|
|
|
|
xfs_iunlock(iq, XFS_ILOCK_SHARED);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* arriving here means that this inode can be flushed. First
|
|
|
|
* re-check that it's dirty before flushing.
|
|
|
|
*/
|
2008-03-06 10:43:59 +08:00
|
|
|
if (!xfs_inode_clean(iq)) {
|
|
|
|
int error;
|
2008-03-06 10:43:49 +08:00
|
|
|
error = xfs_iflush_int(iq, bp);
|
|
|
|
if (error) {
|
|
|
|
xfs_iunlock(iq, XFS_ILOCK_SHARED);
|
|
|
|
goto cluster_corrupt_out;
|
|
|
|
}
|
|
|
|
clcount++;
|
|
|
|
} else {
|
|
|
|
xfs_ifunlock(iq);
|
|
|
|
}
|
|
|
|
xfs_iunlock(iq, XFS_ILOCK_SHARED);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (clcount) {
|
|
|
|
XFS_STATS_INC(xs_icluster_flushcnt);
|
|
|
|
XFS_STATS_ADD(xs_icluster_flushinode, clcount);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_free:
|
2010-12-17 14:29:43 +08:00
|
|
|
rcu_read_unlock();
|
2008-05-19 14:31:57 +08:00
|
|
|
kmem_free(ilist);
|
2010-01-11 19:47:43 +08:00
|
|
|
out_put:
|
|
|
|
xfs_perag_put(pag);
|
2008-03-06 10:43:49 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
|
|
cluster_corrupt_out:
|
|
|
|
/*
|
|
|
|
* Corruption detected in the clustering loop. Invalidate the
|
|
|
|
* inode buffer and shut down the filesystem.
|
|
|
|
*/
|
2010-12-17 14:29:43 +08:00
|
|
|
rcu_read_unlock();
|
2008-03-06 10:43:49 +08:00
|
|
|
/*
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importantly, we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straightforward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwri_submit or
xfs_buf_delwri_submit_nowait. Buffers that are already on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long as we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 13:58:39 +08:00
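The core usage pattern this change introduces is compact enough to show on its own. A minimal sketch, assuming the post-change interfaces described above (xfs_buf_delwri_queue() taking a caller-supplied list head, xfs_buf_delwri_submit() writing the list back and waiting); the surrounding helper name is made up for illustration.

/* Illustrative only: queue dirty buffers on an on-stack list, then submit. */
static int
write_buffers_via_local_list(
	struct xfs_buf	**bufs,
	int		nbufs)
{
	LIST_HEAD(buffer_list);		/* local, on-stack delwri list */
	int		i;

	/* Queue each buffer on the local list rather than a per-buftarg one. */
	for (i = 0; i < nbufs; i++)
		xfs_buf_delwri_queue(bufs[i], &buffer_list);

	/* Write back everything queued above and wait for completion. */
	return xfs_buf_delwri_submit(&buffer_list);
}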
|
|
|
* Clean up the buffer. If it was delwri, just release it --
|
2008-03-06 10:43:49 +08:00
|
|
|
* brelse can handle it with no problems. If not, shut down the
|
|
|
|
* filesystem before releasing the buffer.
|
|
|
|
*/
|
2012-04-23 13:58:39 +08:00
|
|
|
bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
|
2008-03-06 10:43:49 +08:00
|
|
|
if (bufwasdelwri)
|
|
|
|
xfs_buf_relse(bp);
|
|
|
|
|
|
|
|
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
|
|
|
|
|
|
|
|
if (!bufwasdelwri) {
|
|
|
|
/*
|
|
|
|
* Just like incore_relse: if we have b_iodone functions,
|
|
|
|
* mark the buffer as an error and call them. Otherwise
|
|
|
|
* mark it as stale and brelse.
|
|
|
|
*/
|
2011-07-13 19:43:49 +08:00
|
|
|
if (bp->b_iodone) {
|
2008-03-06 10:43:49 +08:00
|
|
|
XFS_BUF_UNDONE(bp);
|
2011-10-11 00:52:46 +08:00
|
|
|
xfs_buf_stale(bp);
|
2011-07-23 07:39:51 +08:00
|
|
|
xfs_buf_ioerror(bp, EIO);
|
2010-10-07 02:41:18 +08:00
|
|
|
xfs_buf_ioend(bp, 0);
|
2008-03-06 10:43:49 +08:00
|
|
|
} else {
|
2011-10-11 00:52:46 +08:00
|
|
|
xfs_buf_stale(bp);
|
2008-03-06 10:43:49 +08:00
|
|
|
xfs_buf_relse(bp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlocks the flush lock
|
|
|
|
*/
|
2012-04-23 13:58:41 +08:00
|
|
|
xfs_iflush_abort(iq, false);
|
2008-05-19 14:31:57 +08:00
|
|
|
kmem_free(ilist);
|
2010-01-11 19:47:43 +08:00
|
|
|
xfs_perag_put(pag);
|
2008-03-06 10:43:49 +08:00
|
|
|
return XFS_ERROR(EFSCORRUPTED);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2012-04-23 13:58:36 +08:00
|
|
|
* Flush dirty inode metadata into the backing buffer.
|
|
|
|
*
|
|
|
|
* The caller must have the inode lock and the inode flush lock held. The
|
|
|
|
* inode lock will still be held upon return to the caller, and the inode
|
|
|
|
* flush lock will be released after the inode has reached the disk.
|
|
|
|
*
|
|
|
|
* The caller must write out the buffer returned in *bpp and release it.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_iflush(
|
2012-04-23 13:58:36 +08:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_buf **bpp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2012-04-23 13:58:36 +08:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_buf *bp;
|
|
|
|
struct xfs_dinode *dip;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
|
|
|
XFS_STATS_INC(xs_iflush_count);
|
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
|
2011-12-19 04:00:09 +08:00
|
|
|
ASSERT(xfs_isiflocked(ip));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
|
2011-12-19 04:00:07 +08:00
|
|
|
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-04-23 13:58:36 +08:00
|
|
|
*bpp = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
xfs_iunpin_wait(ip);
|
|
|
|
|
2010-01-11 19:45:21 +08:00
|
|
|
/*
|
|
|
|
* For stale inodes we cannot rely on the backing buffer remaining
|
|
|
|
* stale in cache for the remaining life of the stale inode and so
|
2012-07-04 00:21:22 +08:00
|
|
|
* xfs_imap_to_bp() below may give us a buffer that no longer contains
|
2010-01-11 19:45:21 +08:00
|
|
|
* inodes below. We have to check this after ensuring the inode is
|
|
|
|
* unpinned so that it is safe to reclaim the stale inode after the
|
|
|
|
* flush call.
|
|
|
|
*/
|
|
|
|
if (xfs_iflags_test(ip, XFS_ISTALE)) {
|
|
|
|
xfs_ifunlock(ip);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This may have been unpinned because the filesystem is shutting
|
|
|
|
* down forcibly. If that's the case we must not write this inode
|
2012-04-23 13:58:32 +08:00
|
|
|
* to disk, because the log record didn't make it to disk.
|
|
|
|
*
|
|
|
|
* We also have to remove the log item from the AIL in this case,
|
|
|
|
* as we wait for an empty AIL as part of the unmount process.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
if (XFS_FORCED_SHUTDOWN(mp)) {
|
2012-04-23 13:58:32 +08:00
|
|
|
error = XFS_ERROR(EIO);
|
|
|
|
goto abort_out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-03-06 10:43:42 +08:00
|
|
|
/*
|
|
|
|
* Get the buffer containing the on-disk inode.
|
|
|
|
*/
|
2012-07-04 00:21:22 +08:00
|
|
|
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
|
|
|
|
0);
|
2008-03-06 10:43:42 +08:00
|
|
|
if (error || !bp) {
|
|
|
|
xfs_ifunlock(ip);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* First flush out the inode that xfs_iflush was called with.
|
|
|
|
*/
|
|
|
|
error = xfs_iflush_int(ip, bp);
|
2008-03-06 10:43:49 +08:00
|
|
|
if (error)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
|
2008-03-06 10:43:42 +08:00
|
|
|
/*
|
|
|
|
* If the buffer is pinned then push on the log now so we won't
|
|
|
|
* get stuck waiting in the write for too long.
|
|
|
|
*/
|
2011-07-23 07:40:27 +08:00
|
|
|
if (xfs_buf_ispinned(bp))
|
2010-01-19 17:56:46 +08:00
|
|
|
xfs_log_force(mp, 0);
|
2008-03-06 10:43:42 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* inode clustering:
|
|
|
|
* see if other inodes can be gathered into this write
|
|
|
|
*/
|
2008-03-06 10:43:49 +08:00
|
|
|
error = xfs_iflush_cluster(ip, bp);
|
|
|
|
if (error)
|
|
|
|
goto cluster_corrupt_out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-04-23 13:58:36 +08:00
|
|
|
*bpp = bp;
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
corrupt_out:
|
|
|
|
xfs_buf_relse(bp);
|
2006-06-09 12:58:38 +08:00
|
|
|
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
|
2005-04-17 06:20:36 +08:00
|
|
|
cluster_corrupt_out:
|
2012-04-23 13:58:32 +08:00
|
|
|
error = XFS_ERROR(EFSCORRUPTED);
|
|
|
|
abort_out:
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Unlocks the flush lock
|
|
|
|
*/
|
2012-04-23 13:58:41 +08:00
|
|
|
xfs_iflush_abort(ip, false);
|
2012-04-23 13:58:32 +08:00
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
STATIC int
|
|
|
|
xfs_iflush_int(
|
2013-04-03 13:11:17 +08:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_buf *bp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-04-03 13:11:17 +08:00
|
|
|
struct xfs_inode_log_item *iip = ip->i_itemp;
|
|
|
|
struct xfs_dinode *dip;
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-22 15:34:00 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
|
2011-12-19 04:00:09 +08:00
|
|
|
ASSERT(xfs_isiflocked(ip));
|
2005-04-17 06:20:36 +08:00
|
|
|
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
|
2011-12-19 04:00:07 +08:00
|
|
|
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
|
2013-04-03 13:11:17 +08:00
|
|
|
ASSERT(iip != NULL && iip->ili_fields != 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* set *dip = inode's place in the buffer */
|
2008-11-28 11:23:41 +08:00
|
|
|
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-08 20:36:05 +08:00
|
|
|
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
|
2005-04-17 06:20:36 +08:00
|
|
|
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
|
|
|
|
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
|
|
|
if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
|
|
|
|
mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
|
|
|
|
__func__, ip->i_ino, ip, ip->i_d.di_magic);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
2011-07-26 14:31:30 +08:00
|
|
|
if (S_ISREG(ip->i_d.di_mode)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (XFS_TEST_ERROR(
|
|
|
|
(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
|
|
|
|
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
|
|
|
|
mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: Bad regular inode %Lu, ptr 0x%p",
|
|
|
|
__func__, ip->i_ino, ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
2011-07-26 14:31:30 +08:00
|
|
|
} else if (S_ISDIR(ip->i_d.di_mode)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (XFS_TEST_ERROR(
|
|
|
|
(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
|
|
|
|
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
|
|
|
|
(ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
|
|
|
|
mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: Bad directory inode %Lu, ptr 0x%p",
|
|
|
|
__func__, ip->i_ino, ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
|
|
|
|
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
|
|
|
|
XFS_RANDOM_IFLUSH_5)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: detected corrupt incore inode %Lu, "
|
|
|
|
"total extents = %d, nblocks = %Ld, ptr 0x%p",
|
|
|
|
__func__, ip->i_ino,
|
2005-04-17 06:20:36 +08:00
|
|
|
ip->i_d.di_nextents + ip->i_d.di_anextents,
|
2011-03-07 07:02:35 +08:00
|
|
|
ip->i_d.di_nblocks, ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
|
|
|
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
|
|
|
|
mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
|
2011-03-07 07:02:35 +08:00
|
|
|
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
|
|
|
"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
|
|
|
|
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto corrupt_out;
|
|
|
|
}
|
xfs: di_flushiter considered harmful
When we made all inode updates transactional, we no longer needed
the log recovery detection for inodes being newer on disk than the
transaction being replayed - it was redundant as replay of the log
would always result in the latest version of the inode would be on
disk. It was redundant, but left in place because it wasn't
considered to be a problem.
However, with the new "don't read inodes on create" optimisation,
flushiter has come back to bite us. Essentially, the optimisation
made always initialises flushiter to zero in the create transaction,
and so if we then crash and run recovery and the inode already on
disk has a non-zero flushiter it will skip recovery of that inode.
As a result, log recovery does the wrong thing and we end up with a
corrupt filesystem.
Because we have to support old kernel to new kernel upgrades, we
can't just get rid of the flushiter support in log recovery as we
might be upgrading from a kernel that doesn't have fully transactional
inode updates. Unfortunately, for v4 superblocks there is no way to
guarantee that log recovery knows about this fact.
We cannot add a new inode format flag to say it's a "special inode
create" because it won't be understood by older kernels and so
recovery could do the wrong thing on downgrade. We cannot specially
detect the combination of zero mode/non-zero flushiter on disk to
non-zero mode, zero flushiter in the log item during recovery
because wrapping of the flushiter can result in false detection.
Hence that makes this "don't use flushiter" optimisation limited to
a disk format that guarantees that we don't need it. And that means
the only fix here is to limit the "no read IO on create"
optimisation to version 5 superblocks....
Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2013-07-24 13:47:30 +08:00
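In this file the visible effect of that decision is small: xfs_iflush_int() (further down in the blame) only bumps di_flushiter for v1/v2 inodes, since v5 (CRC-enabled, di_version == 3) filesystems no longer depend on it for recovery ordering. Gathered here for readability, roughly as it appears below:

	/* Only pre-v3 (non-CRC) inodes still maintain the flush counter. */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap: the log never records DI_MAX_FLUSH, so recovery can tell. */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;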
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2013-07-24 13:47:30 +08:00
|
|
|
* Inode item log recovery for v1/v2 inodes is dependent on the
|
|
|
|
* di_flushiter count for correct sequencing. We bump the flush
|
|
|
|
* iteration count so we can detect flushes which postdate a log record
|
|
|
|
* during recovery. This is redundant as we now log every change and
|
|
|
|
* hence this can't happen, but we still need to do it to ensure
|
|
|
|
* backwards compatibility with old kernels that predate logging all
|
|
|
|
* inode changes.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-07-24 13:47:30 +08:00
|
|
|
if (ip->i_d.di_version < 3)
|
|
|
|
ip->i_d.di_flushiter++;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the dirty parts of the inode into the on-disk
|
|
|
|
* inode. We always copy out the core of the inode,
|
|
|
|
* because if the inode is dirty at all the core must
|
|
|
|
* be.
|
|
|
|
*/
|
2008-11-28 11:23:39 +08:00
|
|
|
xfs_dinode_to_disk(dip, &ip->i_d);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Wrap, we never let the log put out DI_MAX_FLUSH */
|
|
|
|
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
|
|
|
|
ip->i_d.di_flushiter = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is really an old format inode and the superblock version
|
|
|
|
* has not been updated to support only new format inodes, then
|
|
|
|
* convert back to the old inode format. If the superblock version
|
|
|
|
* has been updated, then make the conversion permanent.
|
|
|
|
*/
|
2008-11-28 11:23:39 +08:00
|
|
|
ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
|
|
|
|
if (ip->i_d.di_version == 1) {
|
2008-03-06 10:44:28 +08:00
|
|
|
if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Convert it back.
|
|
|
|
*/
|
|
|
|
ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
|
2008-11-28 11:23:39 +08:00
|
|
|
dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The superblock version has already been bumped,
|
|
|
|
* so just make the conversion to the new inode
|
|
|
|
* format permanent.
|
|
|
|
*/
|
2008-11-28 11:23:39 +08:00
|
|
|
ip->i_d.di_version = 2;
|
|
|
|
dip->di_version = 2;
|
2005-04-17 06:20:36 +08:00
|
|
|
ip->i_d.di_onlink = 0;
|
2008-11-28 11:23:39 +08:00
|
|
|
dip->di_onlink = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
|
2008-11-28 11:23:39 +08:00
|
|
|
memset(&(dip->di_pad[0]), 0,
|
|
|
|
sizeof(dip->di_pad));
|
2010-09-26 14:10:18 +08:00
|
|
|
ASSERT(xfs_get_projid(ip) == 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-04-10 10:23:58 +08:00
|
|
|
xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
|
|
|
|
if (XFS_IFORK_Q(ip))
|
|
|
|
xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_inobp_check(mp, bp);
|
|
|
|
|
|
|
|
/*
|
2012-02-29 17:53:54 +08:00
|
|
|
* We've recorded everything logged in the inode, so we'd like to clear
|
|
|
|
* the ili_fields bits so we don't log and flush things unnecessarily.
|
|
|
|
* However, we can't stop logging all this information until the data
|
|
|
|
* we've copied into the disk buffer is written to disk. If we did, we
|
|
|
|
* might overwrite the copy of the inode in the log with all the data
|
|
|
|
* after re-logging only part of it, and in the face of a crash we
|
|
|
|
* wouldn't have all the data we need to recover.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2012-02-29 17:53:54 +08:00
|
|
|
* What we do is move the bits to the ili_last_fields field. When
|
|
|
|
* logging the inode, these bits are moved back to the ili_fields field.
|
|
|
|
* In the xfs_iflush_done() routine we clear ili_last_fields, since we
|
|
|
|
* know that the information those bits represent is permanently on
|
|
|
|
* disk. As long as the flush completes before the inode is logged
|
|
|
|
* again, then both ili_fields and ili_last_fields will be cleared.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2012-02-29 17:53:54 +08:00
|
|
|
* We can play with the ili_fields bits here, because the inode lock
|
|
|
|
* must be held exclusively in order to set bits there and the flush
|
|
|
|
* lock protects the ili_last_fields bits. Set ili_logged so the flush
|
|
|
|
* done routine can tell whether or not to look in the AIL. Also, store
|
|
|
|
* the current LSN of the inode so that we can tell whether the item has
|
|
|
|
* moved in the AIL from xfs_iflush_done(). In order to read the lsn we
|
|
|
|
* need the AIL lock, because it is a 64 bit value that cannot be read
|
|
|
|
* atomically.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-04-03 13:11:17 +08:00
|
|
|
iip->ili_last_fields = iip->ili_fields;
|
|
|
|
iip->ili_fields = 0;
|
|
|
|
iip->ili_logged = 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-04-03 13:11:17 +08:00
|
|
|
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
|
|
|
|
&iip->ili_item.li_lsn);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-04-03 13:11:17 +08:00
|
|
|
/*
|
|
|
|
* Attach the function xfs_iflush_done to the inode's
|
|
|
|
* buffer. This will remove the inode from the AIL
|
|
|
|
* and unlock the inode's flush lock when the inode is
|
|
|
|
* completely written to disk.
|
|
|
|
*/
|
|
|
|
xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
|
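For context, the sketch below shows, for a single inode item, what the completion side of this handshake conceptually does once the buffer I/O finishes. It is a simplified illustration, not the real xfs_iflush_done(), which walks the whole list of items attached to the buffer and batches AIL removal under the AIL lock; example_iflush_done() and remove_from_ail() are hypothetical placeholders.

	/*
	 * Simplified, single-item illustration of the flush completion
	 * handshake; example_iflush_done() and remove_from_ail() are
	 * placeholders, not real kernel functions.
	 */
	STATIC void
	example_iflush_done(
		struct xfs_inode_log_item	*iip)
	{
		struct xfs_inode		*ip = iip->ili_inode;

		/*
		 * If the inode has not been relogged since the flush started,
		 * its position in the AIL is still described by ili_flush_lsn
		 * and the item can be removed from the AIL.
		 */
		if (iip->ili_logged && iip->ili_item.li_lsn == iip->ili_flush_lsn)
			remove_from_ail(&iip->ili_item);	/* AIL lock taken inside */

		/* The fields recorded at flush time are now safely on disk. */
		iip->ili_logged = 0;
		iip->ili_last_fields = 0;

		/* Drop the flush lock so the inode can be flushed again. */
		xfs_ifunlock(ip);
	}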
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-04-03 13:11:17 +08:00
|
|
|
/* update the lsn in the on disk inode if required */
|
|
|
|
if (ip->i_d.di_version == 3)
|
|
|
|
dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
|
|
|
|
|
|
|
|
/* generate the checksum. */
|
|
|
|
xfs_dinode_calc_crc(mp, dip);
|
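For v3 (CRC-enabled) inodes, the checksum covers the whole on-disk inode with the di_crc field itself excluded from the calculation. The sketch below shows the rough shape of that calculation using the helpers from xfs_cksum.h; it is an approximation of what xfs_dinode_calc_crc() does, not a copy of it.

	/*
	 * Approximate shape of the v3 inode checksum calculation; see
	 * xfs_dinode_calc_crc() for the real thing.
	 */
	static void
	example_dinode_calc_crc(
		struct xfs_mount	*mp,
		struct xfs_dinode	*dip)
	{
		__uint32_t		crc;

		/* Only v3 inodes carry a CRC. */
		if (dip->di_version < 3)
			return;

		/* CRC the whole inode, treating di_crc itself as zero. */
		crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      offsetof(struct xfs_dinode, di_crc));
		dip->di_crc = xfs_end_cksum(crc);
	}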
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-04-03 13:11:17 +08:00
|
|
|
ASSERT(bp->b_fspriv != NULL);
|
|
|
|
ASSERT(bp->b_iodone != NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
corrupt_out:
|
|
|
|
return XFS_ERROR(EFSCORRUPTED);
|
|
|
|
}
|
|
|
|
|
2012-11-06 22:50:40 +08:00
|
|
|
/*
|
|
|
|
* Test whether it is appropriate to check an inode for, and free, post-EOF
|
|
|
|
* blocks. The 'force' parameter determines whether we should also consider
|
|
|
|
* regular files that are marked preallocated or append-only.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
|
|
|
|
{
|
|
|
|
/* prealloc/delalloc exists only on regular files */
|
|
|
|
if (!S_ISREG(ip->i_d.di_mode))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Zero-sized files with no cached pages and no delalloc blocks will not
|
|
|
|
* have speculative prealloc/delalloc blocks to remove.
|
|
|
|
*/
|
|
|
|
if (VFS_I(ip)->i_size == 0 &&
|
|
|
|
VN_CACHED(VFS_I(ip)) == 0 &&
|
|
|
|
ip->i_delayed_blks == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* If we haven't read in the extent list, then don't do it now. */
|
|
|
|
if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do not free real preallocated or append-only files unless the file
|
|
|
|
* has delalloc blocks and we are forced to remove them.
|
|
|
|
*/
|
|
|
|
if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
|
|
|
|
if (!force || ip->i_delayed_blks == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
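A typical caller pairs this check with the actual trimming of post-EOF blocks. The sketch below is a hypothetical caller loosely modelled on the release/inactive paths; the xfs_free_eofblocks() signature shown (mount, inode, need_iolock) is an assumption about this era's API, so treat it as illustrative rather than definitive.

	/*
	 * Hypothetical caller: trim speculative post-EOF preallocation if
	 * the inode qualifies.  Loosely modelled on the release/inactive
	 * paths; not real kernel code.
	 */
	STATIC int
	example_trim_eofblocks(
		struct xfs_inode	*ip)
	{
		struct xfs_mount	*mp = ip->i_mount;
		int			error = 0;

		/* Only bother if there may be speculative blocks to free. */
		if (xfs_can_free_eofblocks(ip, false)) {
			/* Assumed signature: (mount, inode, need_iolock). */
			error = xfs_free_eofblocks(mp, ip, true);
		}

		return error;
	}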