xfs: clean up xfs_iomap_valid

Rename all iomap_valid identifiers to imap_valid to fit the new
world order, and clean up xfs_iomap_valid to convert the passed-in
offset to blocks instead of converting the imap values to bytes.
Use the simpler inode->i_blkbits instead of the XFS macros for this.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
commit 558e689169
parent 34a52c6c06
Author:    Christoph Hellwig <hch@lst.de>
Date:      2010-04-28 12:28:58 +00:00
Committer: Alex Elder <aelder@sgi.com>

--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -329,17 +329,15 @@ xfs_map_blocks(
 }
 
 STATIC int
-xfs_iomap_valid(
+xfs_imap_valid(
         struct inode            *inode,
         struct xfs_bmbt_irec    *imap,
-        loff_t                  offset)
+        xfs_off_t               offset)
 {
-        struct xfs_mount        *mp = XFS_I(inode)->i_mount;
-        xfs_off_t               iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
-        xfs_off_t               iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
+        offset >>= inode->i_blkbits;
 
-        return offset >= iomap_offset &&
-                offset < iomap_offset + iomap_bsize;
+        return offset >= imap->br_startoff &&
+                offset < imap->br_startoff + imap->br_blockcount;
 }
 
 /*
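
Editor's note: the rename is mechanical; the substantive change is in the
hunk above, where the passed-in byte offset is converted to blocks once
instead of converting both mapping values from blocks to bytes with
XFS_FSB_TO_B(). The sketch below is illustrative C, not part of the
commit: the typedefs are stand-ins for the kernel types, and it assumes
XFS_FSB_TO_B(mp, fsb) is a left shift by sb_blocklog (its usual
definition) and that inode->i_blkbits equals sb_blocklog on XFS inodes.

#include <assert.h>
#include <stdbool.h>

typedef long long xfs_off_t;              /* stand-in for the kernel type */
typedef unsigned long long xfs_fileoff_t; /* stand-in for br_startoff's type */

/* Old style: convert the mapping from blocks to bytes, twice. */
static bool check_bytes(xfs_off_t offset, xfs_fileoff_t br_startoff,
                        xfs_fileoff_t br_blockcount, unsigned int blkbits)
{
        xfs_off_t iomap_offset = (xfs_off_t)br_startoff << blkbits;
        xfs_off_t iomap_bsize = (xfs_off_t)br_blockcount << blkbits;

        return offset >= iomap_offset && offset < iomap_offset + iomap_bsize;
}

/* New style: convert the offset from bytes to blocks, once. */
static bool check_blocks(xfs_off_t offset, xfs_fileoff_t br_startoff,
                         xfs_fileoff_t br_blockcount, unsigned int blkbits)
{
        offset >>= blkbits;     /* rounds down onto the containing block */

        return offset >= (xfs_off_t)br_startoff &&
               offset < (xfs_off_t)(br_startoff + br_blockcount);
}

int main(void)
{
        /* 4096-byte blocks (blkbits == 12): byte 20480 is block 5, and a
         * mapping starting at block 4 covering 2 blocks spans blocks 4-5. */
        assert(check_bytes(20480, 4, 2, 12) && check_blocks(20480, 4, 2, 12));

        /* byte 16383 is block 3, one block before the mapping starts */
        assert(!check_bytes(16383, 4, 2, 12) && !check_blocks(16383, 4, 2, 12));
        return 0;
}

Because the mapping always covers whole blocks, rounding the offset down
to its block number can never move it across the mapping's boundaries, so
the two forms agree; the new one needs a single shift and no xfs_mount
dereference.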
@@ -825,7 +823,7 @@ xfs_convert_page(
                         else
                                 type = IO_DELAY;
 
-                        if (!xfs_iomap_valid(inode, imap, offset)) {
+                        if (!xfs_imap_valid(inode, imap, offset)) {
                                 done = 1;
                                 continue;
                         }
@@ -1069,7 +1067,7 @@ xfs_page_state_convert(
         __uint64_t              end_offset;
         pgoff_t                 end_index, last_index, tlast;
         ssize_t                 size, len;
-        int                     flags, err, iomap_valid = 0, uptodate = 1;
+        int                     flags, err, imap_valid = 0, uptodate = 1;
         int                     page_dirty, count = 0;
         int                     trylock = 0;
         int                     all_bh = unmapped;
@@ -1130,12 +1128,12 @@ xfs_page_state_convert(
                          * the iomap is actually still valid, but the ioend
                          * isn't.  shouldn't happen too often.
                          */
-                        iomap_valid = 0;
+                        imap_valid = 0;
                         continue;
                 }
 
-                if (iomap_valid)
-                        iomap_valid = xfs_iomap_valid(inode, &imap, offset);
+                if (imap_valid)
+                        imap_valid = xfs_imap_valid(inode, &imap, offset);
 
                 /*
                  * First case, map an unwritten extent and prepare for
@@ -1156,7 +1154,7 @@
                          * Make sure we don't use a read-only iomap
                          */
                         if (flags == BMAPI_READ)
-                                iomap_valid = 0;
+                                imap_valid = 0;
 
                         if (buffer_unwritten(bh)) {
                                 type = IO_UNWRITTEN;
@@ -1169,7 +1167,7 @@
                                 flags = BMAPI_WRITE | BMAPI_MMAP;
                         }
 
-                        if (!iomap_valid) {
+                        if (!imap_valid) {
                                 /*
                                  * if we didn't have a valid mapping then we
                                  * need to ensure that we put the new mapping
@ -1190,9 +1188,10 @@ xfs_page_state_convert(
&imap, flags); &imap, flags);
if (err) if (err)
goto error; goto error;
iomap_valid = xfs_iomap_valid(inode, &imap, offset); imap_valid = xfs_imap_valid(inode, &imap,
offset);
} }
if (iomap_valid) { if (imap_valid) {
xfs_map_at_offset(inode, bh, &imap, offset); xfs_map_at_offset(inode, bh, &imap, offset);
if (startio) { if (startio) {
xfs_add_to_ioend(inode, bh, offset, xfs_add_to_ioend(inode, bh, offset,
@@ -1212,7 +1211,7 @@
                          * That means it must already have extents allocated
                          * underneath it. Map the extent by reading it.
                          */
-                        if (!iomap_valid || flags != BMAPI_READ) {
+                        if (!imap_valid || flags != BMAPI_READ) {
                                 flags = BMAPI_READ;
                                 size = xfs_probe_cluster(inode, page, bh,
                                                                 head, 1);
@@ -1220,7 +1219,8 @@
                                                 &imap, flags);
                                 if (err)
                                         goto error;
-                                iomap_valid = xfs_iomap_valid(inode, &imap, offset);
+                                imap_valid = xfs_imap_valid(inode, &imap,
+                                                            offset);
                         }
 
                         /*
@@ -1234,18 +1234,18 @@
                         type = IO_NEW;
                         if (trylock_buffer(bh)) {
                                 ASSERT(buffer_mapped(bh));
-                                if (iomap_valid)
+                                if (imap_valid)
                                         all_bh = 1;
                                 xfs_add_to_ioend(inode, bh, offset, type,
-                                                &ioend, !iomap_valid);
+                                                &ioend, !imap_valid);
                                 page_dirty--;
                                 count++;
                         } else {
-                                iomap_valid = 0;
+                                imap_valid = 0;
                         }
                 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                            (unmapped || startio)) {
-                        iomap_valid = 0;
+                        imap_valid = 0;
                 }
 
                 if (!iohead)
@@ -1259,7 +1259,7 @@
         if (startio)
                 xfs_start_page_writeback(page, 1, count);
 
-        if (ioend && iomap_valid) {
+        if (ioend && imap_valid) {
                 struct xfs_mount        *m = XFS_I(inode)->i_mount;
                 xfs_off_t               iomap_offset = XFS_FSB_TO_B(m, imap.br_startoff);
                 xfs_off_t               iomap_bsize = XFS_FSB_TO_B(m, imap.br_blockcount);
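
Editor's note: the trailing context above shows that the tail of
xfs_page_state_convert still expands the mapping to byte units with
XFS_FSB_TO_B() when sizing the cluster write; this commit converts only
the validity check itself to block units. A worked instance, assuming
the usual XFS_FSB_TO_B() definition of a left shift by sb_blocklog
(illustrative numbers, not from the commit):

/*
 * With 4096-byte blocks (sb_blocklog == 12), a mapping starting at
 * block 4 and spanning 2 blocks expands to
 *
 *      iomap_offset = XFS_FSB_TO_B(m, 4) = 4 << 12 = 16384 bytes
 *      iomap_bsize  = XFS_FSB_TO_B(m, 2) = 2 << 12 =  8192 bytes
 *
 * which is exactly the block-to-byte arithmetic that xfs_imap_valid()
 * no longer performs.
 */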