2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-25 05:34:00 +08:00

xfs: add DAX block zeroing support

Add initial support for DAX block zeroing operations to XFS. DAX
cannot use buffered IO through the page cache for zeroing, nor do we
need to issue IO for uncached block zeroing. In both cases, we can
simply call out to the dax block zeroing function.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
This commit is contained in:
Dave Chinner 2015-06-04 09:19:08 +10:00 committed by Dave Chinner
parent 6b698edeee
commit 4f69f578a8
2 changed files with 46 additions and 22 deletions

View File

@@ -1133,14 +1133,29 @@ xfs_zero_remaining_bytes(
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (imap.br_startblock == HOLESTARTBLOCK ||
		    imap.br_state == XFS_EXT_UNWRITTEN) {
			/* skip the entire extent */
			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
						imap.br_blockcount) - 1;
			continue;
		}

		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;

		/* DAX can just zero the backing device directly */
		if (IS_DAX(VFS_I(ip))) {
			error = dax_zero_page_range(VFS_I(ip), offset,
						    lastoffset - offset + 1,
						    xfs_get_blocks_direct);
			if (error)
				return error;
			continue;
		}

		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp,

View File

@@ -79,14 +79,15 @@ xfs_rw_ilock_demote(
}

/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
@@ -96,7 +97,8 @@ xfs_iozero(
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
@@ -108,20 +110,27 @@ xfs_iozero(
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return (-status);