iomap: Change calling convention for zeroing
Pass the full length to iomap_zero() and dax_iomap_zero(), and have them
return how many bytes they actually handled. This is preparatory work for
handling THP, although it looks like DAX could actually take advantage of
it if there's a larger contiguous area.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent e25ba8cbfd
commit 81ee8e52a7
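The heart of the change is the contract between iomap_zero_range_actor() and the per-chunk helpers: instead of being handed an exact page-sized chunk, iomap_zero() and dax_iomap_zero() now receive the full remaining length and return how many bytes they actually zeroed (as s64), and the actor advances by that amount. The following standalone userspace sketch is not part of the patch; demo_zero() and demo_zero_range() are hypothetical names used only to illustrate the convention:

/*
 * Userspace sketch (not kernel code) of the calling convention this patch
 * introduces: the zeroing helper gets the full remaining length, handles
 * as much as it wants (here: at most one page), and reports back how many
 * bytes it consumed; the caller loops on that return value.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096u

/* Zero at most one page worth of buffer starting at pos; return bytes handled. */
static int64_t demo_zero(unsigned char *buf, uint64_t pos, uint64_t length)
{
	unsigned offset = pos % DEMO_PAGE_SIZE;
	uint64_t bytes = DEMO_PAGE_SIZE - offset;

	if (bytes > length)
		bytes = length;
	memset(buf + pos, 0, bytes);
	return (int64_t)bytes;
}

/* Caller loop shaped like iomap_zero_range_actor() after the patch. */
static int64_t demo_zero_range(unsigned char *buf, uint64_t pos, uint64_t length)
{
	int64_t written = 0;

	do {
		int64_t bytes = demo_zero(buf, pos, length);

		if (bytes < 0)
			return bytes;	/* propagate errors, as the actor does */
		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	return written;
}

int main(void)
{
	static unsigned char buf[4 * DEMO_PAGE_SIZE];

	memset(buf, 0xff, sizeof(buf));
	/* Zero a range that starts mid-page and spans several pages. */
	printf("zeroed %lld bytes\n",
	       (long long)demo_zero_range(buf, 100, 3 * DEMO_PAGE_SIZE));
	return 0;
}

Because the helper reports how much it handled rather than being told a fixed chunk, a future implementation can zero more than one page per call (a THP, or a larger contiguous DAX extent) without the caller changing.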
fs/dax.c
@@ -1037,18 +1037,18 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	return ret;
 }
 
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
-		   struct iomap *iomap)
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
 	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
 	pgoff_t pgoff;
 	long rc, id;
 	void *kaddr;
 	bool page_aligned = false;
-
+	unsigned offset = offset_in_page(pos);
+	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
 	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
-	    IS_ALIGNED(size, PAGE_SIZE))
+	    (size == PAGE_SIZE))
 		page_aligned = true;
 
 	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
@@ -1058,8 +1058,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
 	id = dax_read_lock();
 
 	if (page_aligned)
-		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
-					 size >> PAGE_SHIFT);
+		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
 	else
 		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
 	if (rc < 0) {
@@ -1072,7 +1071,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
 		dax_flush(iomap->dax_dev, kaddr + offset, size);
 	}
 	dax_read_unlock(id);
-	return 0;
+	return size;
 }
 
 static loff_t
fs/iomap/buffered-io.c
@@ -898,11 +898,13 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
-		unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
+static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
+		struct iomap *iomap, struct iomap *srcmap)
 {
 	struct page *page;
 	int status;
+	unsigned offset = offset_in_page(pos);
+	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
 	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
 	if (status)
@@ -914,38 +916,33 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
 	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
 }
 
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
-		void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
+		loff_t length, void *data, struct iomap *iomap,
+		struct iomap *srcmap)
 {
 	bool *did_zero = data;
 	loff_t written = 0;
-	int status;
 
 	/* already zeroed? we're done. */
 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
-		return count;
+		return length;
 
 	do {
-		unsigned offset, bytes;
-
-		offset = offset_in_page(pos);
-		bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+		s64 bytes;
 
 		if (IS_DAX(inode))
-			status = dax_iomap_zero(pos, offset, bytes, iomap);
+			bytes = dax_iomap_zero(pos, length, iomap);
 		else
-			status = iomap_zero(inode, pos, offset, bytes, iomap,
-					srcmap);
-		if (status < 0)
-			return status;
+			bytes = iomap_zero(inode, pos, length, iomap, srcmap);
+		if (bytes < 0)
+			return bytes;
 
 		pos += bytes;
-		count -= bytes;
+		length -= bytes;
 		written += bytes;
 		if (did_zero)
 			*did_zero = true;
-	} while (count > 0);
+	} while (length > 0);
 
 	return written;
 }
include/linux/dax.h
@@ -214,8 +214,7 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index);
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
-		struct iomap *iomap);
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
 static inline bool dax_mapping(struct address_space *mapping)
 {
 	return mapping->host && IS_DAX(mapping->host);