91d25ba8a6
When servicing mmap() reads from file holes the current DAX code allocates a page cache page of all zeroes and places the struct page pointer in the mapping->page_tree radix tree. This has three major drawbacks:

1) It consumes memory unnecessarily. For every 4k page that is read via a DAX mmap() over a hole, we allocate a new page cache page. This means that if you read 1GiB worth of pages, you end up using 1GiB of zeroed memory. This is easily visible by looking at the overall memory consumption of the system or by looking at /proc/[pid]/smaps:

        7f62e72b3000-7f63272b3000 rw-s 00000000 103:00 12  /root/dax/data
        Size:            1048576 kB
        Rss:             1048576 kB
        Pss:             1048576 kB
        Shared_Clean:          0 kB
        Shared_Dirty:          0 kB
        Private_Clean:   1048576 kB
        Private_Dirty:         0 kB
        Referenced:      1048576 kB
        Anonymous:             0 kB
        LazyFree:              0 kB
        AnonHugePages:         0 kB
        ShmemPmdMapped:        0 kB
        Shared_Hugetlb:        0 kB
        Private_Hugetlb:       0 kB
        Swap:                  0 kB
        SwapPss:               0 kB
        KernelPageSize:        4 kB
        MMUPageSize:           4 kB
        Locked:                0 kB

2) It is slower than using a common zero page because each page fault has more work to do. Instead of just inserting a common zero page we have to allocate a page cache page, zero it, and then insert it. Here are the average latencies of dax_load_hole() as measured by ftrace on a random test box:

        Old method, using zeroed page cache pages:  3.4 us
        New method, using the common 4k zero page:  0.8 us

   This was the average latency over 1 GiB of sequential reads done by this simple fio script:

        [global]
        size=1G
        filename=/root/dax/data
        fallocate=none

        [io]
        rw=read
        ioengine=mmap

3) The fact that we had to check for both DAX exceptional entries and for page cache pages in the radix tree made the DAX code more complex.

Solve these issues by following the lead of the DAX PMD code and using a common 4k zero page instead. As with the PMD code we will now insert a DAX exceptional entry into the radix tree instead of a struct page pointer, which allows us to remove all the special casing in the DAX code.

Note that we do still pretty aggressively check for regular pages in the DAX radix tree, especially where we take action based on the bits set in the page. If we ever find a regular page in our radix tree now that most likely means that someone besides DAX is inserting pages (which has happened lots of times in the past), and we want to find that out early and fail loudly.

This solution also removes the extra memory consumption. Here is that same /proc/[pid]/smaps after 1GiB of reading from a hole with the new code:

        7f2054a74000-7f2094a74000 rw-s 00000000 103:00 12  /root/dax/data
        Size:            1048576 kB
        Rss:                   0 kB
        Pss:                   0 kB
        Shared_Clean:          0 kB
        Shared_Dirty:          0 kB
        Private_Clean:         0 kB
        Private_Dirty:         0 kB
        Referenced:            0 kB
        Anonymous:             0 kB
        LazyFree:              0 kB
        AnonHugePages:         0 kB
        ShmemPmdMapped:        0 kB
        Shared_Hugetlb:        0 kB
        Private_Hugetlb:       0 kB
        Swap:                  0 kB
        SwapPss:               0 kB
        KernelPageSize:        4 kB
        MMUPageSize:           4 kB
        Locked:                0 kB

Overall system memory consumption is similarly improved.

Another major change is that we remove dax_pfn_mkwrite() from our fault flow, and instead rely on the page fault itself to make the PTE dirty and writeable. The following description from the patch adding the vm_insert_mixed_mkwrite() call explains this a little more:

"To be able to use the common 4k zero page in DAX we need to have our PTE fault path look more like our PMD fault path where a PTE entry can be marked as dirty and writeable as it is first inserted rather than waiting for a follow-up dax_pfn_mkwrite() => finish_mkwrite_fault() call.
Right now we can rely on having a dax_pfn_mkwrite() call because we can distinguish between these two cases in do_wp_page():

        case 1: 4k zero page => writable DAX storage
        case 2: read-only DAX storage => writeable DAX storage

This distinction is made via vm_normal_page(). vm_normal_page() returns false for the common 4k zero page, though, just as it does for DAX ptes. Instead of special casing the DAX + 4k zero page case we will simplify our DAX PTE page fault sequence so that it matches our DAX PMD sequence, and get rid of the dax_pfn_mkwrite() helper. We will instead use dax_iomap_fault() to handle write-protection faults.

This means that insert_pfn() needs to follow the lead of insert_pfn_pmd() and allow us to pass in a 'mkwrite' flag. If 'mkwrite' is set insert_pfn() will do the work that was previously done by wp_page_reuse() as part of the dax_pfn_mkwrite() call path"

Link: http://lkml.kernel.org/r/20170724170616.25810-4-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
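To make the quoted change concrete, here is a condensed sketch of what insert_pfn() looks like with the 'mkwrite' flag described above. This is an illustration distilled from the description, not the verbatim mm/memory.c change: the pfn_t_devmap() case, the sanity checks, and most error handling are omitted.

        /*
         * Condensed sketch of insert_pfn() with the 'mkwrite' flag (not the
         * verbatim kernel code; devmap handling and error paths are trimmed).
         */
        static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                              pfn_t pfn, pgprot_t prot, bool mkwrite)
        {
                struct mm_struct *mm = vma->vm_mm;
                spinlock_t *ptl;
                pte_t *pte, entry;

                pte = get_locked_pte(mm, addr, &ptl);
                if (!pte)
                        return -ENOMEM;

                if (!pte_none(*pte)) {
                        if (!mkwrite)
                                goto out;       /* a PTE is already present */
                        entry = *pte;           /* wp fault: reuse the PTE */
                } else {
                        entry = pte_mkspecial(pfn_t_pte(pfn, prot));
                }

                if (mkwrite) {
                        /*
                         * Do at insert time what wp_page_reuse() used to do
                         * from the dax_pfn_mkwrite() path: mark the PTE young
                         * and dirty, and writeable if the VMA permits.
                         */
                        entry = pte_mkyoung(entry);
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                }

                set_pte_at(mm, addr, pte, entry);
                update_mmu_cache(vma, addr, pte);
        out:
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

With this in place, a write fault on a hole that was previously serviced by the zero page simply inserts the real storage pfn dirty and writeable in one step, and dax_pfn_mkwrite() is no longer needed.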
165 lines
4.9 KiB
C
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
        /*
         * direct_access: translate a device-relative
         * logical-page-offset into an absolute physical pfn. Return the
         * number of pages available for DAX at that pfn.
         */
        long (*direct_access)(struct dax_device *, pgoff_t, long,
                        void **, pfn_t *);
        /* copy_from_iter: required operation for fs-dax direct-i/o */
        size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
                        struct iov_iter *);
        /* flush: optional driver-specific cache management after writes */
        void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
};

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
        return NULL;
}

static inline void put_dax(struct dax_device *dax_dev)
{
}
#endif

int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
        return __bdev_dax_supported(sb, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
        return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
        put_dax(dax_dev);
}

#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
        return -EOPNOTSUPP;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
        return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
struct dax_device *alloc_dax(void *private, const char *host,
                const struct dax_operations *ops);
bool dax_alive(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);

/*
 * We use lowest available bit in exceptional entry for locking, one bit for
 * the entry size (PMD) and two more to tell us if the entry is a zero page or
 * an empty entry that is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT         (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK    (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD           (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE     (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY         (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static inline unsigned long dax_radix_sector(void *entry)
{
        return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
        return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
                        ((unsigned long)sector << RADIX_DAX_SHIFT) |
                        RADIX_DAX_ENTRY_LOCK);
}

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
                struct dax_device *dax_dev, sector_t sector,
                unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
                struct dax_device *dax_dev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        return -ENXIO;
}
#endif

#ifdef CONFIG_FS_DAX_PMD
static inline unsigned int dax_radix_order(void *entry)
{
        if ((unsigned long)entry & RADIX_DAX_PMD)
                return PMD_SHIFT - PAGE_SHIFT;
        return 0;
}
#else
static inline unsigned int dax_radix_order(void *entry)
{
        return 0;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
        return mapping->host && IS_DAX(mapping->host);
}

struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc);
#endif
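As a quick illustration of the radix tree entry encoding above, the following hypothetical snippet (not part of dax.h) builds a locked zero-page entry the way the new hole-read path would, then decodes it with the helpers from this header:

        /* Hypothetical example: encode a locked zero-page entry and decode it. */
        static void dax_entry_example(void)
        {
                void *entry = dax_radix_locked_entry(0, RADIX_DAX_ZERO_PAGE);

                /* All DAX entries are exceptional; this one is locked + zero page. */
                WARN_ON(!((unsigned long)entry & RADIX_TREE_EXCEPTIONAL_ENTRY));
                WARN_ON(!((unsigned long)entry & RADIX_DAX_ENTRY_LOCK));
                WARN_ON(!((unsigned long)entry & RADIX_DAX_ZERO_PAGE));

                /* The PMD bit is clear, so the entry has size PAGE_SIZE (order 0). */
                WARN_ON(dax_radix_order(entry) != 0);

                /* Zero-page entries carry no block allocation; the sector is 0. */
                WARN_ON(dax_radix_sector(entry) != 0);
        }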