udmabuf: Add support for mapping hugepages (v4)
If the VMM's (Qemu) memory backend is backed by memfd + hugepages (hugetlbfs and not THP), we have to first find the hugepage(s) where the Guest allocations are located and then extract the regular 4k sized subpages from them.

v2: Ensure that the subpage and hugepage offsets are calculated correctly when the range of subpage allocations cuts across multiple hugepages.

v3: Instead of repeatedly looking up the hugepage for each subpage, only do it when the subpage allocation crosses over into a different hugepage. (suggested by Gerd and DW)

v4: Fix the following warning identified by checkpatch:
    CHECK:OPEN_ENDED_LINE: Lines should not end with a '('

Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20210609182915.592743-1-vivek.kasireddy@intel.com
[ kraxel: one more checkpatch format tweak ]
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
commit 16c243e99d
parent 6eca310e89
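For context, the sketch below shows how userspace might exercise this path: create a hugetlbfs-backed memfd, seal it, and hand it to /dev/udmabuf through the UDMABUF_CREATE ioctl from <linux/udmabuf.h>. This is not part of the patch; the 2MB default hugepage size, the 4MB buffer size, and the minimal error handling are assumptions made for illustration.

/*
 * Illustrative userspace sketch (not part of this patch): create a udmabuf
 * from a hugetlbfs-backed memfd. Assumes a 2MB default hugepage size and
 * omits most error handling for brevity.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	const __u64 size = 4UL << 20;		/* e.g. two 2MB hugepages */
	struct udmabuf_create create;
	int memfd, devfd, buffd;

	/* Hugetlbfs-backed memfd; udmabuf requires it to be sealable. */
	memfd = memfd_create("guest-ram", MFD_HUGETLB | MFD_ALLOW_SEALING);
	if (memfd < 0 || ftruncate(memfd, size) < 0)
		return 1;
	/* udmabuf wants F_SEAL_SHRINK set and F_SEAL_WRITE clear. */
	if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		return 1;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;			/* must be PAGE_SIZE aligned */
	create.size   = size;			/* must be PAGE_SIZE aligned */

	/* On success the ioctl returns the new dma-buf fd. */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0)
		perror("UDMABUF_CREATE");
	else
		printf("dma-buf fd: %d\n", buffd);
	return buffd < 0;
}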
@@ -11,6 +11,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>
 
 static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
@@ -160,10 +161,13 @@ static long udmabuf_create(struct miscdevice *device,
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct file *memfd = NULL;
+	struct address_space *mapping = NULL;
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
 	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	struct page *page, *hpage = NULL;
+	pgoff_t subpgoff, maxsubpgs;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
 
@@ -194,7 +198,8 @@ static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		mapping = file_inode(memfd)->i_mapping;
+		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,17 +210,48 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size >> PAGE_SHIFT;
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+		}
 		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err;
+			if (is_file_hugepages(memfd)) {
+				if (!hpage) {
+					hpage = find_get_page_flags(mapping, pgoff,
+								    FGP_ACCESSED);
+					if (IS_ERR(hpage)) {
+						ret = PTR_ERR(hpage);
+						goto err;
+					}
+				}
+				page = hpage + subpgoff;
+				get_page(page);
+				subpgoff++;
+				if (subpgoff == maxsubpgs) {
+					put_page(hpage);
+					hpage = NULL;
+					subpgoff = 0;
+					pgoff++;
+				}
+			} else {
+				page = shmem_read_mapping_page(mapping,
+							       pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
 			}
 			ubuf->pages[pgbuf++] = page;
 		}
 		fput(memfd);
 		memfd = NULL;
+		if (hpage) {
+			put_page(hpage);
+			hpage = NULL;
+		}
 	}
 
 	exp_info.ops = &udmabuf_ops;
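To make the offset bookkeeping in the hunk above concrete, here is a small standalone sketch of the same index math: deriving the starting hugepage index (pgoff) and subpage index (subpgoff) from a byte offset, then rolling over into the next hugepage once maxsubpgs subpages have been consumed, which is the case the v2 revision fixes. It is illustrative only; the 2MB hugepage and 4KB base page shifts are assumptions standing in for huge_page_shift() and PAGE_SHIFT.

/*
 * Illustration only: 2MB hugepages and 4KB base pages are assumed here;
 * the kernel derives these from huge_page_shift() and PAGE_SHIFT.
 */
#include <stdio.h>

#define PAGE_SHIFT	12				/* 4KB subpages */
#define HPAGE_SHIFT	21				/* 2MB hugepages */
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	/* Start 8KB below a hugepage boundary so the walk crosses it. */
	unsigned long offset = (2UL << 20) - (2UL << PAGE_SHIFT);
	unsigned long npages = 4;			/* four 4KB subpages */

	/* Same derivation as the hugepage setup in the hunk above. */
	unsigned long pgoff     = offset >> HPAGE_SHIFT;
	unsigned long subpgoff  = (offset & ~HPAGE_MASK) >> PAGE_SHIFT;
	unsigned long maxsubpgs = 1UL << (HPAGE_SHIFT - PAGE_SHIFT);

	for (unsigned long i = 0; i < npages; i++) {
		printf("subpage %lu -> hugepage %lu, index %lu\n",
		       i, pgoff, subpgoff);
		/* Advance to the next hugepage only at the boundary. */
		if (++subpgoff == maxsubpgs) {
			subpgoff = 0;
			pgoff++;
		}
	}
	return 0;
}

With the chosen offset this prints two subpages from hugepage 0 followed by two from hugepage 1, mirroring how the kernel loop drops its reference on the current hugepage and looks up the next one only when the subpage index wraps, rather than on every iteration.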