
Merge branch 'ttm-prot-fix' of git://people.freedesktop.org/~thomash/linux into drm-next

A small fix for the long-standing ttm vm page protection hack.

Sent as a separate PR as it touches mm, has all acks in place.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellström (VMware) <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116102411.3056-1-thomas_os@shipmail.org
Committed by Dave Airlie on 2020-01-31 15:18:21 +10:00
commit b45f1b3b58
4 changed files with 63 additions and 12 deletions
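
For context: the "protection hack" removed here worked by copying the entire VMA onto the stack so that modified caching/encryption bits could be smuggled into the core vm via the copy's vm_page_prot. A condensed before/after sketch of the pattern (the insert_old/insert_new names are illustrative, not literal driver code):

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Before: the fault handler fakes a stack copy of the VMA just to
 * carry a per-fault pgprot into the core vm. Fragile, because the
 * core vm may inspect other fields of the (temporary) copy. */
static vm_fault_t insert_old(struct vm_fault *vmf, pfn_t pfn, pgprot_t prot)
{
	struct vm_area_struct cvma = *vmf->vma;	/* the hack */

	cvma.vm_page_prot = prot;
	return vmf_insert_mixed(&cvma, vmf->address, pfn);
}

/* After: the per-fault pgprot is passed explicitly and the real VMA
 * is handed to the core vm. */
static vm_fault_t insert_new(struct vm_fault *vmf, pfn_t pfn, pgprot_t prot)
{
	return vmf_insert_mixed_prot(vmf->vma, vmf->address, pfn, prot);
}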

drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -179,7 +179,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 			    pgoff_t num_prefault)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct vm_area_struct cvma = *vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
 	unsigned long page_offset;
@@ -250,7 +249,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		goto out_io_unlock;
 	}
 
-	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
@@ -266,7 +265,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		}
 	} else {
 		/* Iomem should not be marked encrypted */
-		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+		prot = pgprot_decrypted(prot);
 	}
 
 	/*
@@ -289,11 +288,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 			pfn = page_to_pfn(page);
 		}
 
+		/*
+		 * Note that the value of @prot at this point may differ from
+		 * the value of @vma->vm_page_prot in the caching- and
+		 * encryption bits. This is because the exact location of the
+		 * data may not be known at mmap() time and may also change
+		 * at arbitrary times while the data is mmap'ed.
+		 * See vmf_insert_mixed_prot() for a discussion.
+		 */
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed(&cvma, address,
-					__pfn_to_pfn_t(pfn, PFN_DEV));
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
 		else
-			ret = vmf_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -325,7 +333,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
-	prot = vm_get_page_prot(vma->vm_flags);
+	prot = vma->vm_page_prot;
 	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
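
With this change, ttm_bo_vm_fault_reserved() receives the base protection from its caller and applies placement-dependent caching and encryption adjustments to that value directly. A minimal sketch of a driver .fault handler built on the new interface (the mydrv name is hypothetical; the flow mirrors ttm_bo_vm_fault() above, and assumes the ttm_bo_vm_reserve() helper from the earlier TTM fault-helper split):

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical driver fault handler using the reworked interface. */
static vm_fault_t mydrv_bo_vm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Base protection comes from the VMA; placement-specific caching
	 * and encryption bits are applied inside ttm_bo_vm_fault_reserved(),
	 * without ever touching a copied VMA. */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}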

include/linux/mm.h

@@ -2533,6 +2533,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+			pfn_t pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

include/linux/mm_types.h

@@ -312,7 +312,12 @@ struct vm_area_struct {
 	/* Second cache line starts here. */
 
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
+
+	/*
+	 * Access permissions of this VMA.
+	 * See vmf_insert_mixed_prot() for discussion.
+	 */
+	pgprot_t vm_page_prot;
 	unsigned long vm_flags;		/* Flags, see mm.h. */
 
 	/*

mm/memory.c

@@ -1664,6 +1664,9 @@ out_unlock:
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implication of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context.  May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-		unsigned long addr, pfn_t pfn, bool mkwrite)
+		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+		bool mkwrite)
 {
-	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
 
 	BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,43 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context.  May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+				 pfn_t pfn, pgprot_t pgprot)
+{
+	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+EXPORT_SYMBOL(vmf_insert_mixed_prot);
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, false);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
@@ -1797,7 +1833,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, true);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
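
To illustrate the intended use of the new export, a minimal sketch of a fault handler that picks caching bits per fault (the mydev_* names are hypothetical placeholders, not part of this commit):

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical lookup helpers; a real driver resolves the backing
 * storage for vmf->pgoff itself. */
bool mydev_backed_by_iomem(struct vm_fault *vmf);
unsigned long mydev_pfn(struct vm_fault *vmf);

static vm_fault_t mydev_fault(struct vm_fault *vmf)
{
	pgprot_t prot = vmf->vma->vm_page_prot;

	/* The data may sit in write-combined io memory or in cached
	 * system memory, and may even migrate between the two while
	 * mmap'ed, so the pgprot is chosen per fault rather than being
	 * fixed in vma->vm_page_prot at mmap() time. */
	if (mydev_backed_by_iomem(vmf))
		prot = pgprot_writecombine(prot);

	return vmf_insert_mixed_prot(vmf->vma, vmf->address,
				     __pfn_to_pfn_t(mydev_pfn(vmf), PFN_DEV),
				     prot);
}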