
vmalloc: lift the arm flag for coherent mappings to common code

The arm architecture had a VM_ARM_DMA_CONSISTENT flag to mark DMA
coherent remappings for a while.  Lift this flag to common code so
that we can use it generically.  We also check it in the only place
VM_USERMAP is directly checked so that we can entirely replace that
flag as well (although I'm not even sure why we'd want to allow
remapping DMA mappings, but I'd rather not change behavior).

Signed-off-by: Christoph Hellwig <hch@lst.de>
Author: Christoph Hellwig
Date:   2019-06-03 08:55:13 +02:00
parent 249baa5479
commit fe9041c245

4 changed files with 13 additions and 19 deletions
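For context, a minimal sketch (not part of the commit) of how the lifted flag is meant to be used: the DMA remap helpers tag the vmalloc area with VM_DMA_COHERENT when they create the kernel mapping, and generic code can later recognize such an area via find_vm_area(). dma_common_contiguous_remap() and find_vm_area() are the real kernel APIs, with the signatures visible in the diff below; the two example_* wrappers are hypothetical illustrations.

/*
 * Illustrative sketch only, not from this commit.  The example_*
 * helpers are hypothetical; dma_common_contiguous_remap() and
 * find_vm_area() are the real kernel APIs this series touches.
 */
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

static void *example_coherent_remap(struct page *page, size_t size,
				    pgprot_t prot)
{
	/* Tag the new vmalloc area with the generic VM_DMA_COHERENT flag
	 * instead of the arm-private VM_ARM_DMA_CONSISTENT | VM_USERMAP. */
	return dma_common_contiguous_remap(page, size, VM_DMA_COHERENT,
			prot, __builtin_return_address(0));
}

static bool example_is_coherent_remap(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	/* Generic code such as remap_vmalloc_range_partial() can now
	 * identify DMA coherent remaps without an arch-private flag;
	 * like VM_USERMAP, the flag also permits user-space remapping. */
	return area && (area->flags & VM_DMA_COHERENT);
}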

arch/arm/mm/dma-mapping.c

@@ -343,19 +343,13 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	/*
-	 * DMA allocation can be mapped to user space, so lets
-	 * set VM_USERMAP flags too.
-	 */
-	return dma_common_contiguous_remap(page, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+	return dma_common_contiguous_remap(page, size, VM_DMA_COHERENT,
 			prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+	dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -1371,8 +1365,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	return dma_common_pages_remap(pages, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
+	return dma_common_pages_remap(pages, size, VM_DMA_COHERENT, prot,
+			caller);
 }
 
 /*
@@ -1456,7 +1450,7 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 		return cpu_addr;
 
 	area = find_vm_area(cpu_addr);
-	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+	if (area && (area->flags & VM_DMA_COHERENT))
 		return area->pages;
 	return NULL;
 }
@@ -1614,10 +1608,8 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
-		dma_common_free_remap(cpu_addr, size,
-				VM_ARM_DMA_CONSISTENT | VM_USERMAP);
-	}
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
+		dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
 
 	__iommu_remove_mapping(dev, handle, size);
 	__iommu_free_buffer(dev, pages, size, attrs);

arch/arm/mm/mm.h

@@ -70,9 +70,6 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #define VM_ARM_MTYPE(mt)	((mt) << 20)
 #define VM_ARM_MTYPE_MASK	(0x1f << 20)
 
-/* consistent regions used by dma_alloc_attrs() */
-#define VM_ARM_DMA_CONSISTENT	0x20000000
-
 
 struct static_vm {
 	struct vm_struct vm;

include/linux/vmalloc.h

@@ -18,6 +18,7 @@ struct notifier_block;	/* in notifier.h */
 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
+#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
@@ -26,6 +27,7 @@ struct notifier_block;	/* in notifier.h */
 	 * vfree_atomic().
 	 */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* Reset direct map and flush TLB on unmap */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
mm/vmalloc.c

@@ -2993,7 +2993,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 	if (!area)
 		return -EINVAL;
 
-	if (!(area->flags & VM_USERMAP))
+	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
 		return -EINVAL;
 
 	if (kaddr + size > area->addr + get_vm_area_size(area))
@@ -3496,6 +3496,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->flags & VM_USERMAP)
 		seq_puts(m, " user");
 
+	if (v->flags & VM_DMA_COHERENT)
+		seq_puts(m, " dma-coherent");
+
 	if (is_vmalloc_addr(v->pages))
 		seq_puts(m, " vpages");