mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-28 14:44:10 +08:00
[POWERPC] unmap_vm_area becomes unmap_kernel_range for the public
This makes unmap_vm_area static and a wrapper around a new exported unmap_kernel_range that takes an explicit range instead of a vm_area struct. This makes it more versatile for code that wants to play with kernel page tables outside of the standard vmalloc area. (One example is some rework of the PowerPC PCI IO space mapping code that depends on that patch and removes some code duplication and horrible abuse of forged struct vm_struct). Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:
parent
3c8c90ab88
commit
c19c03fc74
@ -253,7 +253,7 @@ Here are the routines, one by one:
|
||||
|
||||
The first of these two routines is invoked after map_vm_area()
|
||||
has installed the page table entries. The second is invoked
|
||||
before unmap_vm_area() deletes the page table entries.
|
||||
before unmap_kernel_range() deletes the page table entries.
|
||||
|
||||
There exists another whole class of cpu cache issues which currently
|
||||
require a whole different set of interfaces to handle properly.
|
||||
|
@ -301,7 +301,8 @@ void im_free(void * addr)
|
||||
for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
|
||||
if (tmp->addr == addr) {
|
||||
*p = tmp->next;
|
||||
unmap_vm_area(tmp);
|
||||
unmap_kernel_range((unsigned long)tmp->addr,
|
||||
tmp->size);
|
||||
kfree(tmp);
|
||||
mutex_unlock(&imlist_mutex);
|
||||
return;
|
||||
|
@ -240,7 +240,6 @@ int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
|
||||
/*
|
||||
* Unmap an IO region and remove it from imalloc'd list.
|
||||
* Access to IO memory should be serialized by driver.
|
||||
* This code is modeled after vmalloc code - unmap_vm_area()
|
||||
*
|
||||
* XXX what about calls before mem_init_done (ie python_countermeasures())
|
||||
*/
|
||||
|
@ -65,9 +65,10 @@ extern struct vm_struct *get_vm_area_node(unsigned long size,
|
||||
unsigned long flags, int node,
|
||||
gfp_t gfp_mask);
|
||||
extern struct vm_struct *remove_vm_area(void *addr);
|
||||
|
||||
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
|
||||
struct page ***pages);
|
||||
extern void unmap_vm_area(struct vm_struct *area);
|
||||
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
|
||||
|
||||
/*
|
||||
 * Internals. Don't use..
|
||||
|
13
mm/vmalloc.c
13
mm/vmalloc.c
@ -68,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
|
||||
} while (pud++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
void unmap_vm_area(struct vm_struct *area)
|
||||
void unmap_kernel_range(unsigned long addr, unsigned long size)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
unsigned long next;
|
||||
unsigned long addr = (unsigned long) area->addr;
|
||||
unsigned long end = addr + area->size;
|
||||
unsigned long start = addr;
|
||||
unsigned long end = addr + size;
|
||||
|
||||
BUG_ON(addr >= end);
|
||||
pgd = pgd_offset_k(addr);
|
||||
@ -84,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
|
||||
continue;
|
||||
vunmap_pud_range(pgd, addr, next);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
flush_tlb_kernel_range((unsigned long) area->addr, end);
|
||||
flush_tlb_kernel_range(start, end);
|
||||
}
|
||||
|
||||
static void unmap_vm_area(struct vm_struct *area)
|
||||
{
|
||||
unmap_kernel_range((unsigned long)area->addr, area->size);
|
||||
}
|
||||
|
||||
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
|
||||
|
Loading…
Reference in New Issue
Block a user