xen/balloon: set a mapping for ballooned out pages

Currently ballooned out pages are mapped to 0 and have INVALID_P2M_ENTRY
in the p2m. These ballooned out pages are used to map foreign grants
by gntdev and blkback (see alloc_xenballooned_pages).

Allocate a scratch page per cpu and map all the ballooned out pages to
that page's mfn. Set the p2m accordingly. This way reading from a
ballooned out page won't cause a kernel crash (see
http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html).

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
CC: alex@alex.org.uk
CC: dcrisan@flexiant.com
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Author:    Stefano Stabellini, 2013-08-04 15:39:40 +01:00
Committer: Konrad Rzeszutek Wilk
Commit:    cd9151e26d
Parent:    73cc4bb0c7
2 changed files with 69 additions and 3 deletions
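
In short, for a non-highmem page in a PV guest, decrease_reservation() now points both the page's kernel virtual mapping and its p2m entry at the per-cpu scratch page instead of clearing them. A minimal sketch of the new per-page handling, restating the hunks below (the helper name is made up for illustration; this is kernel-context code, not a standalone program):

static void remap_ballooned_pfn_to_scratch(unsigned long pfn)	/* hypothetical helper */
{
	struct page *scratch = __get_cpu_var(balloon_scratch_page);
	int ret;

	/* Kernel linear mapping: point the ballooned-out page's VA at the
	 * scratch page, read-only, instead of at __pte_ma(0). */
	ret = HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
			pfn_pte(page_to_pfn(scratch), PAGE_KERNEL_RO), 0);
	BUG_ON(ret);

	/* p2m: record the scratch page's mfn instead of INVALID_P2M_ENTRY, so a
	 * stray read of the ballooned-out pfn hits real memory instead of faulting. */
	__set_phys_to_machine(pfn, pfn_to_mfn(page_to_pfn(scratch)));
}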

drivers/xen/balloon.c

@@ -38,6 +38,7 @@
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/errno.h>
@@ -52,6 +53,7 @@
 #include <linux/notifier.h>
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
+#include <linux/percpu-defs.h>
 
 #include <asm/page.h>
 #include <asm/pgalloc.h>
@@ -90,6 +92,8 @@ EXPORT_SYMBOL_GPL(balloon_stats);
 
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
+
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
@@ -412,7 +416,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		if (xen_pv_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
-				__pte_ma(0), 0);
+				pfn_pte(page_to_pfn(__get_cpu_var(balloon_scratch_page)),
+					PAGE_KERNEL_RO), 0);
 			BUG_ON(ret);
 		}
 #endif
@@ -425,7 +430,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	/* No more mappings: invalidate P2M and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = mfn_to_pfn(frame_list[i]);
-		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+		__set_phys_to_machine(pfn,
+				pfn_to_mfn(page_to_pfn(__get_cpu_var(balloon_scratch_page))));
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -480,6 +486,18 @@ static void balloon_process(struct work_struct *work)
 	mutex_unlock(&balloon_mutex);
 }
 
+struct page *get_balloon_scratch_page(void)
+{
+	struct page *ret = get_cpu_var(balloon_scratch_page);
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
+void put_balloon_scratch_page(void)
+{
+	put_cpu_var(balloon_scratch_page);
+}
+
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 void balloon_set_new_target(unsigned long target)
 {
@@ -573,13 +591,47 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	}
 }
 
+static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+					unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+	switch (action) {
+	case CPU_UP_PREPARE:
+		if (per_cpu(balloon_scratch_page, cpu) != NULL)
+			break;
+		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+			return NOTIFY_BAD;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+	.notifier_call = balloon_cpu_notify,
+};
+
 static int __init balloon_init(void)
 {
-	int i;
+	int i, cpu;
 
 	if (!xen_domain())
 		return -ENODEV;
 
+	for_each_online_cpu(cpu)
+	{
+		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+			return -ENOMEM;
+		}
+	}
+	register_cpu_notifier(&balloon_cpu_notifier);
+
 	pr_info("Initialising balloon driver\n");
 
 	balloon_stats.current_pages = xen_pv_domain()
@@ -616,4 +668,15 @@ static int __init balloon_init(void)
 
 subsys_initcall(balloon_init);
 
+static int __init balloon_clear(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(balloon_scratch_page, cpu) = NULL;
+	return 0;
+}
+early_initcall(balloon_clear);
+
 MODULE_LICENSE("GPL");
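
The practical effect claimed in the commit message (and in the linked xen-devel thread) is that a read through the linear mapping of a ballooned-out, non-highmem PV page no longer oopses. A hypothetical illustration, not part of this patch:

/* Hypothetical illustration: after 'pfn' has been ballooned out, its kernel VA
 * maps the per-cpu scratch page read-only, so a read returns scratch-page data
 * instead of faulting.  A write through the same VA would still fault. */
static unsigned long peek_ballooned_pfn(unsigned long pfn)
{
	return *(unsigned long *)__va(pfn << PAGE_SHIFT);
}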

include/xen/balloon.h

@@ -29,6 +29,9 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages,
 		bool highmem);
 void free_xenballooned_pages(int nr_pages, struct page **pages);
 
+struct page *get_balloon_scratch_page(void);
+void put_balloon_scratch_page(void);
+
 struct device;
 #ifdef CONFIG_XEN_SELFBALLOONING
 extern int register_xen_selfballooning(struct device *dev);
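
With the two declarations above exposed through <xen/balloon.h>, other in-kernel code (for instance the gntdev/blkback paths mentioned in the commit message) can look up the scratch page when it needs a harmless mfn to point a mapping at. A hypothetical usage sketch, not part of this patch; the caller name is illustrative:

#include <xen/balloon.h>
#include <asm/xen/page.h>	/* pfn_to_mfn() */

/* Hypothetical caller: return the mfn of this CPU's balloon scratch page.
 * get_balloon_scratch_page() uses get_cpu_var() internally, so preemption
 * stays disabled until the matching put_balloon_scratch_page(). */
static unsigned long example_scratch_mfn(void)
{
	struct page *scratch = get_balloon_scratch_page();
	unsigned long mfn = pfn_to_mfn(page_to_pfn(scratch));

	put_balloon_scratch_page();
	return mfn;
}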