linux-next/include/xen/mem-reservation.h
commit 197ecb3802 ("xen/balloon: add runtime control for scrubbing ballooned out pages") by Marek Marczykowski-Górecki
Scrubbing pages on initial balloon down can take some time, especially
in the nested virtualization case (nested EPT is slow). When an HVM/PVH
guest is started with memory= significantly lower than maxmem=, all the
extra pages are scrubbed before being returned to Xen. But since most of
them were never used at that point, Xen needs to populate them first
(from the populate-on-demand pool). In the nested virt case (Xen inside
KVM) this slows down guest boot by 15-30s with just 1.5GB to be
returned to Xen.

Add a runtime parameter to enable/disable scrubbing, so that it can be
disabled initially and re-enabled later during boot (for example in the
initramfs). Such usage relies on the assumptions that a) most pages
ballooned out during initial boot were never used at all, and b) even if
they were, very few secrets are in the guest at that time (before any
serious userspace kicks in).
Convert CONFIG_XEN_SCRUB_PAGES to CONFIG_XEN_SCRUB_PAGES_DEFAULT (also
enabled by default), which controls the default value of the new runtime
switch (see the sketch below the sign-off block).

Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
2018-09-14 08:51:10 -04:00
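
The header below only declares the xen_scrub_pages switch; the actual
definition lives on the driver side. A minimal sketch of that wiring,
assuming core_param() exposes the flag as a writable boolean (the file
placement, __read_mostly attribute, and sysfs path are assumptions, not
confirmed by this page):

/* Sketch: definition of the runtime scrub switch (assumed location:
 * drivers/xen/mem-reservation.c). */
#include <linux/cache.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

/* The default comes from the new Kconfig option; IS_ENABLED() maps
 * CONFIG_XEN_SCRUB_PAGES_DEFAULT to a 0/1 constant. */
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);

/* core_param() registers an unprefixed kernel command-line parameter;
 * with 0644 permissions it can also be flipped at runtime, e.g. through
 * /sys/module/kernel/parameters/xen_scrub_pages (path assumed), which is
 * how an initramfs could re-enable scrubbing after the initial balloon
 * down. */
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0644);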


/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H

#include <linux/highmem.h>

#include <xen/page.h>

/* Runtime switch controlling whether ballooned-out pages are scrubbed. */
extern bool xen_scrub_pages;

/* Scrub a page before handing it back to Xen, if scrubbing is enabled. */
static inline void xenmem_reservation_scrub_page(struct page *page)
{
	if (xen_scrub_pages)
		clear_highpage(page);
}

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages);
#endif

/*
 * Re-establish kernel virtual mappings for pages just populated from Xen;
 * a no-op for auto-translated (HVM/PVH) guests.
 */
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							struct page **pages,
							xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}

/*
 * Tear down kernel virtual mappings for pages about to be returned to Xen;
 * a no-op for auto-translated (HVM/PVH) guests.
 */
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
						       struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}

int xenmem_reservation_increase(int count, xen_pfn_t *frames);
int xenmem_reservation_decrease(int count, xen_pfn_t *frames);

#endif
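
For context, here is a hedged sketch of how a balloon-out path might
combine these helpers. The function is hypothetical (the real consumer is
the Xen balloon driver, which this page does not show), and it assumes
xen_page_to_gfn() from xen/page.h:

/* Hypothetical caller: scrub 'count' pages and return them to Xen. */
static int example_balloon_out(struct page **pages, int count)
{
	xen_pfn_t frames[count];	/* illustration only; real code would
					 * use a preallocated frame list */
	int i, ret;

	for (i = 0; i < count; i++) {
		/* Wipe contents first (no-op unless xen_scrub_pages is set). */
		xenmem_reservation_scrub_page(pages[i]);
		frames[i] = xen_page_to_gfn(pages[i]);
	}

	/* Drop PV kernel mappings (no-op for HVM/PVH), then hand the
	 * frames back to Xen. */
	xenmem_reservation_va_mapping_reset(count, pages);
	ret = xenmem_reservation_decrease(count, frames);

	return ret == count ? 0 : -ENOMEM;
}

Note the ordering: scrubbing must happen while the guest still owns the
pages, before xenmem_reservation_decrease() releases the frames to Xen.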