commit 2a3e83c6f9

On machines where the GART aperture is mapped over physical RAM, /proc/vmcore contains the remapped range and reading it may cause hangs or reboots.

In the past, the GART region was added into the resource map by commit 56dd669a13 ("[PATCH] Insert GART region into resource map"). However, inserting the iomem_resource from the early GART code caused resource conflicts with some AGP drivers (bko#72201), which got avoided by reverting the patch in commit 707d4eefbd ("Revert [PATCH] Insert GART region into resource map"). This revert introduced the /proc/vmcore bug.

The vmcore ELF header is either prepared by the kernel (when using the kexec_file_load syscall) or by the kexec userspace (when using the kexec_load syscall). Since we no longer have the GART iomem resource, the userspace kexec has no way of knowing which region to exclude from the ELF header.

Changes from v1 of this patch: instead of excluding the aperture from the ELF header, this patch makes /proc/vmcore return zeroes in the second kernel when attempting to read the aperture region. This is done by reusing the oldmem_pfn_is_ram infrastructure originally intended to exclude Xen ballooned memory. It works for both the kexec_file_load and kexec_load syscalls.

[Note that the GART region is the same in the first and second kernels: regardless of whether the first kernel fixed up the northbridge/BIOS setting and mapped the aperture over physical memory, the second kernel finds the northbridge properly configured by the first kernel, and the aperture never overlaps with e820 memory because the second kernel has a fake e820 map created from the crashkernel memory regions. Thus, the second kernel keeps the aperture address/size as configured by the first kernel.]

register_oldmem_pfn_is_ram can register only one callback and returns an error if a callback has already been registered. Since Xen used to be the only user of this function, it never checks the return value. Now that we have more than one user, I added a WARN_ON just in case AGP, Xen, or any other future user of register_oldmem_pfn_is_ram were to step on each other's toes.

Fixes: 707d4eefbd ("Revert [PATCH] Insert GART region into resource map")
Signed-off-by: Jiri Bohac <jbohac@suse.cz>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Baoquan He <bhe@redhat.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: David Airlie <airlied@linux.ie>
Cc: yinghai@kernel.org
Cc: joro@8bytes.org
Cc: kexec@lists.infradead.org
Cc: Borislav Petkov <bp@alien8.de>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Link: https://lkml.kernel.org/r/20180106010013.73suskgxm7lox7g6@dwarf.suse.cz
81 lines · 1.7 KiB · C
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/crash_dump.h>

#include <xen/interface/xen.h>
#include <xen/hvm.h>

#include "mmu.h"

#ifdef CONFIG_PROC_VMCORE
/*
 * This function is used in two contexts:
 * - the kdump kernel has to check whether a pfn of the crashed kernel
 *   was a ballooned page. vmcore is using this function to decide
 *   whether to access a pfn of the crashed kernel.
 * - the kexec kernel has to check whether a pfn was ballooned by the
 *   previous kernel. If the pfn is ballooned, handle it properly.
 * Returns 0 if the pfn is not backed by a RAM page, the caller may
 * handle the pfn specially in this case.
 */
static int xen_oldmem_pfn_is_ram(unsigned long pfn)
{
        struct xen_hvm_get_mem_type a = {
                .domid = DOMID_SELF,
                .pfn = pfn,
        };
        int ram;

        /* Ask the hypervisor what backs this pfn. */
        if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
                return -ENXIO;

        switch (a.mem_type) {
        case HVMMEM_mmio_dm:
                /* Emulated device memory: nothing to read from oldmem. */
                ram = 0;
                break;
        case HVMMEM_ram_rw:
        case HVMMEM_ram_ro:
        default:
                ram = 1;
                break;
        }

        return ram;
}
#endif

static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
        struct xen_hvm_pagetable_dying a;
        int rc;

        a.domid = DOMID_SELF;
        a.gpa = __pa(mm->pgd);
        /* Tell the hypervisor this page table is about to be torn down. */
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        WARN_ON_ONCE(rc < 0);
}

/* Probe whether the hypervisor implements HVMOP_pagetable_dying. */
static int is_pagetable_dying_supported(void)
{
        struct xen_hvm_pagetable_dying a;
        int rc = 0;

        a.domid = DOMID_SELF;
        a.gpa = 0x00;
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        if (rc < 0) {
                printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
                return 0;
        }
        return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
        if (is_pagetable_dying_supported())
                pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
        /* Only one oldmem callback can be registered; warn on conflict. */
        WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
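For context on how the callback registered above is consumed, the /proc/vmcore side keeps a single function pointer and defaults to treating every pfn as RAM. The following is a sketch of that single-slot design as the commit message describes it, not verbatim fs/proc/vmcore.c:

/* Sketch of the single-callback design in the vmcore reader. */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;  /* one slot only, hence the WARN_ON above */
        oldmem_pfn_is_ram = fn;
        return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn) = oldmem_pfn_is_ram;

        /* Without a registered callback, every pfn is assumed to be RAM. */
        return fn ? fn(pfn) : 1;
}

The oldmem read path then zero-fills any page for which pfn_is_ram() returns 0, which is what makes both the Xen balloon case and the GART aperture case read back as zeroes instead of hanging the machine.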