mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 21:54:11 +08:00
a3f5c338b9
We have seen bad_pte_print when testing crashdump on an SN machine with a recent 2.6.20 kernel. There are tons of bad pte print (pfn < max_low_pfn) reports when the crash kernel boots up; all of the reported bad pages are inside the initmem range. That is because the crash kernel code and data happen to be at the beginning of the 1st node. build_node_maps in discontig.c will bypass reserved regions with filter_rsvd_memory. Since min_low_pfn is calculated in build_node_map, in this case min_low_pfn will be greater than the kernel code and data. Because pages inside initmem are freed and reused later, we saw the pfn_valid check fail on those pages. I think this could theoretically happen on a normal kernel as well. When I checked the min_low_pfn and max_low_pfn calculation in contig.c and discontig.c, I found more issues than this. 1. The min_low_pfn and max_low_pfn calculation is inconsistent between contig.c and discontig.c: min_low_pfn is calculated as the first page number of the boot memmap in contig.c (Why? Though this may work most of the time, I don't think it is the right logic), whereas it is calculated as the lowest physical memory page number, bypassing reserved regions, in discontig.c. max_low_pfn is calculated including reserved regions in contig.c, but excluding reserved regions in discontig.c. 2. If the kernel code and data region happens to be at the beginning or the end of physical memory, and the min_low_pfn and max_low_pfn calculation bypasses the kernel code and data, pages in initmem will be reported bad. 3. The initrd also lives in reserved regions; if it is at the beginning or the end of physical memory, the kernel will refuse to reuse that memory because of the virt_addr_valid check in free_initrd_mem. So it is better to fix and clean up these issues: calculate min_low_pfn and max_low_pfn in a consistent way. Signed-off-by: Zou Nan hai <nanhai.zou@intel.com> Acked-by: Jay Lan <jlan@sgi.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
75 lines
2.3 KiB
C
75 lines
2.3 KiB
C
#ifndef meminit_h
|
|
#define meminit_h
|
|
|
|
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*/
|
|
|
|
|
|
/*
|
|
* Entries defined so far:
|
|
* - boot param structure itself
|
|
* - memory map
|
|
* - initrd (optional)
|
|
* - command line string
|
|
* - kernel code & data
|
|
* - crash dumping code reserved region
|
|
* - Kernel memory map built from EFI memory map
|
|
* - ELF core header
|
|
*
|
|
* More could be added if necessary
|
|
*/
|
|
#define IA64_MAX_RSVD_REGIONS 8
|
|
|
|
/*
 * One boot-time reserved memory range, stored half-open as [start, end).
 * Entries of this type populate the rsvd_region[] table declared below.
 */
struct rsvd_region {
	unsigned long start;	/* virtual address of the first byte of the region */
	unsigned long end;	/* virtual address one past the last byte of the region */
};
|
|
|
|
extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
|
|
extern int num_rsvd_regions;
|
|
|
|
extern void find_memory (void);
|
|
extern void reserve_memory (void);
|
|
extern void find_initrd (void);
|
|
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
|
|
extern void efi_memmap_init(unsigned long *, unsigned long *);
|
|
extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
|
|
|
|
extern unsigned long vmcore_find_descriptor_size(unsigned long address);
|
|
extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
|
|
|
|
/*
|
|
* For rounding an address to the next IA64_GRANULE_SIZE or order
|
|
*/
|
|
#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
|
|
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
|
|
#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
|
|
|
|
#ifdef CONFIG_NUMA
|
|
extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
|
|
#else
|
|
# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
|
|
#endif
|
|
|
|
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
|
|
|
|
extern int register_active_ranges(u64 start, u64 end, void *arg);
|
|
|
|
#ifdef CONFIG_VIRTUAL_MEM_MAP
|
|
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
|
|
extern unsigned long vmalloc_end;
|
|
extern struct page *vmem_map;
|
|
extern int find_largest_hole (u64 start, u64 end, void *arg);
|
|
extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
|
|
extern int vmemmap_find_next_valid_pfn(int, int);
|
|
#else
|
|
/*
 * Fallback for !CONFIG_VIRTUAL_MEM_MAP: with no virtual mem_map there
 * are no holes in a node's memmap, so the next valid index is simply
 * the following one.
 */
static inline int vmemmap_find_next_valid_pfn(int node, int i)
{
	return 1 + i;
}
|
|
#endif
|
|
#endif /* meminit_h */
|