mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-18 15:44:02 +08:00
[PATCH] gfp_t: the rest

zone handling, mapping->flags handling

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
c4cdd03831
commit
260b23674f
@@ -302,7 +302,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
|
||||
void build_all_zonelists(void);
|
||||
void wakeup_kswapd(struct zone *zone, int order);
|
||||
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
||||
int alloc_type, int can_try_harder, int gfp_high);
|
||||
int alloc_type, int can_try_harder, gfp_t gfp_high);
|
||||
|
||||
#ifdef CONFIG_HAVE_MEMORY_PRESENT
|
||||
void memory_present(int nid, unsigned long start, unsigned long end);
|
||||
|
@@ -21,16 +21,17 @@
|
||||
|
||||
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
|
||||
{
|
||||
return mapping->flags & __GFP_BITS_MASK;
|
||||
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is non-atomic. Only to be used before the mapping is activated.
|
||||
* Probably needs a barrier...
|
||||
*/
|
||||
static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
|
||||
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
|
||||
{
|
||||
m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
|
||||
m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
|
||||
(__force unsigned long)mask;
|
||||
}
|
||||
|
||||
/*
|
||||
|
14
mm/highmem.c
14
mm/highmem.c
@@ -30,11 +30,9 @@
|
||||
|
||||
static mempool_t *page_pool, *isa_page_pool;
|
||||
|
||||
/*
 * page_pool_alloc_isa - mempool allocator for the ISA bounce pool.
 *
 * Forces GFP_DMA into the mask so pages come from ZONE_DMA (ISA DMA
 * can only address the low 16MB).  OR-ing the flag in directly replaces
 * the old trick of smuggling __GFP_DMA through the pool's @data cookie,
 * which required casting gfp_t through unsigned int and defeated
 * sparse's bitwise typechecking.  @data is unused.
 */
static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
{
	return alloc_page(gfp_mask | GFP_DMA);
}
|
||||
|
||||
static void page_pool_free(void *page, void *data)
|
||||
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
|
||||
* n means that there are (n-1) current users of it.
|
||||
*/
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
|
||||
/*
 * page_pool_alloc - mempool allocator for the highmem bounce pool.
 *
 * Plain alloc_page() with the caller's mask; unlike the ISA variant it
 * adds no zone modifier.  Defined under CONFIG_HIGHMEM only, next to
 * its sole user.  @data is unused.
 */
static void *page_pool_alloc(gfp_t gfp_mask, void *data)
{
	return alloc_page(gfp_mask);
}
|
||||
|
||||
static int pkmap_count[LAST_PKMAP];
|
||||
static unsigned int last_pkmap_nr;
|
||||
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
|
||||
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
|
||||
if (isa_page_pool)
|
||||
return 0;
|
||||
|
||||
isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
|
||||
isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
|
||||
if (!isa_page_pool)
|
||||
BUG();
|
||||
|
||||
|
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
|
||||
* of the allocation.
|
||||
*/
|
||||
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
||||
int classzone_idx, int can_try_harder, int gfp_high)
|
||||
int classzone_idx, int can_try_harder, gfp_t gfp_high)
|
||||
{
|
||||
/* free_pages my go negative - that's OK */
|
||||
long min = mark, free_pages = z->free_pages - (1 << order) + 1;
|
||||
@@ -777,7 +777,7 @@ struct page * fastcall
|
||||
__alloc_pages(gfp_t gfp_mask, unsigned int order,
|
||||
struct zonelist *zonelist)
|
||||
{
|
||||
const int wait = gfp_mask & __GFP_WAIT;
|
||||
const gfp_t wait = gfp_mask & __GFP_WAIT;
|
||||
struct zone **zones, *z;
|
||||
struct page *page;
|
||||
struct reclaim_state reclaim_state;
|
||||
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
|
||||
* get_zeroed_page() returns a 32-bit address, which cannot represent
|
||||
* a highmem page
|
||||
*/
|
||||
BUG_ON(gfp_mask & __GFP_HIGHMEM);
|
||||
BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
|
||||
|
||||
page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
|
||||
if (page)
|
||||
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
|
||||
return j;
|
||||
}
|
||||
|
||||
static inline int highest_zone(int zone_bits)
|
||||
{
|
||||
int res = ZONE_NORMAL;
|
||||
if (zone_bits & (__force int)__GFP_HIGHMEM)
|
||||
res = ZONE_HIGHMEM;
|
||||
if (zone_bits & (__force int)__GFP_DMA)
|
||||
res = ZONE_DMA;
|
||||
return res;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
#define MAX_NODE_LOAD (num_online_nodes())
|
||||
static int __initdata node_load[MAX_NUMNODES];
|
||||
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
|
||||
zonelist = pgdat->node_zonelists + i;
|
||||
for (j = 0; zonelist->zones[j] != NULL; j++);
|
||||
|
||||
k = ZONE_NORMAL;
|
||||
if (i & __GFP_HIGHMEM)
|
||||
k = ZONE_HIGHMEM;
|
||||
if (i & __GFP_DMA)
|
||||
k = ZONE_DMA;
|
||||
k = highest_zone(i);
|
||||
|
||||
j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
|
||||
zonelist->zones[j] = NULL;
|
||||
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
|
||||
zonelist = pgdat->node_zonelists + i;
|
||||
|
||||
j = 0;
|
||||
k = ZONE_NORMAL;
|
||||
if (i & __GFP_HIGHMEM)
|
||||
k = ZONE_HIGHMEM;
|
||||
if (i & __GFP_DMA)
|
||||
k = ZONE_DMA;
|
||||
|
||||
k = highest_zone(i);
|
||||
j = build_zonelists_node(pgdat, zonelist, j, k);
|
||||
/*
|
||||
* Now we build the zonelist so that it contains the zones
|
||||
|
Loading…
Reference in New Issue
Block a user