Memoryless nodes: SLUB support

Simply switch all for_each_online_node() uses to
for_each_node_state(node, N_NORMAL_MEMORY).  That way SLUB only operates
on nodes with regular memory.  Any allocation attempt on a memoryless node
or a node with just highmem will fail, whereupon SLUB will fetch memory
from a nearby node (depending on how memory policies and cpusets describe
fallback).
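
For illustration only, not part of the patch: a minimal userspace sketch of
the node-state idea, modelling node_states[] as plain bitmasks to show why
iterating N_NORMAL_MEMORY skips memoryless (and highmem-only) nodes.
MAX_NUMNODES, the mask values, and the output below are assumptions made up
for the example; the kernel's real definitions live in
include/linux/nodemask.h and use nodemask_t, not an unsigned long.

#include <stdio.h>

#define MAX_NUMNODES 8

enum node_states { N_ONLINE, N_NORMAL_MEMORY, NR_NODE_STATES };

/* Bit n set => node n is in that state (illustrative values). */
static unsigned long node_states[NR_NODE_STATES] = {
	[N_ONLINE]        = 0x0f,  /* nodes 0-3 are online */
	[N_NORMAL_MEMORY] = 0x05,  /* only nodes 0 and 2 have regular memory */
};

#define for_each_node_state(node, state)                  \
	for ((node) = 0; (node) < MAX_NUMNODES; (node)++) \
		if (node_states[(state)] & (1UL << (node)))

int main(void)
{
	int node;

	/* Old behaviour: memoryless nodes 1 and 3 are visited too. */
	for_each_node_state(node, N_ONLINE)
		printf("online: node %d\n", node);

	/* New behaviour: only nodes that actually carry regular memory. */
	for_each_node_state(node, N_NORMAL_MEMORY)
		printf("normal memory: node %d\n", node);
	return 0;
}

In the kernel, for_each_online_node(node) is defined as
for_each_node_state(node, N_ONLINE), so the patch only tightens the
iteration mask; the loop structure is unchanged.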

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f64dc58c54
parent 04231b3002
Author: Christoph Lameter <clameter@sgi.com>
Date:   2007-10-16 01:25:33 -07:00
Committed by: Linus Torvalds

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1921,7 +1921,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
 		if (n && n != &s->local_node)
 			kmem_cache_free(kmalloc_caches, n);
@@ -1939,7 +1939,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	else
 		local_node = 0;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
 		if (local_node == node)
@@ -2192,7 +2192,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	flush_all(s);
 
 	/* Attempt to free all objects */
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
@@ -2521,7 +2521,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		n = get_node(s, node);
 
 		if (!n->nr_partial)
@@ -2916,7 +2916,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		count += validate_slab_node(s, n, map);
@@ -3136,7 +3136,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	/* Push back cpu slabs */
 	flush_all(s);
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long flags;
 		struct page *page;
@@ -3263,7 +3263,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		if (flags & SO_PARTIAL) {
@@ -3291,7 +3291,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-	for_each_online_node(node)
+	for_each_node_state(node, N_NORMAL_MEMORY)
 		if (nodes[node])
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);