
hexagon: Use common threadinfo allocator

The core now has a threadinfo allocator which uses a kmemcache when
THREAD_SIZE < PAGE_SIZE.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Richard Kuo <rkuo@codeaurora.org>
Link: http://lkml.kernel.org/r/20120505150141.812612113@linutronix.de
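For context, the common allocator this patch switches hexagon over to behaves roughly as sketched below: stacks of a page or more come straight from the page allocator, while sub-page stacks are carved out of a THREAD_SIZE-aligned kmem_cache. This is a simplified illustration of the mechanism, not the verbatim core code; the helper names (alloc_ti, free_ti, example_ti_cache_init, ti_cache) and the plain GFP_KERNEL flags are placeholders.

/*
 * Simplified sketch of the common threadinfo allocation policy.
 * Illustrative only: helper names are placeholders and the GFP flags
 * are plain GFP_KERNEL, not the exact core implementation.
 */
#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/thread_info.h>

#if THREAD_SIZE >= PAGE_SIZE
/* Stacks are one page or more: take whole pages from the page allocator. */
static struct thread_info *alloc_ti(int node)
{
        struct page *page = alloc_pages_node(node, GFP_KERNEL,
                                             THREAD_SIZE_ORDER);

        return page ? page_address(page) : NULL;
}

static void free_ti(struct thread_info *ti)
{
        free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#else
/* Sub-page stacks: carve them out of a THREAD_SIZE-aligned kmem_cache. */
static struct kmem_cache *ti_cache;

static struct thread_info *alloc_ti(int node)
{
        return kmem_cache_alloc_node(ti_cache, GFP_KERNEL, node);
}

static void free_ti(struct thread_info *ti)
{
        kmem_cache_free(ti_cache, ti);
}

void __init example_ti_cache_init(void)
{
        /* size == align == THREAD_SIZE keeps every stack naturally aligned. */
        ti_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                     THREAD_SIZE, 0, NULL);
        BUG_ON(!ti_cache);
}
#endif

The THREAD_SIZE alignment matters because many architectures locate thread_info by masking the stack pointer with ~(THREAD_SIZE - 1).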
Thomas Gleixner 2012-05-05 15:05:44 +00:00
parent e6e9c540d5
commit fe844052bc
2 changed files with 0 additions and 45 deletions

@@ -31,15 +31,7 @@
#define THREAD_SHIFT 12
#define THREAD_SIZE (1<<THREAD_SHIFT)
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* don't use standard allocator */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif
#ifndef __ASSEMBLY__
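
A quick worked reading of the #if above, using illustrative page sizes that are not taken from this patch or any particular hexagon configuration:

/* Illustrative arithmetic only -- PAGE_SHIFT depends on the kernel config.  */
/* 4 KiB pages:  PAGE_SHIFT = 12, THREAD_SHIFT (12) >= PAGE_SHIFT, so        */
/*   THREAD_SIZE_ORDER = 12 - 12 = 0 -> one page per kernel stack.           */
/* 16 KiB pages: PAGE_SHIFT = 14, THREAD_SHIFT (12) < PAGE_SHIFT, so the     */
/*   #else branch applied and the arch-private kmemcache allocator (removed  */
/*   in the second hunk below) was needed; the common allocator now covers   */
/*   that case.                                                               */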

@@ -233,43 +233,6 @@ unsigned long get_wchan(struct task_struct *p)
        return 0;
}

/*
 * Borrowed from PowerPC -- basically allow smaller kernel stacks if we
 * go crazy with the page sizes.
 */
#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

/* Weak symbol; called by init/main.c */
void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */

/*
 * Required placeholder.
 */