slub: Add cmpxchg_double_slab()
Add a function that operates on the second doubleword in the page struct and manipulates the object counters, the freelist and the frozen attribute.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent fc9bb8c768
commit b789ef518b
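For orientation, here is a simplified sketch of the struct page area the commit message refers to: the freelist pointer plus the adjacent word that overlays the object counters and the frozen bit, so both words can be swapped atomically with one cmpxchg_double. This is an illustration, not the verbatim kernel definition; the actual field layout comes from the struct page rearrangement in the parent commit.

/* Simplified sketch of the SLUB part of struct page; field names
 * follow the kernel, but this is an illustration, not the real
 * definition from the parent commit. */
struct page_slub_words {
	void *freelist;			/* first word: first free object */
	union {				/* second word: "counters" */
		unsigned long counters;	/* viewed as one word for cmpxchg */
		struct {
			unsigned inuse:16;	/* objects in use */
			unsigned objects:15;	/* total objects in the slab */
			unsigned frozen:1;	/* slab is per-cpu frozen */
		};
	};
};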
include/linux/slub_def.h
@@ -33,6 +33,7 @@ enum stat_item {
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
mm/slub.c (65 lines changed)
@@ -131,6 +131,9 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
+/* Enable to log cmpxchg failures */
+#undef SLUB_DEBUG_CMPXCHG
+
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -170,6 +173,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -338,6 +342,37 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 	return x.x & OO_MASK;
 }
 
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+		void *freelist_old, unsigned long counters_old,
+		void *freelist_new, unsigned long counters_new,
+		const char *n)
+{
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (s->flags & __CMPXCHG_DOUBLE) {
+		if (cmpxchg_double(&page->freelist,
+			freelist_old, counters_old,
+			freelist_new, counters_new))
+			return 1;
+	} else
+#endif
+	{
+		if (page->freelist == freelist_old && page->counters == counters_old) {
+			page->freelist = freelist_new;
+			page->counters = counters_new;
+			return 1;
+		}
+	}
+
+	cpu_relax();
+	stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
  * Determine a map of object in use on a page.
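As a standalone illustration of the double-width compare-and-swap that the fast path above relies on, the following userspace sketch compares and swaps two adjacent machine words as one unit using the GCC atomic builtins. This is hypothetical demo code, not kernel code: in the kernel, cmpxchg_double is an arch-provided primitive (cmpxchg16b on x86-64), and the names here are invented for the example.

/* Userspace demo of a two-word CAS. Build with:
 *   gcc -O2 -mcx16 demo.c        (may also need -latomic)
 */
#include <stdio.h>
#include <stdbool.h>

/* Two adjacent words, aligned as the hardware requires (16 bytes
 * on x86-64 for cmpxchg16b). */
struct two_words {
	void *freelist;
	unsigned long counters;
} __attribute__((aligned(2 * sizeof(void *))));

static bool cmpxchg_double_demo(struct two_words *loc,
				void *fl_old, unsigned long cnt_old,
				void *fl_new, unsigned long cnt_new)
{
	struct two_words expected = { fl_old, cnt_old };
	struct two_words desired  = { fl_new, cnt_new };

	/* Succeeds only if BOTH words still hold their old values,
	 * and then replaces both atomically. */
	return __atomic_compare_exchange(loc, &expected, &desired,
					 false, __ATOMIC_SEQ_CST,
					 __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct two_words w = { NULL, 3 };
	int obj;

	if (cmpxchg_double_demo(&w, NULL, 3, &obj, 2))
		printf("both words swapped: counters=%lu\n", w.counters);
	return 0;
}

Because freelist and counters (which packs inuse, objects and frozen) change together or not at all, a concurrent update to either word makes the swap fail, which is exactly what lets SLUB drop the slab lock on this path.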
@@ -2596,6 +2631,12 @@ static int kmem_cache_open(struct kmem_cache *s,
 		}
 	}
 
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
+		/* Enable fast mode */
+		s->flags |= __CMPXCHG_DOUBLE;
+#endif
+
 	/*
 	 * The larger the object size is, the more pages we want on the partial
 	 * list to avoid pounding the page allocator excessively.
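Note the gating above: fast mode is enabled only when the CPU actually supports the double-word cmpxchg and no debug flags are set, presumably because the debug consistency checks need slab mutations to happen under the lock. The sysfs store handlers below enforce the same exclusion at runtime by clearing __CMPXCHG_DOUBLE whenever a debug flag is switched on.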
@@ -4248,8 +4289,10 @@ static ssize_t sanity_checks_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
 	s->flags &= ~SLAB_DEBUG_FREE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_DEBUG_FREE;
+	}
 	return length;
 }
 SLAB_ATTR(sanity_checks);
@@ -4263,8 +4306,10 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 							size_t length)
 {
 	s->flags &= ~SLAB_TRACE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_TRACE;
+	}
 	return length;
 }
 SLAB_ATTR(trace);
@@ -4281,8 +4326,10 @@ static ssize_t red_zone_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_RED_ZONE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_RED_ZONE;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4300,8 +4347,10 @@ static ssize_t poison_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_POISON;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_POISON;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4319,8 +4368,10 @@ static ssize_t store_user_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_STORE_USER;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_STORE_USER;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4493,6 +4544,8 @@ STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
+STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
+STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4550,6 +4603,8 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
 	&order_fallback_attr.attr,
+	&cmpxchg_double_fail_attr.attr,
+	&cmpxchg_double_cpu_fail_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,