linux-next/include/linux/percpu_counter.h
Nikolay Borisov 104b4e5139 percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
Currently, percpu_counter_add is a wrapper around __percpu_counter_add
which is preempt safe due to explicit calls to preempt_disable.  Given
how the __ prefix is used in percpu-related interfaces, the naming
unfortunately creates the false sense that __percpu_counter_add is
less safe than percpu_counter_add.  In terms of context-safety,
they're equivalent.  The only difference is that the __ version takes
a batch parameter.

Make this a bit more explicit by just renaming __percpu_counter_add to
percpu_counter_add_batch.

This patch doesn't cause any functional changes.

tj: Minor updates to patch description for clarity.  Cosmetic
    indentation updates.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Jan Kara <jack@suse.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: linux-mm@kvack.org
Cc: "David S. Miller" <davem@davemloft.net>
2017-06-20 15:42:32 -04:00
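
An illustrative call-site conversion under this rename (the counter name
sbi->free_blocks is hypothetical, for illustration only):

	/* before: the __ prefix wrongly suggested an unsafe variant */
	__percpu_counter_add(&sbi->free_blocks, nr, batch);

	/* after: the name states the actual difference, the batch */
	percpu_counter_add_batch(&sbi->free_blocks, nr, batch);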

#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};
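
/*
 * Note: percpu_counter_batch is the default fold threshold, i.e. the
 * per-cpu delta magnitude at which a CPU's local count is folded into
 * fbc->count.  It is scaled with the number of online CPUs (minimum 32);
 * see compute_batch_value() in lib/percpu_counter.c.
 */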
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);
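
/*
 * Each expansion of percpu_counter_init() defines its own static
 * lock_class_key, so lockdep gives every init call site a distinct lock
 * class instead of lumping all percpu_counter locks together.
 */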
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
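
/*
 * percpu_counter_add() uses the system-wide default batch; callers with
 * hot counters can invoke percpu_counter_add_batch() directly with a
 * larger batch, trading read accuracy for fewer fbc->lock acquisitions.
 */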
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: per-cpu deltas
 * below the batch size may not have been folded into fbc->count yet.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
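
/*
 * With preemption disabled, the read-modify-write of fbc->count below is
 * atomic with respect to other tasks running on this (single) CPU.
 */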
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
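
For reference, a minimal usage sketch built only on the interfaces above
(the nr_widgets counter and widget_* functions are hypothetical, for
illustration only):

	static struct percpu_counter nr_widgets;

	static int widgets_setup(void)
	{
		/* GFP_KERNEL: init may allocate per-cpu storage and sleep */
		return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
	}

	static void widget_created(void)
	{
		percpu_counter_inc(&nr_widgets);	/* per-cpu fast path */
	}

	static void widget_destroyed(void)
	{
		percpu_counter_dec(&nr_widgets);
	}

	static s64 widgets_in_use(void)
	{
		/* exact but slower: folds every CPU's pending delta */
		return percpu_counter_sum_positive(&nr_widgets);
	}

	static void widgets_teardown(void)
	{
		percpu_counter_destroy(&nr_widgets);
	}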