
[PATCH] Use __read_mostly on some hot fs variables

While hunting with oprofile on an SMP platform, I discovered that dentry lookups were slowed down because d_hash_mask, d_hash_shift and dentry_hashtable sat in the same cache line as inodes_stat.  So each time inodes_stat is changed by one CPU, the other CPUs have to refill their cache line.

This patch moves some variables to the __read_mostly section, in order to avoid that false sharing.  RCU dentry lookups can go full speed.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit fa3536cc14
parent 878a9f30d7
Author: Eric Dumazet <dada1@cosmosbay.com>
Date:   2006-03-26 01:37:24 -08:00
Committer: Linus Torvalds

12 changed files with 35 additions and 35 deletions
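
As background for the diffs below: __read_mostly places a variable in the kernel's separate .data.read_mostly section, so that data written rarely (hash masks, slab cache pointers, mount points) is not packed into the same cache line as frequently written data such as statistics counters. The following standalone sketch illustrates the idea; the demo_* names are invented for this example, and the macro definition mirrors the i386 one of this kernel era rather than anything added by this patch.

/* Standalone illustration (compiles as ordinary userspace C). */
#include <stdio.h>

/* Roughly how the kernel defined it on i386 at the time. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Written once at init, then only read: analogous to d_hash_mask,
 * d_hash_shift and dentry_hashtable in fs/dcache.c. */
static unsigned int demo_hash_mask __read_mostly = 1023;
static unsigned int demo_hash_shift __read_mostly = 10;

/* Frequently written: analogous to inodes_stat.  Without the section
 * annotation above, the linker may place this right next to the hash
 * parameters; every update would then force other CPUs to refetch the
 * cache line that the lookup fast path only reads. */
static unsigned long demo_stat_count;

/* Lookup fast path: touches read-mostly data only. */
static unsigned int demo_hash(unsigned long key)
{
        return (unsigned int)((key >> demo_hash_shift) & demo_hash_mask);
}

int main(void)
{
        demo_stat_count++;      /* the writer dirties its own line */
        printf("bucket=%u updates=%lu\n", demo_hash(123456UL), demo_stat_count);
        return 0;
}

With the annotation, writes to demo_stat_count ping-pong a line in .data while the hash parameters stay clean in .data.read_mostly; that separation is exactly what this patch buys for the dentry and inode hash variables.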

fs/bio.c

@@ -30,7 +30,7 @@
 #define BIO_POOL_SIZE 256

-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;

 #define BIOVEC_NR_POOLS 6
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;

 struct biovec_slab {
        int nr_vecs;

fs/block_dev.c

@@ -234,7 +234,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;

 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
@@ -308,7 +308,7 @@ static struct file_system_type bd_type = {
        .kill_sb = kill_anon_super,
 };

-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
 struct super_block *blockdev_superblock;

 void __init bdev_cache_init(void)

fs/dcache.c

@@ -36,7 +36,7 @@
 /* #define DCACHE_DEBUG 1 */

-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -44,7 +44,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 EXPORT_SYMBOL(dcache_lock);

-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;

 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@@ -59,9 +59,9 @@ static kmem_cache_t *dentry_cache;
 #define D_HASHBITS d_hash_shift
 #define D_HASHMASK d_hash_mask

-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
 static LIST_HEAD(dentry_unused);

 /* Statistics gathering. */
@@ -1719,10 +1719,10 @@ static void __init dcache_init(unsigned long mempages)
 }

 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;

 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;

 EXPORT_SYMBOL(d_genocide);

fs/dcookies.c

@@ -38,9 +38,9 @@ struct dcookie_struct {

 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;

 static inline int is_live(void)
 {

fs/dnotify.c

@@ -21,9 +21,9 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>

-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;

-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;

 static void redo_inode_mask(struct inode *inode)
 {

fs/eventpoll.c

@@ -281,13 +281,13 @@ static struct mutex epmutex;
 static struct poll_safewake psw;

 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;

 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;

 /* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;

 /* File callbacks that implement the eventpoll file behaviour */
 static struct file_operations eventpoll_fops = {

fs/fcntl.c

@@ -412,7 +412,7 @@ out:

 /* Table to convert sigio signal codes into poll band bitmaps */

-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
 }

 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static kmem_cache_t *fasync_cache __read_mostly;

 /*
  * fasync_helper() is used by some character device drivers (mainly mice)

fs/inode.c

@@ -56,8 +56,8 @@
 #define I_HASHBITS i_hash_shift
 #define I_HASHMASK i_hash_mask

-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;

 /*
  * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
 LIST_HEAD(inode_in_use);
 LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;

 /*
  * A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
  */
 struct inodes_stat_t inodes_stat;

-static kmem_cache_t * inode_cachep;
+static kmem_cache_t * inode_cachep __read_mostly;

 static struct inode *alloc_inode(struct super_block *sb)
 {

fs/inotify.c

@@ -39,15 +39,15 @@

 static atomic_t inotify_cookie;

-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;

-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;

 /* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;

 /*
  * Lock ordering:

fs/locks.c

@@ -142,7 +142,7 @@ int lease_break_time = 45;

 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
-static kmem_cache_t *filelock_cache;
+static kmem_cache_t *filelock_cache __read_mostly;

 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)

fs/namespace.c

@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

 static int event;

-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;

 /* /sys/fs */

fs/pipe.c

@@ -675,7 +675,7 @@ fail_page:
        return NULL;
 }

-static struct vfsmount *pipe_mnt;
+static struct vfsmount *pipe_mnt __read_mostly;

 static int pipefs_delete_dentry(struct dentry *dentry)
 {
        return 1;