/*
 * include/linux/sunrpc/cache.h
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#ifndef _LINUX_SUNRPC_CACHE_H_
#define _LINUX_SUNRPC_CACHE_H_

#include <linux/kref.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>

/*
 * Each cache requires:
 *  - A 'struct cache_detail' which contains information specific to the cache
 *    for common code to use.
 *  - An item structure that must contain a "struct cache_head"
 *  - A lookup function defined using DefineCacheLookup
 *  - A 'put' function that can release a cache item.  It will only
 *    be called after cache_put has succeeded, so it is guaranteed that
 *    no references remain.
 *  - A function to calculate a hash of an item's key.
 *
 * as well as assorted code fragments (e.g. compare keys) and numbers
 * (e.g. hash size, goal_age, etc).
 *
 * Each cache must be registered so that it can be cleaned regularly.
 * When the cache is unregistered, it is flushed completely.
 *
 * Entries have a ref count and a 'hashed' flag which tracks their
 * existence in the hash table.
 * We only expire entries when the refcount is zero.
 * Existence in the cache is counted by the refcount.
 */
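
/*
 * For example, a cache owner might define its item type and hash function
 * roughly like this.  This is only a sketch with hypothetical names
 * (demo_item, demo_hash); none of these symbols are defined by this header:
 *
 *	struct demo_item {
 *		struct cache_head h;	(must embed the struct defined below)
 *		uid_t	uid;		(lookup key)
 *		char	name[32];	(content, valid once CACHE_VALID is set)
 *	};
 *
 *	static inline int demo_hash(struct demo_item *item)
 *	{
 *		return item->uid & 0xff;	(index into a 256-entry hash_table)
 *	}
 */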

/* Every cache item has a common header that is used
 * for expiring and refreshing entries.
 *
 */
struct cache_head {
	struct cache_head *next;
	time_t expiry_time;	/* After this time, don't use the data */
	time_t last_refresh;	/* If CACHE_PENDING, this is when the upcall
				 * was sent, else this is when the update was received
				 */
	struct kref ref;
	unsigned long flags;
};
#define	CACHE_VALID	0	/* Entry contains valid data */
#define	CACHE_NEGATIVE	1	/* Negative entry - there is no match for the key */
#define	CACHE_PENDING	2	/* An upcall has been sent but no reply received yet */

#define	CACHE_NEW_EXPIRY 120	/* keep new things pending confirmation for 120 seconds */

struct cache_detail_procfs {
	struct proc_dir_entry *proc_ent;
	struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
};

struct cache_detail_pipefs {
	struct dentry *dir;
};

struct cache_detail {
	struct module		*owner;
	int			hash_size;
	struct cache_head	**hash_table;
	rwlock_t		hash_lock;

	atomic_t		inuse; /* active user-space update or lookup */

	char			*name;
	void			(*cache_put)(struct kref *);

	int			(*cache_upcall)(struct cache_detail *,
						struct cache_head *);

	int			(*cache_parse)(struct cache_detail *,
					       char *buf, int len);

	int			(*cache_show)(struct seq_file *m,
					      struct cache_detail *cd,
					      struct cache_head *h);
	void			(*warn_no_listener)(struct cache_detail *cd,
						    int has_died);

	struct cache_head *	(*alloc)(void);
	int			(*match)(struct cache_head *orig, struct cache_head *new);
	void			(*init)(struct cache_head *orig, struct cache_head *new);
	void			(*update)(struct cache_head *orig, struct cache_head *new);

	/* fields below this comment are for internal use
	 * and should not be touched by cache owners
	 */
	time_t			flush_time;	/* flush all cache items with last_refresh
						 * earlier than this */
	struct list_head	others;
	time_t			nextcheck;
	int			entries;

	/* fields for communication over channel */
	struct list_head	queue;

	atomic_t		readers;	/* how many times is /channel open */
	time_t			last_close;	/* if no readers, when did last close */
	time_t			last_warn;	/* when we last warned about no readers */

	union {
		struct cache_detail_procfs procfs;
		struct cache_detail_pipefs pipefs;
	} u;
};
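
/*
 * A cache owner typically provides the hash table storage and the methods
 * above when declaring its cache.  A minimal sketch with hypothetical names
 * (demo_put, demo_parse, demo_show, demo_match, demo_init, demo_update,
 * demo_alloc and demo_upcall are assumed to be supplied by the owner;
 * demo_upcall is sketched further down, next to sunrpc_cache_pipe_upcall()):
 *
 *	static struct cache_head *demo_table[256];
 *
 *	static struct cache_detail demo_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 256,
 *		.hash_table	= demo_table,
 *		.name		= "demo",
 *		.cache_put	= demo_put,
 *		.cache_upcall	= demo_upcall,
 *		.cache_parse	= demo_parse,
 *		.cache_show	= demo_show,
 *		.match		= demo_match,
 *		.init		= demo_init,
 *		.update		= demo_update,
 *		.alloc		= demo_alloc,
 *	};
 *
 * followed by cache_register(&demo_cache) at module init and
 * cache_unregister(&demo_cache) at exit.
 */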

/* this must be embedded in any request structure that
 * identifies an object that will want a callback on
 * a cache fill
 */
struct cache_req {
	struct cache_deferred_req *(*defer)(struct cache_req *req);
	int thread_wait;	/* How long (jiffies) we can block the
				 * current thread to wait for updates.
				 */
};
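
/*
 * For instance, a server's per-request structure would carry one of these
 * and pass it to cache_check().  A sketch with a hypothetical struct name:
 *
 *	struct demo_rqst {
 *		struct cache_req rq_chandle;	(handed to cache_check)
 *		(... the rest of the request state ...)
 *	};
 *
 * The ->defer method, if set, returns a cache_deferred_req that lets the
 * request be parked until the cache item it needs has been filled in.
 */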

/* this must be embedded in a deferred_request that is being
 * delayed awaiting cache-fill
 */
struct cache_deferred_req {
	struct hlist_node	hash;	/* on hash chain */
	struct list_head	recent; /* on fifo */
	struct cache_head	*item;	/* cache item we wait on */
	void			*owner; /* we might need to discard all deferred requests
					 * owned by someone */
	void			(*revisit)(struct cache_deferred_req *req,
					   int too_many);
};
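
/*
 * A transport that can defer requests would wrap this in its own
 * deferred-request record, roughly like the following sketch
 * (hypothetical names):
 *
 *	struct demo_deferred_req {
 *		struct cache_deferred_req handle;
 *		(... saved request state needed to replay it later ...)
 *	};
 *
 * The ->revisit callback is invoked when the awaited cache item has been
 * filled in, or - with a non-zero 'too_many' - when the deferral is being
 * discarded and the saved request should simply be dropped.
 */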

extern const struct file_operations cache_file_operations_pipefs;
extern const struct file_operations content_file_operations_pipefs;
extern const struct file_operations cache_flush_operations_pipefs;

extern struct cache_head *
sunrpc_cache_lookup(struct cache_detail *detail,
		    struct cache_head *key, int hash);
extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
		    struct cache_head *new, struct cache_head *old, int hash);
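
/*
 * Typical usage - a sketch only, reusing the hypothetical demo_item,
 * demo_hash and demo_cache names from the examples above: build a key,
 * look it up, and use container_of() to get back to the private item:
 *
 *	struct demo_item key = { .uid = uid };
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup(&demo_cache, &key.h, demo_hash(&key));
 *	if (ch)
 *		item = container_of(ch, struct demo_item, h);
 *
 * sunrpc_cache_update() is used the same way once new content has been
 * parsed and an existing (possibly not-yet-valid) entry should be filled
 * in or replaced.
 */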

extern int
sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
		void (*cache_request)(struct cache_detail *,
				      struct cache_head *,
				      char **,
				      int *));
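
/*
 * A ->cache_upcall method will usually just format the key and hand it to
 * sunrpc_cache_pipe_upcall().  A sketch, reusing the hypothetical demo
 * names from above:
 *
 *	static void demo_request(struct cache_detail *cd, struct cache_head *h,
 *				 char **bpp, int *blen)
 *	{
 *		struct demo_item *item = container_of(h, struct demo_item, h);
 *		char uid[16];
 *
 *		snprintf(uid, sizeof(uid), "%d", item->uid);
 *		qword_add(bpp, blen, uid);
 *		(*bpp)[-1] = '\n';
 *	}
 *
 *	static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
 *	{
 *		return sunrpc_cache_pipe_upcall(cd, h, demo_request);
 *	}
 */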

extern void cache_clean_deferred(void *owner);

static inline struct cache_head *cache_get(struct cache_head *h)
{
	kref_get(&h->ref);
	return h;
}

static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
	/* If only the cache itself (and this caller) still hold references,
	 * make sure the next cache clean considers this entry's expiry.
	 */
	if (atomic_read(&h->ref.refcount) <= 2 &&
	    h->expiry_time < cd->nextcheck)
		cd->nextcheck = h->expiry_time;
	kref_put(&h->ref, cd->cache_put);
}

static inline int cache_valid(struct cache_head *h)
{
	/* If an item has been unhashed pending removal when
	 * the refcount drops to 0, the expiry_time will be
	 * set to 0.  We don't want to consider such items
	 * valid in this context even though CACHE_VALID is
	 * set.
	 */
	return (h->expiry_time != 0 && test_bit(CACHE_VALID, &h->flags));
}

extern int cache_check(struct cache_detail *detail,
		       struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
extern void cache_purge(struct cache_detail *detail);
#define NEVER (0x7FFFFFFF)
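
/*
 * cache_check() is how a server thread asks for valid data: it returns 0
 * when the entry is valid and a negative errno otherwise (for example
 * -ENOENT for a negative entry, or -EAGAIN when the request has been
 * deferred while an upcall completes).  A sketch of the usual pattern,
 * using the hypothetical names from the earlier examples:
 *
 *	switch (cache_check(&demo_cache, &item->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		(... use the item's content ...)
 *		break;
 *	case -EAGAIN:
 *		(... drop or defer the request; it will be revisited ...)
 *		break;
 *	default:
 *		(... treat as a lookup failure ...)
 *	}
 */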

extern void __init cache_initialize(void);
extern int cache_register(struct cache_detail *cd);
extern int cache_register_net(struct cache_detail *cd, struct net *net);
extern void cache_unregister(struct cache_detail *cd);
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);

extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
					mode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);

extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);

static inline int get_int(char **bpp, int *anint)
{
	char buf[50];
	char *ep;
	int rv;
	int len = qword_get(bpp, buf, 50);
	if (len < 0) return -EINVAL;
	if (len == 0) return -ENOENT;
	rv = simple_strtol(buf, &ep, 0);
	if (*ep) return -EINVAL;
	*anint = rv;
	return 0;
}
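
/*
 * qword_get()/get_int() (and get_expiry() below) are the building blocks
 * of a ->cache_parse method, which handles one line written to the
 * channel file.  A sketch with the hypothetical demo names from above:
 *
 *	static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char name[32];
 *		int uid;
 *		time_t expiry;
 *
 *		if (get_int(&mesg, &uid))
 *			return -EINVAL;
 *		if (qword_get(&mesg, name, sizeof(name)) <= 0)
 *			return -EINVAL;
 *		expiry = get_expiry(&mesg);
 *		if (expiry == 0)
 *			return -EINVAL;
 *		(... look the key up, fill in a new item, set h.expiry_time
 *		     and call sunrpc_cache_update() ...)
 *		return 0;
 *	}
 */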

/*
 * timestamps kept in the cache are expressed in seconds
 * since boot.  This is the best for measuring differences in
 * real time.
 */
static inline time_t seconds_since_boot(void)
{
	struct timespec boot;
	getboottime(&boot);
	return get_seconds() - boot.tv_sec;
}

static inline time_t convert_to_wallclock(time_t sinceboot)
{
	struct timespec boot;
	getboottime(&boot);
	return boot.tv_sec + sinceboot;
}

/* parse a wall-clock expiry time (seconds since the epoch) and convert
 * it to seconds-since-boot; 0 means the value was absent or invalid.
 */
static inline time_t get_expiry(char **bpp)
{
	int rv;
	struct timespec boot;

	if (get_int(bpp, &rv))
		return 0;
	if (rv < 0)
		return 0;
	getboottime(&boot);
	return rv - boot.tv_sec;
}

#ifdef CONFIG_NFSD_DEPRECATED
static inline void sunrpc_invalidate(struct cache_head *h,
				     struct cache_detail *detail)
{
	h->expiry_time = seconds_since_boot() - 1;
	detail->nextcheck = seconds_since_boot();
}
#endif /* CONFIG_NFSD_DEPRECATED */

#endif /* _LINUX_SUNRPC_CACHE_H_ */