nfsd: don't try to reuse an expired DRC entry off the list
Currently when we are processing a request, we try to scrape an expired
or over-limit entry off the list in preference to allocating a new one
from the slab. This is unnecessarily complicated. Just use the slab
layer.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
commit a0ef5e1968
parent 28303ca309
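The shape of the change is easier to see outside the kernel. Below is a
minimal, self-contained userspace sketch of the allocation path after this
patch: malloc()/free() stand in for the slab layer and a hand-rolled doubly
linked list stands in for the kernel's LRU. The names (cache_entry,
lru_head, prune_cache, entry_alloc) and the 120-second expiry cutoff are
illustrative assumptions, not the kernel's identifiers; the real code uses
svc_cacherep, nfsd_reply_cache_alloc() and prune_cache_entries(), as in the
diff below.

/*
 * Userspace model of the patched behavior, not kernel code: malloc()
 * stands in for the slab allocator, a doubly linked list for the LRU.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct cache_entry {
	struct cache_entry *prev, *next;
	time_t timestamp;
};

static struct cache_entry lru_head = { &lru_head, &lru_head, 0 };
static unsigned int num_entries;

static int entry_expired(const struct cache_entry *e)
{
	return time(NULL) - e->timestamp > 120;	/* illustrative cutoff */
}

static void entry_free(struct cache_entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	num_entries--;
	free(e);
}

/* Walk from the LRU tip (oldest entry) and drop everything expired. */
static void prune_cache(void)
{
	struct cache_entry *e = lru_head.next;

	while (e != &lru_head) {
		struct cache_entry *next = e->next;

		if (!entry_expired(e))
			break;	/* list is LRU-ordered; the rest are newer */
		entry_free(e);
		e = next;
	}
}

/* After the patch: always allocate fresh, then prune as a separate step. */
static struct cache_entry *entry_alloc(void)
{
	struct cache_entry *e = calloc(1, sizeof(*e));

	if (e) {
		e->timestamp = time(NULL);
		e->prev = lru_head.prev;	/* append at the tail (newest) */
		e->next = &lru_head;
		lru_head.prev->next = e;
		lru_head.prev = e;
		num_entries++;
	}
	prune_cache();
	return e;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		entry_alloc();
	printf("%u entries cached\n", num_entries);
	return 0;
}

The point of the patch is visible in entry_alloc(): there is no attempt to
recycle an expired entry in place, so the lock juggling and the
goto search_cache retry of the old nfsd_cache_lookup() path disappear, and
pruning becomes an unconditional, separate step.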
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -131,13 +131,6 @@ nfsd_reply_cache_alloc(void)
 	return rp;
 }
 
-static void
-nfsd_reply_cache_unhash(struct svc_cacherep *rp)
-{
-	hlist_del_init(&rp->c_hash);
-	list_del_init(&rp->c_lru);
-}
-
 static void
 nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
@@ -416,22 +409,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 
 	/*
 	 * Since the common case is a cache miss followed by an insert,
-	 * preallocate an entry. First, try to reuse the first entry on the LRU
-	 * if it works, then go ahead and prune the LRU list.
+	 * preallocate an entry.
 	 */
-	spin_lock(&cache_lock);
-	if (!list_empty(&lru_head)) {
-		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
-		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries) {
-			nfsd_reply_cache_unhash(rp);
-			prune_cache_entries();
-			goto search_cache;
-		}
-	}
-
-	/* No expired ones available, allocate a new one. */
-	spin_unlock(&cache_lock);
 	rp = nfsd_reply_cache_alloc();
 	spin_lock(&cache_lock);
 	if (likely(rp)) {
@@ -439,7 +418,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 		drc_mem_usage += sizeof(*rp);
 	}
 
-search_cache:
+	/* go ahead and prune the cache */
+	prune_cache_entries();
+
 	found = nfsd_cache_search(rqstp, csum);
 	if (found) {
 		if (likely(rp))
@@ -453,15 +434,6 @@ search_cache:
 		goto out;
 	}
 
-	/*
-	 * We're keeping the one we just allocated. Are we now over the
-	 * limit? Prune one off the tip of the LRU in trade for the one we
-	 * just allocated if so.
-	 */
-	if (num_drc_entries >= max_drc_entries)
-		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
-						struct svc_cacherep, c_lru));
-
 	nfsdstats.rcmisses++;
 	rqstp->rq_cacherep = rp;
 	rp->c_state = RC_INPROG;