sunrpc: convert sp_task_pending flag to use atomic bitops
In a later patch, we'll want to be able to handle this flag without holding the sp_lock. Change this field to an unsigned long flags field, and declare a new flag in it that can be managed with atomic bitops.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
commit 4d5db3f536
parent 62978b3c61
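The pattern the patch adopts (a single flag bit in an unsigned long, manipulated with atomic bitops rather than under sp_lock) can be sketched outside the kernel. The following is a minimal userspace analogue, not code from the patch: C11 atomics stand in for the kernel's set_bit(), clear_bit() and test_and_clear_bit(), and every name in it (pool_flags, flag_set(), TASK_PENDING_BIT, ...) is made up for illustration.

/* Userspace sketch of managing one flag bit in an unsigned long with
 * atomic read-modify-write operations, roughly what the kernel's
 * set_bit()/clear_bit()/test_and_clear_bit() helpers provide.
 * All names are illustrative, not taken from the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TASK_PENDING_BIT 0UL            /* bit number, like SP_TASK_PENDING */

static atomic_ulong pool_flags;         /* plays the role of sp_flags */

static void flag_set(unsigned long bit)
{
	/* atomic OR: safe without holding any external lock */
	atomic_fetch_or(&pool_flags, 1UL << bit);
}

static void flag_clear(unsigned long bit)
{
	atomic_fetch_and(&pool_flags, ~(1UL << bit));
}

static bool flag_test_and_clear(unsigned long bit)
{
	/* one atomic read-modify-write: "was it set?" and "clear it"
	 * cannot be separated by another thread */
	unsigned long old = atomic_fetch_and(&pool_flags, ~(1UL << bit));
	return old & (1UL << bit);
}

int main(void)
{
	flag_set(TASK_PENDING_BIT);
	printf("pending? %d\n", flag_test_and_clear(TASK_PENDING_BIT)); /* 1 */
	printf("pending? %d\n", flag_test_and_clear(TASK_PENDING_BIT)); /* 0 */
	flag_clear(TASK_PENDING_BIT);   /* no-op here, shown for completeness */
	return 0;
}

The point of test_and_clear_bit() in the last hunk below is the same as flag_test_and_clear() here: the check and the clear happen in a single atomic operation, so once sp_lock no longer protects the field, no other thread can slip in between them.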
@@ -50,7 +50,9 @@ struct svc_pool {
 	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
 	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
-	int			sp_task_pending;/* has pending task */
+#define	SP_TASK_PENDING		(0)		/* still work to do even if no
+						 * xprt is queued. */
+	unsigned long		sp_flags;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -509,7 +509,7 @@ void svc_wake_up(struct svc_serv *serv)
 			 */
 			wake_up_process(rqstp->rq_task);
 		} else
-			pool->sp_task_pending = 1;
+			set_bit(SP_TASK_PENDING, &pool->sp_flags);
 		spin_unlock_bh(&pool->sp_lock);
 	}
 }
@@ -644,10 +644,9 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
-		pool->sp_task_pending = 0;
+		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
 	} else {
-		if (pool->sp_task_pending) {
-			pool->sp_task_pending = 0;
+		if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) {
 			xprt = ERR_PTR(-EAGAIN);
 			goto out;
 		}