
[GFS2] Remove queue_empty() function

This function is no longer required since we do not do recursive
locking in the glock layer. As a result all its callers can be
replaced with list_empty() calls.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Steven Whitehouse 2007-01-22 13:09:04 -05:00
parent bd44e2b007
commit 12132933c4


@@ -210,30 +210,6 @@ out:
 	return rv;
 }
 
-/**
- * queue_empty - check to see if a glock's queue is empty
- * @gl: the glock
- * @head: the head of the queue to check
- *
- * This function protects the list in the event that a process already
- * has a holder on the list and is adding a second holder for itself.
- * The glmutex lock is what generally prevents processes from working
- * on the same glock at once, but the special case of adding a second
- * holder for yourself ("recursive" locking) doesn't involve locking
- * glmutex, making the spin lock necessary.
- *
- * Returns: 1 if the queue is empty
- */
-
-static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
-{
-	int empty;
-	spin_lock(&gl->gl_spin);
-	empty = list_empty(head);
-	spin_unlock(&gl->gl_spin);
-	return empty;
-}
-
 /**
  * search_bucket() - Find struct gfs2_glock by lock number
  * @bucket: the bucket to search
@@ -814,7 +790,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	int op_done = 1;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
 
 	state_change(gl, ret & LM_OUT_ST_MASK);
@@ -925,7 +901,7 @@ void gfs2_glock_xmote_th(struct gfs2_holder *gh)
 		glops->go_xmote_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
 	gfs2_assert_warn(sdp, state != gl->gl_state);
@@ -960,7 +936,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	struct gfs2_holder *gh = gl->gl_req_gh;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !ret);
 
 	state_change(gl, LM_ST_UNLOCKED);
@@ -1007,7 +983,7 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 		glops->go_drop_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
@@ -1697,7 +1673,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 	atomic_inc(&sdp->sd_reclaimed);
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED);
 		gfs2_glmutex_unlock(gl);
@@ -1761,7 +1737,7 @@ static void scan_glock(struct gfs2_glock *gl)
 		return;
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			goto out_schedule;
 		gfs2_glmutex_unlock(gl);
@@ -1810,7 +1786,7 @@ static void clear_glock(struct gfs2_glock *gl)
 	}
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED);
 		gfs2_glmutex_unlock(gl);