dlm: drop own rsb pre allocation mechanism
This patch drops DLM's own rsb pre-allocation mechanism. Allocation already goes through kmem caches, so we don't need another layer on top of them running its own pre-allocation scheme.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
commit 1ffefc19c4
parent 4db41bf4f0
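For context: dlm_allocate_rsb() and dlm_free_rsb() already sit on top of a slab (kmem) cache, so the slab layer is the pre-allocation layer; keeping a private ls_new_rsb list in front of it only duplicated that work. A minimal sketch of the kmem_cache pattern the patch relies on is below; the cache name and the init/alloc/free helper names and parameters are illustrative assumptions, not code from fs/dlm/memory.c.

/* Sketch of a kmem_cache-backed allocator of the kind that backs
 * dlm_allocate_rsb()/dlm_free_rsb(); identifiers are illustrative.
 */
#include <linux/slab.h>

static struct kmem_cache *rsb_cache;

static int rsb_cache_init(unsigned int rsb_size)
{
	/* The slab layer keeps per-CPU and partial slabs warm, so it
	 * already amortizes allocation cost without an extra
	 * pre-allocated list in front of it. */
	rsb_cache = kmem_cache_create("dlm_rsb_example", rsb_size, 0, 0, NULL);
	return rsb_cache ? 0 : -ENOMEM;
}

static void *rsb_alloc(gfp_t flags)
{
	/* zeroed object straight from the cache */
	return kmem_cache_zalloc(rsb_cache, flags);
}

static void rsb_free(void *r)
{
	kmem_cache_free(rsb_cache, r);
}

With the cache doing that work, get_rsb_struct() can call dlm_allocate_rsb() directly and the -EAGAIN/retry dance around the old pre-allocated list disappears, which is what the hunks below show.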
fs/dlm/dlm_internal.h
@@ -322,10 +322,7 @@ struct dlm_rsb {
 	unsigned long		res_toss_time;
 	uint32_t		res_first_lkid;
 	struct list_head	res_lookup;	/* lkbs waiting on first */
-	union {
-		struct list_head	res_hashchain;
-		struct rhash_head	res_node; /* rsbtbl */
-	};
+	struct rhash_head	res_node; /* rsbtbl */
 	struct list_head	res_grantqueue;
 	struct list_head	res_convertqueue;
 	struct list_head	res_waitqueue;
@@ -596,10 +593,6 @@ struct dlm_ls {
 	spinlock_t		ls_orphans_lock;
 	struct list_head	ls_orphans;
 
-	spinlock_t		ls_new_rsb_spin;
-	int			ls_new_rsb_count;
-	struct list_head	ls_new_rsb;	/* new rsb structs */
-
 	struct list_head	ls_nodes;	/* current nodes in ls */
 	struct list_head	ls_nodes_gone;	/* dead node list, recovery */
 	int			ls_num_nodes;	/* number of nodes in ls */
fs/dlm/lock.c
@@ -389,38 +389,6 @@ void dlm_put_rsb(struct dlm_rsb *r)
 	put_rsb(r);
 }
 
-static int pre_rsb_struct(struct dlm_ls *ls)
-{
-	struct dlm_rsb *r1, *r2;
-	int count = 0;
-
-	spin_lock_bh(&ls->ls_new_rsb_spin);
-	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
-		spin_unlock_bh(&ls->ls_new_rsb_spin);
-		return 0;
-	}
-	spin_unlock_bh(&ls->ls_new_rsb_spin);
-
-	r1 = dlm_allocate_rsb(ls);
-	r2 = dlm_allocate_rsb(ls);
-
-	spin_lock_bh(&ls->ls_new_rsb_spin);
-	if (r1) {
-		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
-		ls->ls_new_rsb_count++;
-	}
-	if (r2) {
-		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
-		ls->ls_new_rsb_count++;
-	}
-	count = ls->ls_new_rsb_count;
-	spin_unlock_bh(&ls->ls_new_rsb_spin);
-
-	if (!count)
-		return -ENOMEM;
-	return 0;
-}
-
 /* connected with timer_delete_sync() in dlm_ls_stop() to stop
  * new timers when recovery is triggered and don't run them
  * again until a dlm_timer_resume() tries it again.
@@ -652,22 +620,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
 			  struct dlm_rsb **r_ret)
 {
 	struct dlm_rsb *r;
-	int count;
 
-	spin_lock_bh(&ls->ls_new_rsb_spin);
-	if (list_empty(&ls->ls_new_rsb)) {
-		count = ls->ls_new_rsb_count;
-		spin_unlock_bh(&ls->ls_new_rsb_spin);
-		log_debug(ls, "find_rsb retry %d %d %s",
-			  count, dlm_config.ci_new_rsb_count,
-			  (const char *)name);
-		return -EAGAIN;
-	}
-
-	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
-	list_del(&r->res_hashchain);
-	ls->ls_new_rsb_count--;
-	spin_unlock_bh(&ls->ls_new_rsb_spin);
+	r = dlm_allocate_rsb(ls);
+	if (!r)
+		return -ENOMEM;
 
 	r->res_ls = ls;
 	r->res_length = len;
@@ -792,13 +748,6 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
 	}
 
-retry:
-	if (create) {
-		error = pre_rsb_struct(ls);
-		if (error < 0)
-			goto out;
-	}
-
 retry_lookup:
 
 	/* check if the rsb is in keep state under read lock - likely path */
 	read_lock_bh(&ls->ls_rsbtbl_lock);
@@ -832,7 +781,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
 	if (!error) {
 		if (!rsb_flag(r, RSB_TOSS)) {
 			write_unlock_bh(&ls->ls_rsbtbl_lock);
-			goto retry;
+			goto retry_lookup;
 		}
 	} else {
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -898,9 +847,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
 		goto out;
 
 	error = get_rsb_struct(ls, name, len, &r);
-	if (error == -EAGAIN)
-		goto retry;
-	if (error)
+	if (WARN_ON_ONCE(error))
 		goto out;
 
 	r->res_hash = hash;
@@ -952,7 +899,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
 		 */
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
 		dlm_free_rsb(r);
-		goto retry;
+		goto retry_lookup;
 	} else if (!error) {
 		list_add(&r->res_rsbs_list, &ls->ls_keep);
 	}
@@ -976,11 +923,6 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
 	int error;
 
-retry:
-	error = pre_rsb_struct(ls);
-	if (error < 0)
-		goto out;
-
 retry_lookup:
 
 	/* check if the rsb is in keep state under read lock - likely path */
 	read_lock_bh(&ls->ls_rsbtbl_lock);
@@ -1015,7 +957,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
 	if (!error) {
 		if (!rsb_flag(r, RSB_TOSS)) {
 			write_unlock_bh(&ls->ls_rsbtbl_lock);
-			goto retry;
+			goto retry_lookup;
 		}
 	} else {
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1070,10 +1012,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
 	 */
 
 	error = get_rsb_struct(ls, name, len, &r);
-	if (error == -EAGAIN) {
-		goto retry;
-	}
-	if (error)
+	if (WARN_ON_ONCE(error))
 		goto out;
 
 	r->res_hash = hash;
@@ -1090,7 +1029,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
 		 */
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
 		dlm_free_rsb(r);
-		goto retry;
+		goto retry_lookup;
 	} else if (!error) {
 		list_add(&r->res_rsbs_list, &ls->ls_keep);
 	}
@@ -1304,11 +1243,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
 	}
 
-retry:
-	error = pre_rsb_struct(ls);
-	if (error < 0)
-		return error;
-
 retry_lookup:
 
 	/* check if the rsb is in keep state under read lock - likely path */
 	read_lock_bh(&ls->ls_rsbtbl_lock);
@@ -1354,7 +1288,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
 			/* something as changed, very unlikely but
 			 * try again
 			 */
-			goto retry;
+			goto retry_lookup;
 		}
 	} else {
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1376,9 +1310,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
 
 not_found:
 	error = get_rsb_struct(ls, name, len, &r);
-	if (error == -EAGAIN)
-		goto retry;
-	if (error)
+	if (WARN_ON_ONCE(error))
 		goto out;
 
 	r->res_hash = hash;
@@ -1395,7 +1327,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
 		 */
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
 		dlm_free_rsb(r);
-		goto retry;
+		goto retry_lookup;
 	} else if (error) {
 		write_unlock_bh(&ls->ls_rsbtbl_lock);
 		/* should never happen */
|
@ -428,9 +428,6 @@ static int new_lockspace(const char *name, const char *cluster,
|
||||
INIT_LIST_HEAD(&ls->ls_orphans);
|
||||
spin_lock_init(&ls->ls_orphans_lock);
|
||||
|
||||
INIT_LIST_HEAD(&ls->ls_new_rsb);
|
||||
spin_lock_init(&ls->ls_new_rsb_spin);
|
||||
|
||||
INIT_LIST_HEAD(&ls->ls_nodes);
|
||||
INIT_LIST_HEAD(&ls->ls_nodes_gone);
|
||||
ls->ls_num_nodes = 0;
|
||||
@@ -688,7 +685,6 @@ static void rhash_free_rsb(void *ptr, void *arg)
 
 static int release_lockspace(struct dlm_ls *ls, int force)
 {
-	struct dlm_rsb *rsb;
 	int busy, rv;
 
 	busy = lockspace_busy(ls, force);
@@ -756,13 +752,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 */
 	rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
 
-	while (!list_empty(&ls->ls_new_rsb)) {
-		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
-				       res_hashchain);
-		list_del(&rsb->res_hashchain);
-		dlm_free_rsb(rsb);
-	}
-
 	/*
 	 * Free structures on any other lists
 	 */