mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-24 05:04:00 +08:00
1bdad60633
There are several reasons why this is undesirable: 1. It never happens during normal operation anyway 2. If it does happen it causes performance to be very, very poor 3. It isn't likely to solve the original problem (memory shortage on remote DLM node) it was supposed to solve 4. It uses a bunch of arbitrary constants which are unlikely to be correct for any particular situation and for which the tuning seems to be a black art. 5. In an N node cluster, only 1/N of the dropped locked will actually contribute to solving the problem on average. So all in all we are better off without it. This also makes merging the lock_dlm module into GFS2 a bit easier. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
146 lines
4.0 KiB
C
146 lines
4.0 KiB
C
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
|
|
|
|
#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include "incore.h"
|
|
/* Flags for lock requests; used in gfs2_holder gh_flag field.
   From lm_interface.h:
   #define LM_FLAG_TRY		0x00000001
   #define LM_FLAG_TRY_1CB	0x00000002
   #define LM_FLAG_NOEXP	0x00000004
   #define LM_FLAG_ANY		0x00000008
   #define LM_FLAG_PRIORITY	0x00000010 */

#define GL_ASYNC		0x00000040
#define GL_EXACT		0x00000080
#define GL_SKIP			0x00000100
#define GL_ATIME		0x00000200
#define GL_NOCACHE		0x00000400

#define GLR_TRYFAILED		13
|
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
|
|
{
|
|
struct gfs2_holder *gh;
|
|
struct pid *pid;
|
|
|
|
/* Look in glock's list of holders for one with current task as owner */
|
|
spin_lock(&gl->gl_spin);
|
|
pid = task_pid(current);
|
|
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
|
|
if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
|
|
break;
|
|
if (gh->gh_owner_pid == pid)
|
|
goto out;
|
|
}
|
|
gh = NULL;
|
|
out:
|
|
spin_unlock(&gl->gl_spin);
|
|
|
|
return gh;
|
|
}
|
|
|
|
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
|
|
{
|
|
return gl->gl_state == LM_ST_EXCLUSIVE;
|
|
}
|
|
|
|
static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
|
|
{
|
|
return gl->gl_state == LM_ST_DEFERRED;
|
|
}
|
|
|
|
static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
|
|
{
|
|
return gl->gl_state == LM_ST_SHARED;
|
|
}
|
|
|
|
static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
|
|
{
|
|
int ret;
|
|
spin_lock(&gl->gl_spin);
|
|
ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
|
|
spin_unlock(&gl->gl_spin);
|
|
return ret;
|
|
}
|
|
|
|
int gfs2_glock_get(struct gfs2_sbd *sdp,
		   u64 number, const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
int gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);

void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
		      u64 number, const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh);

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
|
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */
|
|
|
|
static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, int flags,
				     struct gfs2_holder *gh)
{
	int ret;

	gfs2_holder_init(gl, state, flags, gh);

	ret = gfs2_glock_nq(gh);
	/* On any failure the holder must be torn down again so the
	   caller does not have to. */
	if (ret != 0)
		gfs2_holder_uninit(gh);

	return ret;
}
|
|
|
|
/* Lock Value Block functions */

int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
int gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);

#endif /* __GLOCK_DOT_H__ */
|