struct work_struct gr_work;
};
+struct gfs2_gl_hash_bucket {
+ struct list_head hb_list;
+};
+
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+static rwlock_t gl_hash_locks[GFS2_GL_HASH_SIZE];
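
The rwlock moves out of the bucket into a parallel array: gl_hash_locks[i] now guards gl_hash_table[i], and struct gfs2_gl_hash_bucket shrinks to a bare list head. A pair of hypothetical accessors (not part of the patch) makes the pairing invariant explicit:

/* Illustrative only: bucket i is always guarded by lock i. */
static inline rwlock_t *gl_lock(unsigned int hash)
{
	return &gl_hash_locks[hash];
}

static inline struct list_head *gl_list(unsigned int hash)
{
	return &gl_hash_table[hash].hb_list;
}

Every site below indexes both arrays with the same hash value, either computed from the lock name or cached in the new gl->gl_hash field.
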
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
int gfs2_glock_put(struct gfs2_glock *gl)
{
- struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
int rv = 0;
- write_lock(&bucket->hb_lock);
+ write_lock(&gl_hash_locks[gl->gl_hash]);
if (kref_put(&gl->gl_ref, kill_glock)) {
 list_del_init(&gl->gl_list);
- write_unlock(&bucket->hb_lock);
+ write_unlock(&gl_hash_locks[gl->gl_hash]);
BUG_ON(spin_is_locked(&gl->gl_spin));
glock_free(gl);
rv = 1;
goto out;
}
- write_unlock(&bucket->hb_lock);
+ write_unlock(&gl_hash_locks[gl->gl_hash]);
out:
return rv;
}
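
Holding the write lock across kref_put() is what makes the last put safe: the final reference drop and the unhashing happen atomically with respect to lookups. For contrast, a sketch of a broken ordering (illustrative only, not in the patch) that takes the lock only after the put:

/* BROKEN ordering, for illustration -- do not do this: */
if (kref_put(&gl->gl_ref, kill_glock)) {
	/* window: gl is still hashed with a zero refcount, so a
	 * concurrent read-locked search_bucket() on this bucket can
	 * still find it and hand out a pointer about to be freed */
	write_lock(&gl_hash_locks[gl->gl_hash]);
	list_del_init(&gl->gl_list);
	write_unlock(&gl_hash_locks[gl->gl_hash]);
	glock_free(gl);
}
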
* Returns: NULL, or the struct gfs2_glock with the requested number
*/
-static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
+static struct gfs2_glock *search_bucket(unsigned int hash,
const struct gfs2_sbd *sdp,
const struct lm_lockname *name)
{
struct gfs2_glock *gl;
- list_for_each_entry(gl, &bucket->hb_list, gl_list) {
+ list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
if (!lm_name_equal(&gl->gl_name, name))
static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
const struct lm_lockname *name)
{
- struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
+ unsigned int hash = gl_hash(sdp, name);
struct gfs2_glock *gl;
- read_lock(&bucket->hb_lock);
- gl = search_bucket(bucket, sdp, name);
- read_unlock(&bucket->hb_lock);
+ read_lock(&gl_hash_locks[hash]);
+ gl = search_bucket(hash, sdp, name);
+ read_unlock(&gl_hash_locks[hash]);
return gl;
}
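
The lookup side takes only the read lock, so lookups that hash to the same bucket no longer serialize against each other, only against writers. The same read-mostly shape in a self-contained userspace analogue, with POSIX rwlocks standing in for the kernel's rwlock_t (all names here are illustrative):

#include <pthread.h>
#include <stddef.h>

#define NBUCKETS 16

struct node {
	struct node *next;
	unsigned long key;
};

static struct node *table[NBUCKETS];
static pthread_rwlock_t locks[NBUCKETS];

static void table_init(void)
{
	unsigned int i;

	for (i = 0; i < NBUCKETS; i++)
		pthread_rwlock_init(&locks[i], NULL);
}

/* Readers on the same bucket run in parallel; only an insert or
 * remove on that bucket (taking the write lock) excludes them. */
static struct node *table_find(unsigned long key)
{
	unsigned int hash = key % NBUCKETS;
	struct node *n;

	pthread_rwlock_rdlock(&locks[hash]);
	for (n = table[hash]; n; n = n->next)
		if (n->key == key)
			break;
	pthread_rwlock_unlock(&locks[hash]);
	return n;
}
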
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
- struct lm_lockname name;
+ struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
struct gfs2_glock *gl, *tmp;
- struct gfs2_gl_hash_bucket *bucket;
+ unsigned int hash = gl_hash(sdp, &name);
int error;
- name.ln_number = number;
- name.ln_type = glops->go_type;
- bucket = &gl_hash_table[gl_hash(sdp, &name)];
-
- read_lock(&bucket->hb_lock);
- gl = search_bucket(bucket, sdp, &name);
- read_unlock(&bucket->hb_lock);
+ read_lock(&gl_hash_locks[hash]);
+ gl = search_bucket(hash, sdp, &name);
+ read_unlock(&gl_hash_locks[hash]);
if (gl || !create) {
*glp = gl;
gl->gl_name = name;
kref_init(&gl->gl_ref);
gl->gl_state = LM_ST_UNLOCKED;
+ gl->gl_hash = hash;
gl->gl_owner = NULL;
gl->gl_ip = 0;
gl->gl_ops = glops;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
gl->gl_object = NULL;
- gl->gl_bucket = bucket;
gl->gl_sbd = sdp;
gl->gl_aspace = NULL;
lops_init_le(&gl->gl_le, &gfs2_glock_lops);
if (error)
goto fail_aspace;
- write_lock(&bucket->hb_lock);
- tmp = search_bucket(bucket, sdp, &name);
+ write_lock(&gl_hash_locks[hash]);
+ tmp = search_bucket(hash, sdp, &name);
if (tmp) {
- write_unlock(&bucket->hb_lock);
+ write_unlock(&gl_hash_locks[hash]);
glock_free(gl);
gl = tmp;
} else {
- list_add_tail(&gl->gl_list, &bucket->hb_list);
- write_unlock(&bucket->hb_lock);
+ list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
+ write_unlock(&gl_hash_locks[hash]);
}
*glp = gl;
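
gfs2_glock_get() follows the classic optimistic-insert pattern: search under the read lock, allocate with no lock held, then re-search under the write lock and discard the fresh glock if another CPU won the race. A sketch of the interleaving the recheck guards against:

/*
 * Race guarded against by the write-locked recheck (sketch):
 *
 *   CPU0                               CPU1
 *   read_lock; search: miss; unlock
 *                                      read_lock; search: miss; unlock
 *   allocate gl for name N
 *                                      allocate gl' for name N
 *   write_lock; recheck: miss;
 *   insert gl; unlock
 *                                      write_lock; recheck: finds gl;
 *                                      unlock; glock_free(gl'); use gl
 */

Without the recheck, two glocks for the same lockname could land on the bucket list and glock state would be split between them.
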
*/
static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
- struct gfs2_gl_hash_bucket *bucket)
+ unsigned int hash)
{
struct glock_plug plug;
struct list_head *tmp;
memset(&plug.gl_flags, 0, sizeof(unsigned long));
set_bit(GLF_PLUG, &plug.gl_flags);
- write_lock(&bucket->hb_lock);
- list_add(&plug.gl_list, &bucket->hb_list);
- write_unlock(&bucket->hb_lock);
+ write_lock(&gl_hash_locks[hash]);
+ list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
+ write_unlock(&gl_hash_locks[hash]);
for (;;) {
- write_lock(&bucket->hb_lock);
+ write_lock(&gl_hash_locks[hash]);
for (;;) {
tmp = plug.gl_list.next;
- if (tmp == &bucket->hb_list) {
+ if (tmp == &gl_hash_table[hash].hb_list) {
list_del(&plug.gl_list);
- entries = !list_empty(&bucket->hb_list);
- write_unlock(&bucket->hb_lock);
+ entries = !list_empty(&gl_hash_table[hash].hb_list);
+ write_unlock(&gl_hash_locks[hash]);
return entries;
}
gl = list_entry(tmp, struct gfs2_glock, gl_list);
break;
}
- write_unlock(&bucket->hb_lock);
+ write_unlock(&gl_hash_locks[hash]);
examiner(gl);
}
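
The plug is a cursor: a dummy bucket entry that keeps the walker's place so the bucket lock can be dropped around examiner(), which may block. Real traversals skip it via the GLF_PLUG test seen in search_bucket() above. The technique in isolation, reduced to a generic sketch (illustrative names; a real walker must also pin each entry, e.g. via its reference count, before dropping the lock, and concurrent walkers need a way to recognize and skip the cursor, which is what GLF_PLUG provides):

/* Restartable walk of a locked list using a dummy cursor node. */
static void walk(rwlock_t *lock, struct list_head *head,
		 void (*process)(struct list_head *))
{
	struct list_head cursor;

	write_lock(lock);
	list_add(&cursor, head);		/* park cursor at the front */
	while (cursor.next != head) {
		struct list_head *entry = cursor.next;

		list_move(&cursor, entry);	/* step cursor past entry */
		write_unlock(lock);
		process(entry);			/* may sleep */
		write_lock(lock);
	}
	list_del(&cursor);
	write_unlock(lock);
}
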
unsigned int x;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
+ examine_bucket(scan_glock, sdp, x);
cond_resched();
}
}
cont = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
+ if (examine_bucket(clear_glock, sdp, x))
cont = 1;
if (!wait || !cont)
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
- struct gfs2_gl_hash_bucket *bucket;
struct gfs2_glock *gl;
unsigned int x;
int error = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- bucket = &gl_hash_table[x];
- read_lock(&bucket->hb_lock);
+ read_lock(&gl_hash_locks[x]);
- list_for_each_entry(gl, &bucket->hb_list, gl_list) {
+ list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
if (gl->gl_sbd != sdp)
break;
}
- read_unlock(&bucket->hb_lock);
+ read_unlock(&gl_hash_locks[x]);
if (error)
break;
{
unsigned i;
for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
- struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
- rwlock_init(&hb->hb_lock);
- INIT_LIST_HEAD(&hb->hb_list);
+ rwlock_init(&gl_hash_locks[i]);
+ INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
}
return 0;
}
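
An aside on initialization: since the locks now live in a plain static array, a version of this patch could instead initialize them statically at the definition. A sketch, assuming the era-appropriate RW_LOCK_UNLOCKED static initializer and GCC's range-designator extension:

/* Hypothetical alternative to the init loop (sketch only): */
static rwlock_t gl_hash_locks[GFS2_GL_HASH_SIZE] = {
	[0 ... GFS2_GL_HASH_SIZE - 1] = RW_LOCK_UNLOCKED
};

The run-time rwlock_init() loop kept here has the advantage of working unchanged if the table size ever becomes dynamic.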