typedef void (*glock_examiner) (struct gfs2_glock * gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
+static int dump_glock(struct gfs2_glock *gl);
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
spin_lock_init(&gl->gl_spin);
gl->gl_state = LM_ST_UNLOCKED;
+ gl->gl_owner = NULL;
+ gl->gl_ip = 0;
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters2);
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
list_add_tail(&gh.gh_list, &gl->gl_waiters1);
- else
+ else {
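+ /* record which task took the glock mutex and from where */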
+ gl->gl_owner = current;
+ gl->gl_ip = (unsigned long)__builtin_return_address(0);
complete(&gh.gh_wait);
+ }
spin_unlock(&gl->gl_spin);
wait_for_completion(&gh.gh_wait);
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
acquired = 0;
+ else {
+ gl->gl_owner = current;
+ gl->gl_ip = (unsigned long)__builtin_return_address(0);
+ }
spin_unlock(&gl->gl_spin);
return acquired;
{
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
+ gl->gl_owner = NULL;
+ gl->gl_ip = 0;
run_queue(gl);
BUG_ON(!spin_is_locked(&gl->gl_spin));
spin_unlock(&gl->gl_spin);
struct gfs2_sbd *sdp = gl->gl_sbd;
int error = 0;
- restart:
+restart:
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
set_bit(HIF_ABORTED, &gh->gh_iflags);
return -EIO;
clear_bit(GLF_PREFETCH, &gl->gl_flags);
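+ /* GL_DUMP: dump this glock's state when a try lock fails */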
+ if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
+ dump_glock(gl);
+
return error;
}
spin_lock(&gl->gl_spin);
- printk(KERN_INFO "Glock (%u, %llu)\n",
- gl->gl_name.ln_type,
- gl->gl_name.ln_number);
+ printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
+ gl->gl_name.ln_number);
printk(KERN_INFO " gl_flags =");
for (x = 0; x < 32; x++)
if (test_bit(x, &gl->gl_flags))
printk(" %u", x);
printk(" \n");
printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
+ printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm);
+ print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
#define GL_SYNC 0x00000800
#define GL_NOCANCEL 0x00001000
#define GL_AOP 0x00004000
+#define GL_DUMP 0x00008000 /* dump glock state on failed try locks */
#define GLR_TRYFAILED 13
#define GLR_CANCELED 14
spinlock_t gl_spin;
unsigned int gl_state;
+ struct task_struct *gl_owner; /* task holding the GLF_LOCK mutex */
+ unsigned long gl_ip; /* call site that took the mutex */
struct list_head gl_holders;
struct list_head gl_waiters1; /* HIF_MUTEX */
struct list_head gl_waiters2; /* HIF_DEMOTE, HIF_GREEDY */
};
struct gfs2_inode {
+ struct inode i_inode;
struct gfs2_inum i_num;
atomic_t i_count;
struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
};
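+/*
+ * GFS2_I - convert a VFS inode back to its containing gfs2_inode.
+ * Valid because i_inode is embedded in struct gfs2_inode, so both
+ * structures share a single allocation.
+ */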
+static inline struct gfs2_inode *GFS2_I(struct inode *inode)
+{
+ return container_of(inode, struct gfs2_inode, i_inode);
+}
+
enum {
GFF_DID_DIRECT_ALLOC = 0,
};
error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops, LM_ST_EXCLUSIVE,
- LM_FLAG_TRY_1CB, &i_gh);
+ LM_FLAG_TRY_1CB|GL_DUMP, &i_gh);
switch(error) {
case 0:
break;
if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
(name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
dir == sb->s_root->d_inode)) {
- gfs2_inode_hold(dip);
- ipp = dip;
- goto done;
+ igrab(dir);
+ return dir;
}
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
return ERR_PTR(error);
if (!is_root) {
- error = gfs2_repermission(dip->i_vnode, MAY_EXEC, NULL);
+ error = gfs2_repermission(dir, MAY_EXEC, NULL);
if (error)
goto out;
}
out:
gfs2_glock_dq_uninit(&d_gh);
-done:
if (error == -ENOENT)
return NULL;
if (error == 0) {
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
-
gfs2_alloc_get(dip);
error = gfs2_quota_lock(dip, uid, gid);
if (error)
goto out_quota;
- error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED +
- RES_QUOTA, 0);
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED + RES_QUOTA, 0);
if (error)
goto out_quota;
ul->ul_ut.ut_flags = 0;
error = gfs2_unlinked_ondisk_munge(sdp, ul);
-
- init_dinode(dip, gl, &ul->ul_ut.ut_inum,
- mode, uid, gid);
-
+ init_dinode(dip, gl, &ul->ul_ut.ut_inum, mode, uid, gid);
gfs2_quota_change(dip, +1, uid, gid);
-
gfs2_trans_end(sdp);
out_quota:
out:
gfs2_alloc_put(dip);
-
return error;
}
if (error)
goto fail_quota_locks;
- error = gfs2_trans_begin(sdp,
- sdp->sd_max_dirres +
+ error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
al->al_rgd->rd_ri.ri_length +
2 * RES_DINODE + RES_UNLINKED +
RES_STATFS + RES_QUOTA, 0);
return 0;
- fail_end_trans:
+fail_end_trans:
gfs2_trans_end(sdp);
- fail_ipreserv:
+fail_ipreserv:
if (dip->i_alloc.al_rgd)
gfs2_inplace_release(dip);
- fail_quota_locks:
+fail_quota_locks:
gfs2_quota_unlock(dip);
- fail:
+fail:
gfs2_alloc_put(dip);
-
return error;
}
if (ul->ul_ut.ut_inum.no_addr < dip->i_num.no_addr) {
gfs2_glock_dq(ghs);
- error = gfs2_glock_nq_num(sdp,
- ul->ul_ut.ut_inum.no_addr,
- &gfs2_inode_glops,
- LM_ST_EXCLUSIVE, GL_SKIP,
- ghs + 1);
+ error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
+ &gfs2_inode_glops, LM_ST_EXCLUSIVE,
+ GL_SKIP, ghs + 1);
if (error) {
gfs2_unlinked_put(sdp, ul);
return ERR_PTR(error);
if (error)
goto fail_gunlock2;
} else {
- error = gfs2_glock_nq_num(sdp,
- ul->ul_ut.ut_inum.no_addr,
- &gfs2_inode_glops,
- LM_ST_EXCLUSIVE, GL_SKIP,
- ghs + 1);
+ error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
+ &gfs2_inode_glops, LM_ST_EXCLUSIVE,
+ GL_SKIP, ghs + 1);
if (error)
goto fail_gunlock;
}
return ERR_PTR(-ENOMEM);
return inode;
- fail_iput:
+fail_iput:
gfs2_inode_put(ip);
- fail_gunlock2:
+fail_gunlock2:
gfs2_glock_dq_uninit(ghs + 1);
- fail_gunlock:
+fail_gunlock:
gfs2_glock_dq(ghs);
- fail:
+fail:
gfs2_unlinked_put(sdp, ul);
-
return ERR_PTR(error);
}
#include "sys.h"
#include "util.h"
+static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep,
+ unsigned long flags)
+{
+ struct gfs2_inode *ip = foo;
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&ip->i_inode);
+ atomic_set(&ip->i_count, 0);
+ ip->i_vnode = &ip->i_inode;
+ spin_lock_init(&ip->i_spin);
+ init_rwsem(&ip->i_rw_mutex);
+ memset(ip->i_cache, 0, sizeof(ip->i_cache));
+ }
+}
+
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
- 0, 0, NULL, NULL);
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_PANIC|SLAB_MEM_SPREAD),
+ gfs2_init_inode_once, NULL);
if (!gfs2_inode_cachep)
goto fail;
return 0;
}
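+/*
+ * gfs2_alloc_inode - called by the VFS to allocate an inode. We hand
+ * back the struct inode embedded in a new gfs2_inode and reset the
+ * per-inode fields that gfs2_init_inode_once() does not cover.
+ */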
+static struct inode *gfs2_alloc_inode(struct super_block *sb)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_inode *ip;
+
+ ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
+ if (!ip)
+ return NULL;
+ ip->i_flags = 0;
+ ip->i_gl = NULL;
+ ip->i_sbd = sdp;
+ ip->i_vnode = &ip->i_inode;
+ ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
+ ip->i_last_pfault = jiffies;
+ return &ip->i_inode;
+}
+
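+/*
+ * gfs2_destroy_inode - return the containing gfs2_inode to the slab.
+ * Since i_inode is the first member, the two pointers coincide.
+ */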
+static void gfs2_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(gfs2_inode_cachep, inode);
+}
+
struct super_operations gfs2_super_ops = {
+ .alloc_inode = gfs2_alloc_inode,
+ .destroy_inode = gfs2_destroy_inode,
.write_inode = gfs2_write_inode,
.put_super = gfs2_put_super,
.write_super = gfs2_write_super,