if (kref_put(&gl->gl_ref, kill_glock)) {
list_del_init(&gl->gl_list);
write_unlock(&bucket->hb_lock);
+ BUG_ON(spin_is_locked(&gl->gl_spin));
glock_free(gl);
rv = 1;
goto out;
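
The added BUG_ON() pins down an invariant of the teardown path: kref_put() calls kill_glock() only when the last reference drops, so by the time glock_free() runs, no CPU may still hold gl_spin. A minimal userspace sketch of the same put-then-assert-then-free shape, using pthreads (names are illustrative, not GFS2's):

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refcount;
		pthread_mutex_t lock;
	};

	/* Drop one reference; free the object when the last one goes. */
	static void obj_put(struct obj *o)
	{
		if (atomic_fetch_sub(&o->refcount, 1) == 1) {
			/* The lock must be free here: any remaining holder
			 * would be touching memory we are about to release. */
			assert(pthread_mutex_trylock(&o->lock) == 0);
			pthread_mutex_unlock(&o->lock);
			pthread_mutex_destroy(&o->lock);
			free(o);
		}
	}
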
*
*/
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
+void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
struct gfs2_holder *gh)
{
+ flags |= GL_NEVER_RECURSE;
INIT_LIST_HEAD(&gh->gh_list);
gh->gh_gl = gl;
gh->gh_ip = (unsigned long)__builtin_return_address(0);
- gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
+ gh->gh_owner = current;
gh->gh_state = state;
gh->gh_flags = flags;
gh->gh_error = 0;
*
*/
-void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
+void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
gh->gh_state = state;
- gh->gh_flags = flags;
+ gh->gh_flags = flags | GL_NEVER_RECURSE;
if (gh->gh_state == LM_ST_EXCLUSIVE)
gh->gh_flags |= GL_LOCAL_EXCL;
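
Both gfs2_holder_init() and gfs2_holder_reinit() now force GL_NEVER_RECURSE into the flags and unconditionally record current as gh_owner, so recursion is detected by comparing owners rather than being opted out of per call site (the old NULL-owner escape hatch is gone). A sketch of that idea with pthreads standing in for tasks (the names and flag value here are illustrative only):

	#include <pthread.h>

	#define GL_NEVER_RECURSE 0x00000001	/* illustrative value only */

	struct holder {
		pthread_t owner;	/* always recorded now */
		unsigned flags;
	};

	static void holder_init(struct holder *h, unsigned flags)
	{
		h->flags = flags | GL_NEVER_RECURSE;	/* forced, as in the patch */
		h->owner = pthread_self();
	}

	/* A queued request can later be checked against existing holders: */
	static int same_owner(const struct holder *existing)
	{
		return pthread_equal(existing->owner, pthread_self());
	}
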
struct gfs2_holder *tmp_gh, *safe;
int found = 0;
+ BUG_ON(!spin_is_locked(&gl->gl_spin));
+
printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
gl->gl_name.ln_type);
struct gfs2_holder *tmp_gh, *last_gh = NULL;
int found = 0;
+ BUG_ON(!spin_is_locked(&gl->gl_spin));
+
if (gfs2_assert_warn(sdp, gh->gh_owner))
return;
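
Both recursion handlers gain BUG_ON(!spin_is_locked(&gl->gl_spin)), turning the implicit "caller holds gl_spin" contract into a hard check. One caveat worth knowing: on uniprocessor builds without CONFIG_DEBUG_SPINLOCK, spin_is_locked() is hard-wired to 0, so this form of assertion misfires there; later kernels express the same intent with lockdep_assert_held(). A userspace analogue of the precondition check (illustrative; trylock returning EBUSY is the closest stand-in for spin_is_locked()):

	#include <assert.h>
	#include <errno.h>
	#include <pthread.h>

	/* Assert that *somebody* holds the mutex.  Like spin_is_locked(),
	 * this cannot prove that the caller is the holder. */
	static void assert_held(pthread_mutex_t *m)
	{
		assert(pthread_mutex_trylock(m) == EBUSY);
	}

	static void walk_holders_locked(pthread_mutex_t *list_lock)
	{
		assert_held(list_lock);	/* precondition, as in the patch */
		/* ... safe to walk the holder list here ... */
	}
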
* @gl: the glock
*
*/
-
static void run_queue(struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
run_queue(gl);
+ BUG_ON(!spin_is_locked(&gl->gl_spin));
spin_unlock(&gl->gl_spin);
}
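
Here the assertion sits between run_queue() and the unlock, so it is a postcondition: run_queue() must return with gl_spin still held, and any path inside it that dropped the lock and leaked out would trip immediately rather than corrupting state later. The same lock-balance check in the userspace idiom above (illustrative):

	#include <assert.h>
	#include <errno.h>
	#include <pthread.h>

	static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

	static void process_queue(void)
	{
		/* ... must not release q_lock on any path ... */
	}

	static void unlock_and_run(void)
	{
		pthread_mutex_lock(&q_lock);
		process_queue();
		/* Postcondition: process_queue() left the lock held. */
		assert(pthread_mutex_trylock(&q_lock) == EBUSY);
		pthread_mutex_unlock(&q_lock);
	}
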
return 0;
- fail:
+fail:
print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
existing->gh_ip);
print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_holder *existing;
+ BUG_ON(!gh->gh_owner);
+
if (!gh->gh_owner)
goto out;
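
Given the initialiser changes above, gh_owner is always set, so the new BUG_ON(!gh->gh_owner) makes the if (!gh->gh_owner) test just below it unreachable; it reads as dead code kept only until the remaining recursion plumbing is removed.
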
if (!(gh->gh_flags & GL_ASYNC)) {
error = glock_wait_internal(gh);
if (error == GLR_CANCELED) {
- msleep(1000);
+ msleep(100);
goto restart;
}
}
else if (list_empty(&gh->gh_list)) {
if (gh->gh_error == GLR_CANCELED) {
spin_unlock(&gl->gl_spin);
- msleep(1000);
+ msleep(100);
if (gfs2_glock_nq(gh))
return 1;
return 0;
error = glock_wait_internal(gh);
if (error == GLR_CANCELED) {
- msleep(1000);
+ msleep(100);
gh->gh_flags &= ~GL_ASYNC;
error = gfs2_glock_nq(gh);
}
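
All three GLR_CANCELED paths in this excerpt (the synchronous nq, the async completion check, and the wait path that falls back to a synchronous retry) now back off for 100 ms instead of a full second before trying again, cutting worst-case retry latency tenfold while still yielding to whichever node canceled the request. The common shape is sleep-then-requeue; as a userspace sketch (illustrative names and error value):

	#include <unistd.h>

	#define GLR_CANCELED (-2)	/* illustrative value only */

	/* Retry an acquisition that a remote node may cancel, pausing
	 * briefly so the canceling side can make progress first. */
	static int acquire_with_retry(int (*try_acquire)(void))
	{
		for (;;) {
			int err = try_acquire();
			if (err != GLR_CANCELED)
				return err;
			usleep(100 * 1000);	/* 100 ms, matching msleep(100) */
		}
	}
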
if (!list_empty(&gl->gl_reclaim)) {
list_del_init(&gl->gl_reclaim);
atomic_dec(&sdp->sd_reclaim_count);
+ spin_unlock(&sdp->sd_reclaim_lock);
released = gfs2_glock_put(gl);
gfs2_assert(sdp, !released);
+ } else {
+ spin_unlock(&sdp->sd_reclaim_lock);
}
- spin_unlock(&sdp->sd_reclaim_lock);
if (gfs2_glmutex_trylock(gl)) {
if (gl->gl_ops == &gfs2_inode_glops) {
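
The reclaim hunk narrows the scope of sd_reclaim_lock: the list removal and counter update stay under the spinlock, but gfs2_glock_put(), which can take the glock all the way to glock_free() (see the first hunk's assertion), now runs only after the lock is dropped, and the explicit else keeps every path unlocking exactly once. The restructured shape, as a compilable sketch (single-threaded refcounting for brevity; the kernel side uses kref):

	#include <pthread.h>
	#include <stdlib.h>

	struct glock { int on_list; int refcount; };

	static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;

	/* May free the object, so it must not run under reclaim_lock. */
	static int glock_put(struct glock *gl)
	{
		if (--gl->refcount == 0) {
			free(gl);
			return 1;
		}
		return 0;
	}

	static void reclaim_one(struct glock *gl)
	{
		pthread_mutex_lock(&reclaim_lock);
		if (gl->on_list) {
			gl->on_list = 0;
			pthread_mutex_unlock(&reclaim_lock);
			glock_put(gl);	/* lock already dropped */
		} else {
			pthread_mutex_unlock(&reclaim_lock);
		}
	}
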
int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
int gfs2_glock_put(struct gfs2_glock *gl);
-
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
+void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
struct gfs2_holder *gh);
-void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh);
+void gfs2_holder_reinit(unsigned int state, unsigned flags,
+ struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
int flags, gfp_t gfp_flags);
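
The header hunks keep glock.h in step with the .c signature change (int flags becomes unsigned flags for gfs2_holder_init() and gfs2_holder_reinit()); note that gfs2_holder_get() is still declared with int flags, leaving one signature on the old type.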