ocfs2: temporarily disable automatic lock migration
author    Kurt Hackel <kurt.hackel@oracle.com>
          Mon, 1 May 2006 20:30:49 +0000 (13:30 -0700)
committer Mark Fasheh <mark.fasheh@oracle.com>
          Mon, 26 Jun 2006 21:43:07 +0000 (14:43 -0700)
Now we never change the owner of a lock resource until unmount or node
death. Automatic lock migration will be re-enabled once some issues in the
algorithm used have been resolved.

Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
fs/ocfs2/dlm/dlmlock.c
fs/ocfs2/dlm/dlmthread.c
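
As a rough illustration (not part of the patch), the dlmthread.c hunk below
boils down to the following decision. should_purge_lockres() is a hypothetical
helper used only for this sketch; struct dlm_ctxt, struct dlm_lock_resource,
__dlm_lockres_unused(), res->owner and dlm->node_num are taken from the hunks
that follow, and the sketch omits the purge-list bookkeeping the real code does:

/*
 * Hypothetical sketch of the purge policy this commit introduces: an
 * unused lock resource mastered by this node is never purged, so its
 * ownership does not migrate until unmount or node death.  The actual
 * code below additionally removes such a resource from the purge list
 * if it is already queued there.
 */
static int should_purge_lockres(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res)
{
        if (!__dlm_lockres_unused(res))
                return 0;       /* still has locks/users: keep it */
        if (res->owner == dlm->node_num)
                return 0;       /* we master it: keep it, no migration */
        return 1;               /* unused and mastered remotely: may purge */
}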

diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 675123c308528c7eac99f8c71a1b2e2f1d7355d2..0ff934874942a423e19478331423408017db0dc3 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -227,14 +227,18 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        lock->lock_pending = 0;
        if (status != DLM_NORMAL) {
-               if (status != DLM_NOTQUEUED)
+               if (status != DLM_NOTQUEUED) {
+                       /*
+                        * DO NOT call calc_usage, as this would unhash
+                        * the remote lockres before we ever get to use
+                        * it.  treat as if we never made any change to
+                        * the lockres.
+                        */
+                       lockres_changed = 0;
                        dlm_error(status);
+               }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
-               /* do NOT call calc_usage, as this would unhash the remote
-                * lockres before we ever get to use it.  treat as if we
-                * never made any change to the lockres. */
-               lockres_changed = 0;
        } else if (dlm_is_recovery_lock(res->lockname.name, 
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 610dc76a851b985a5364c88006cfb47b549196c5..c1c10fd1a5a7c3aeb84bcc298087c9db4adfbcab 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -106,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
        assert_spin_locked(&res->spinlock);
 
        if (__dlm_lockres_unused(res)){
+               /* For now, just keep any resource we master */
+               if (res->owner == dlm->node_num)
+               {
+                       if (!list_empty(&res->purge)) {
+                               mlog(0, "we master %s:%.*s, but it is on "
+                                    "the purge list.  Removing\n",
+                                    dlm->name, res->lockname.len,
+                                    res->lockname.name);
+                               list_del_init(&res->purge);
+                               dlm->purge_count--;
+                       }
+                       return;
+               }
+
                if (list_empty(&res->purge)) {
                        mlog(0, "putting lockres %.*s from purge list\n",
                             res->lockname.len, res->lockname.name);