ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
        }
 
-       mutex_lock(&mp->m_icsb_mutex);
+       xfs_icsb_lock(mp);
        xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
        xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
        xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
-       mutex_unlock(&mp->m_icsb_mutex);
+       xfs_icsb_unlock(mp);
 
        mp->m_sb_bp = bp;
        xfs_buf_relse(bp);
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
                break;
        case CPU_ONLINE:
-               mutex_lock(&mp->m_icsb_mutex);
+               xfs_icsb_lock(mp);
                xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
                xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
                xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
                /* Disable all the counters, then fold the dead cpu's
                 * count into the total on the global superblock and
                 * re-enable the counters. */
-               mutex_lock(&mp->m_icsb_mutex);
+               xfs_icsb_lock(mp);
                s = XFS_SB_LOCK(mp);
                xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
                xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
                xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
                                         XFS_ICSB_SB_LOCKED, 0);
                XFS_SB_UNLOCK(mp, s);
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                break;
        }
 
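The CPU_DEAD case above is the subtlest of the notifier paths: the per-cpu counters are quiesced, the departing CPU's contribution is folded into the global superblock totals under XFS_SB_LOCK, and the counters are then rebalanced. A minimal userspace analogue of the fold step, sketched with pthreads in place of the kernel primitives (all names below are illustrative, not from the patch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

/* Stand-ins for the superblock total and the per-cpu counters. */
static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t global_count;
static uint64_t percpu_count[NCPUS];

/*
 * On CPU removal: fold the dead CPU's private count into the global
 * total and zero its slot, all under the global lock, so no count is
 * lost and a later rebalance starts from a clean state.
 */
static void fold_dead_cpu(int cpu)
{
	pthread_mutex_lock(&sb_lock);
	global_count += percpu_count[cpu];
	percpu_count[cpu] = 0;
	pthread_mutex_unlock(&sb_lock);
}

int main(void)
{
	percpu_count[2] = 100;	/* pretend CPU 2 accumulated a count */
	fold_dead_cpu(2);
	printf("global = %llu\n", (unsigned long long)global_count);
	return 0;
}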
                unregister_hotcpu_notifier(&mp->m_icsb_notifier);
                free_percpu(mp->m_sb_cnts);
        }
+       mutex_destroy(&mp->m_icsb_mutex);
 }
 
 STATIC_INLINE void
         * the superblock lock. We still need to hold the superblock
         * lock, however, when we modify the global structures.
         */
-       mutex_lock(&mp->m_icsb_mutex);
+       xfs_icsb_lock(mp);
 
        /*
         * Now running atomically.
         *
         * If the counter is enabled, someone has beaten us to rebalancing.
         * Drop the lock and try again in the fast path....
         */
        if (!(xfs_icsb_counter_disabled(mp, field))) {
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                goto again;
        }
 
         */
        if (ret != ENOSPC)
                xfs_icsb_balance_counter(mp, field, 0, 0);
-       mutex_unlock(&mp->m_icsb_mutex);
+       xfs_icsb_unlock(mp);
        return ret;
 
 balance_counter:
         * do more balances than strictly necessary but it is not
         * the common slowpath case.
         */
-       mutex_lock(&mp->m_icsb_mutex);
+       xfs_icsb_lock(mp);
 
        /*
         * Now running atomically.
         *
         * This will leave the counter in the correct state for future
         * accesses. After the rebalance, we simply try again and our retry
         * will either succeed through the fast path or slow path without
         * another balance operation being required.
         */
        xfs_icsb_balance_counter(mp, field, 0, delta);
-       mutex_unlock(&mp->m_icsb_mutex);
+       xfs_icsb_unlock(mp);
        goto again;
 }
 
 
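The slow path of xfs_icsb_modify_counters() above leans on a recheck-and-retry idiom: take the mutex, re-test the counter's disabled state in case another thread rebalanced while we waited, and if so drop the lock and go back to the fast path. A compressed sketch of that idiom, with one flag standing in for the per-counter state that xfs_icsb_counter_disabled() tests (illustrative names, not the XFS code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t icsb_lock = PTHREAD_MUTEX_INITIALIZER;
static bool counter_disabled = true;	/* stand-in for per-counter state */

static bool fast_path_ok(void)
{
	return !counter_disabled;	/* enabled: lock-free path works */
}

static void rebalance(void)
{
	counter_disabled = false;	/* re-enable after redistributing */
}

static void modify_counter(void)
{
again:
	if (fast_path_ok())
		return;			/* common case: mutex never taken */

	pthread_mutex_lock(&icsb_lock);
	/*
	 * Now serialised. Someone may have rebalanced while we slept on
	 * the mutex; if so, drop it and retry the fast path.
	 */
	if (!counter_disabled) {
		pthread_mutex_unlock(&icsb_lock);
		goto again;
	}
	rebalance();
	pthread_mutex_unlock(&icsb_lock);
	goto again;			/* retry; fast path should now work */
}

int main(void)
{
	modify_counter();
	printf("disabled = %d\n", counter_disabled);
	return 0;
}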
 #ifndef __XFS_MOUNT_H__
 #define        __XFS_MOUNT_H__
 
+
 typedef struct xfs_trans_reservations {
        uint    tr_write;       /* extent alloc trans */
        uint    tr_itruncate;   /* truncate trans */
        return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
 }
 
+/*
+ * Per-cpu superblock locking functions
+ */
+#ifdef HAVE_PERCPU_SB
+STATIC_INLINE void
+xfs_icsb_lock(xfs_mount_t *mp)
+{
+       mutex_lock(&mp->m_icsb_mutex);
+}
+
+STATIC_INLINE void
+xfs_icsb_unlock(xfs_mount_t *mp)
+{
+       mutex_unlock(&mp->m_icsb_mutex);
+}
+#else
+#define xfs_icsb_lock(mp)
+#define xfs_icsb_unlock(mp)
+#endif
+
 /*
  * This structure is for use by the xfs_mod_incore_sb_batch() routine.
  */
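The xfs_icsb_lock()/xfs_icsb_unlock() wrappers added above follow a common kernel pattern: name the lock operation once, and supply empty macro stubs when the feature (here HAVE_PERCPU_SB) is compiled out, so callers never carry their own #ifdefs and any future lock-ordering assertions land in exactly one place. A standalone sketch of that pattern (generic names, not the XFS ones):

#include <pthread.h>
#include <stdio.h>

#define HAVE_FEATURE 1	/* set to 0 and the lock calls compile away */

struct ctx {
#if HAVE_FEATURE
	pthread_mutex_t lock;
#endif
	long value;
};

#if HAVE_FEATURE
static inline void ctx_lock(struct ctx *c)   { pthread_mutex_lock(&c->lock); }
static inline void ctx_unlock(struct ctx *c) { pthread_mutex_unlock(&c->lock); }
#else
/* As in the patch, the stubs expand to nothing. */
#define ctx_lock(c)
#define ctx_unlock(c)
#endif

int main(void)
{
	struct ctx c = {
#if HAVE_FEATURE
		.lock = PTHREAD_MUTEX_INITIALIZER,
#endif
		.value = 0,
	};

	ctx_lock(&c);	/* callers stay unconditional */
	c.value++;
	ctx_unlock(&c);
	printf("%ld\n", c.value);
	return 0;
}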