#ifdef HAVE_PERCPU_SB
STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
- int, int);
+ int);
+STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
+ int);
STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
int64_t, int);
STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else
#define xfs_icsb_destroy_counters(mp) do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
+#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
#endif
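/*
 * Without HAVE_PERCPU_SB the per-cpu superblock counters do not exist,
 * so the stubs above compile the destroy, balance and modify helpers
 * away to empty statements; call sites need no #ifdef of their own.
 */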
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
xfs_icsb_lock(mp);
- xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp);
break;
case CPU_DEAD:
memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
- xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
- XFS_ICSB_SB_LOCKED, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
- XFS_ICSB_SB_LOCKED, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
- XFS_ICSB_SB_LOCKED, 0);
+ xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
+ xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
+ xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
spin_unlock(&mp->m_sb_lock);
xfs_icsb_unlock(mp);
break;
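/*
 * The CPU_DEAD path above is already under m_sb_lock (note the
 * spin_unlock() before xfs_icsb_unlock()), so it has to use the
 * _locked variant; calling xfs_icsb_balance_counter() here would
 * attempt to take m_sb_lock a second time.
 */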
/*
 * start with all counters disabled so that the
 * initial balance kicks us off correctly
 */
mp->m_icsb_counters = -1;
- xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+ xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp);
}
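/*
 * Setting m_icsb_counters to -1 marks every counter as disabled, so
 * the three balance calls above perform the first enable and spread
 * the global superblock counts across the online CPUs.
 */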
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
-xfs_icsb_balance_counter(
+xfs_icsb_balance_counter_locked(
xfs_mount_t *mp,
xfs_sb_field_t field,
- int flags,
int min_per_cpu)
{
uint64_t count, resid;
int weight = num_online_cpus();
uint64_t min = (uint64_t)min_per_cpu;
- if (!(flags & XFS_ICSB_SB_LOCKED))
- spin_lock(&mp->m_sb_lock);
-
/* disable counter and sync counter */
xfs_icsb_disable_counter(mp, field);
/* update counters - first CPU gets residual */
switch (field) {
case XFS_SBS_ICOUNT:
count = mp->m_sb.sb_icount;
resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
- goto out;
+ return;
break;
case XFS_SBS_IFREE:
count = mp->m_sb.sb_ifree;
resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
- goto out;
+ return;
break;
case XFS_SBS_FDBLOCKS:
count = mp->m_sb.sb_fdblocks;
resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
- goto out;
+ return;
break;
default:
BUG();
}
xfs_icsb_enable_counter(mp, field, count, resid);
-out:
- if (!(flags & XFS_ICSB_SB_LOCKED))
- spin_unlock(&mp->m_sb_lock);
+}
+
+STATIC void
+xfs_icsb_balance_counter(
+ xfs_mount_t *mp,
+ xfs_sb_field_t fields,
+ int min_per_cpu)
+{
+ spin_lock(&mp->m_sb_lock);
+ xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
+ spin_unlock(&mp->m_sb_lock);
}
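/*
 * Rough sketch of a single balance, assuming 4 online CPUs and
 * sb_fdblocks == 4003 (the numbers are illustrative only):
 *
 *	count = 4003; resid = do_div(count, 4);  -> count = 1000, resid = 3
 *	if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
 *		return;                           -> counter stays disabled
 *	xfs_icsb_enable_counter(mp, field, count, resid);
 *	                                          -> 1000 per CPU, residual 3
 *
 * Callers pick the variant matching their locking context:
 * xfs_icsb_balance_counter() when m_sb_lock is not yet held (CPU_ONLINE,
 * mount-time init, the modify slow paths below) and
 * xfs_icsb_balance_counter_locked() when it already is (CPU_DEAD).
 */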
STATIC int
* we are done.
*/
if (ret != ENOSPC)
- xfs_icsb_balance_counter(mp, field, 0, 0);
+ xfs_icsb_balance_counter(mp, field, 0);
xfs_icsb_unlock(mp);
return ret;
* will either succeed through the fast path or slow path without
* another balance operation being required.
*/
- xfs_icsb_balance_counter(mp, field, 0, delta);
+ xfs_icsb_balance_counter(mp, field, delta);
xfs_icsb_unlock(mp);
goto again;
}
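/*
 * Passing 'delta' as min_per_cpu asks the rebalance to leave each CPU
 * with at least 'delta' worth of the counter where possible, so the
 * retried modification (goto again) should complete on its own per-cpu
 * counter without forcing yet another balance.
 */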