++ parent = parent->parent;
++ if (parent)
++ goto up;
++ out_unlock:
++ rcu_read_unlock();
++
++ return ret;
+ }
+
++ static int tg_nop(struct task_group *tg, void *data)
++ {
++ return 0;
+ }
++ #endif
++
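For orientation: walk_tg_tree(), whose tail is shown above, applies a "down" visitor when first entering each task_group and an "up" visitor when finally leaving it, passing an opaque data pointer through; a non-zero return from a visitor aborts the walk, and tg_nop() is the do-nothing visitor. A minimal sketch of a caller, assuming only what this patch shows (tg_count() and count_task_groups() are hypothetical, not part of the patch):

/* Hypothetical visitor: count the groups in the tree. */
static int tg_count(struct task_group *tg, void *data)
{
	(*(unsigned long *)data)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_task_groups(void)
{
	unsigned long nr_groups = 0;

	/* same calling convention as walk_tg_tree(tg_schedulable, tg_nop, &data) later in this patch */
	walk_tg_tree(tg_count, tg_nop, &nr_groups);
	return nr_groups;
}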
++ #ifdef CONFIG_SMP
++ static unsigned long source_load(int cpu, int type);
++ static unsigned long target_load(int cpu, int type);
++ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
++
++ static unsigned long cpu_avg_load_per_task(int cpu)
++ {
++ struct rq *rq = cpu_rq(cpu);
++
++ if (rq->nr_running)
++ rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++
++ return rq->avg_load_per_task;
++ }
++
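cpu_avg_load_per_task() caches the runqueue's weight divided by its number of runnable tasks and hands back the previously cached value when nothing is runnable. A minimal userspace sketch of the same arithmetic (1024 is the load weight of a nice-0 task; all values are illustrative):

#include <stdio.h>

static unsigned long avg_load_per_task;	/* stands in for rq->avg_load_per_task */

static unsigned long cpu_avg_load(unsigned long load_weight, unsigned long nr_running)
{
	if (nr_running)
		avg_load_per_task = load_weight / nr_running;

	return avg_load_per_task;	/* cached value when nothing is runnable */
}

int main(void)
{
	printf("%lu\n", cpu_avg_load(3 * 1024, 3));	/* three nice-0 tasks -> 1024 */
	printf("%lu\n", cpu_avg_load(0, 0));		/* still 1024, the cached value */
	return 0;
}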
++ #ifdef CONFIG_FAIR_GROUP_SCHED
+
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
static unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
-- return 1ULL << 16;
++ return 1ULL << 20;
-- return div64_u64(runtime << 16, period);
++ return div64_u64(runtime << 20, period);
}
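to_ratio() expresses runtime/period as a fixed-point fraction; this hunk widens it from 16 to 20 fractional bits, presumably so that sums of many small per-group ratios lose less precision. A minimal userspace sketch of the new scale, using the familiar 950 ms runtime per 1 s period default as the example:

#include <stdio.h>
#include <stdint.h>

/* mirrors to_ratio() with the new 20-bit fixed point */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* 950 ms of runtime every 1 s, in nanoseconds */
	printf("%llu\n",
	       (unsigned long long)to_ratio(1000000000ULL, 950000000ULL));
	/* prints 996147, i.e. roughly 0.95 * 2^20 */
	return 0;
}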
-- #ifdef CONFIG_CGROUP_SCHED
-- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
++ /* Must be called with tasklist_lock held */
++ static inline int tg_has_rt_tasks(struct task_group *tg)
{
-- struct task_group *tgi, *parent = tg->parent;
-- unsigned long total = 0;
++ struct task_struct *g, *p;
-- if (!parent) {
-- if (global_rt_period() < period)
-- return 0;
++ do_each_thread(g, p) {
++ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
++ return 1;
++ } while_each_thread(g, p);
-- return to_ratio(period, runtime) <
-- to_ratio(global_rt_period(), global_rt_runtime());
-- }
++ return 0;
++ }
-- if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period)
-- return 0;
++ struct rt_schedulable_data {
++ struct task_group *tg;
++ u64 rt_period;
++ u64 rt_runtime;
++ };
-- rcu_read_lock();
-- list_for_each_entry_rcu(tgi, &parent->children, siblings) {
-- if (tgi == tg)
-- continue;
++ static int tg_schedulable(struct task_group *tg, void *data)
++ {
++ struct rt_schedulable_data *d = data;
++ struct task_group *child;
++ unsigned long total, sum = 0;
++ u64 period, runtime;
++
++ period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ runtime = tg->rt_bandwidth.rt_runtime;
-- total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
-- tgi->rt_bandwidth.rt_runtime);
++ if (tg == d->tg) {
++ period = d->rt_period;
++ runtime = d->rt_runtime;
}
-- rcu_read_unlock();
-- return total + to_ratio(period, runtime) <=
-- to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
-- parent->rt_bandwidth.rt_runtime);
-- }
-- #elif defined CONFIG_USER_SCHED
-- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
-- {
-- struct task_group *tgi;
-- unsigned long total = 0;
-- unsigned long global_ratio =
-- to_ratio(global_rt_period(), global_rt_runtime());
++ if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
++ return -EBUSY;
-- rcu_read_lock();
-- list_for_each_entry_rcu(tgi, &task_groups, list) {
-- if (tgi == tg)
-- continue;
++ total = to_ratio(period, runtime);
+
- total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
- tgi->rt_bandwidth.rt_runtime);
++ list_for_each_entry_rcu(child, &tg->children, siblings) {
++ period = ktime_to_ns(child->rt_bandwidth.rt_period);
++ runtime = child->rt_bandwidth.rt_runtime;
++
++ if (child == d->tg) {
++ period = d->rt_period;
++ runtime = d->rt_runtime;
++ }
+
++ sum += to_ratio(period, runtime);
}
-- rcu_read_unlock();
-- return total + to_ratio(period, runtime) < global_ratio;
++ if (sum > total)
++ return -EINVAL;
++
++ return 0;
}
-- #endif
-- /* Must be called with tasklist_lock held */
-- static inline int tg_has_rt_tasks(struct task_group *tg)
++ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
-- struct task_struct *g, *p;
-- do_each_thread(g, p) {
-- if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
-- return 1;
-- } while_each_thread(g, p);
-- return 0;
++ struct rt_schedulable_data data = {
++ .tg = tg,
++ .rt_period = period,
++ .rt_runtime = runtime,
++ };
++
++ return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
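Reading the two new functions together: tg_schedulable() computes the group's own ratio (total), substituting the proposed period/runtime for the group being changed, and rejects the change if the ratios of its children add up to more than that (sum > total, -EINVAL) or, when RT bandwidth enforcement is enabled, if the runtime is being zeroed while the group still has RT tasks (-EBUSY). __rt_schedulable() then runs that check over every group via walk_tg_tree(), not just against the siblings of one parent as the deleted code did. A small userspace sketch of the sum-versus-total comparison, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;	/* same 20-bit scale as above */
}

int main(void)
{
	/* hypothetical values: parent gets 0.5, its children ask for 0.3 + 0.25 */
	uint64_t total = to_ratio(1000000, 500000);
	uint64_t sum   = to_ratio(1000000, 300000) + to_ratio(1000000, 250000);

	printf("%s\n", sum > total ? "-EINVAL" : "ok");	/* -EINVAL: 0.55 > 0.5 */
	return 0;
}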
static int tg_set_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime)
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
-- if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
-- err = -EBUSY;
++ err = __rt_schedulable(tg, rt_period, rt_runtime);
++ if (err)
goto unlock;
- }
- if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
- err = -EINVAL;
- goto unlock;
-- }
spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
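With the errno convention, tg_set_bandwidth() (the hunk above) now simply propagates whatever the tree walk returned instead of deriving -EBUSY/-EINVAL itself. For example, with cgroup-v1 RT group scheduling, writing 0 to a group's cpu.rt_runtime_us while the group still contains real-time tasks is expected to fail with EBUSY; a rough userspace sketch (the cgroup mount point and group name are hypothetical):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical cgroup-v1 path; adjust to the actual mount and group */
	int fd = open("/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "0", 1) < 0)	/* expected to fail with EBUSY if RT tasks remain */
		fprintf(stderr, "rejected: %s\n", strerror(errno));
	close(fd);
	return 0;
}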