git.openfabrics.org - ~shefty/rdma-dev.git/commitdiff
sched: Optimize cgroup vs wakeup a bit
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 16 Sep 2009 11:44:33 +0000 (13:44 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 16 Sep 2009 14:44:32 +0000 (16:44 +0200)
We don't need to call update_shares() for each domain we iterate,
just for the largest one.

However, we should call it before wake_affine() as well, so that
it can use up-to-date values too.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
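
The net effect of the patch can be illustrated with a small user-space
sketch; everything below (struct domain, select_domain(), and the stub
update_shares()/wake_affine() helpers) is an illustrative stand-in, not
the scheduler's real API. The point it shows is the one from the message
above: update_shares() runs once right before the affinity check and once
for the widest surviving domain, instead of once per iterated domain.

/* Standalone sketch of the update-once pattern; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct domain {
	const char *name;
	struct domain *parent;	/* wider domain, NULL at the top */
	bool wake_affine;	/* stand-in for SD_WAKE_AFFINE */
};

static void update_shares(struct domain *d)
{
	/* In the kernel this recomputes group shares; here we just log. */
	printf("update_shares(%s)\n", d->name);
}

static bool wake_affine(struct domain *d)
{
	/* Stand-in for the real affinity heuristic. */
	(void)d;
	return false;
}

static struct domain *select_domain(struct domain *lowest, bool want_affine)
{
	struct domain *shares_updated = NULL;
	struct domain *sd = NULL;

	for (struct domain *tmp = lowest; tmp; tmp = tmp->parent) {
		if (want_affine && tmp->wake_affine) {
			/* Refresh shares before the affinity decision uses them. */
			update_shares(tmp);
			shares_updated = tmp;

			if (wake_affine(tmp))
				return tmp;
		}
		sd = tmp;	/* remember the widest matching domain */
	}

	/* One update for the widest domain, unless the loop already did it. */
	if (sd && sd != shares_updated)
		update_shares(sd);

	return sd;
}

int main(void)
{
	struct domain top  = { "top",  NULL, false };
	struct domain mid  = { "mid",  &top, true  };
	struct domain core = { "core", &mid, false };

	struct domain *sd = select_domain(&core, true);

	printf("selected: %s\n", sd ? sd->name : "none");
	return 0;
}
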
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h

diff --git a/kernel/sched.c b/kernel/sched.c
index af04ede6dd2f52a637ab17ca3b1e68fced55670c..5049d959bb26d95f04a6ade71497bf2a6ef51cb0 100644 (file)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -376,13 +376,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-       return 1;
-}
-#endif
-
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index acf16a8d934b1f5bbbeee67ef044f03d24cf56d6..722d392b0dacdc54122a77d009be10b9d6ce9ec9 100644 (file)
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 {
-       struct sched_domain *tmp, *sd = NULL;
+       struct sched_domain *tmp, *shares = NULL, *sd = NULL;
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        int new_cpu = cpu;
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
                                break;
                }
 
-               switch (sd_flag) {
-               case SD_BALANCE_WAKE:
-                       if (!sched_feat(LB_WAKEUP_UPDATE))
-                               break;
-               case SD_BALANCE_FORK:
-               case SD_BALANCE_EXEC:
-                       if (root_task_group_empty())
-                               break;
-                       update_shares(tmp);
-               default:
-                       break;
-               }
-
                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 
+                       if (sched_feat(LB_SHARES_UPDATE)) {
+                               update_shares(tmp);
+                               shares = tmp;
+                       }
+
                        if (wake_affine(tmp, p, sync)) {
                                new_cpu = cpu;
                                goto out;
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
                sd = tmp;
        }
 
+       if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
+               update_shares(sd);
+
        while (sd) {
                struct sched_group *group;
                int weight;
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index fd375675f83453fb485814489605236938b0afb8..d5059fd761d9bf49aca78e830a4fe9330bd464c9 100644 (file)
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -107,7 +107,7 @@ SCHED_FEAT(ARCH_POWER, 0)
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)
-SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
+SCHED_FEAT(LB_SHARES_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 
 /*