- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * Number of tasks that a single latency period is shared between
+ * before the period itself is stretched (replaces the old
+ * sysctl_sched_min_granularity nanosecond tunable):
+ * (default: 20)
 */
-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+const_debug unsigned int sysctl_sched_nr_latency = 20;
/*
* sys_sched_yield() compat mode
*/
const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
-unsigned int sysctl_sched_runtime_limit __read_mostly;
-
extern struct sched_class fair_sched_class;
/**************************************************************
static inline u64
max_vruntime(u64 min_vruntime, u64 vruntime)
{
- if ((vruntime > min_vruntime) ||
- (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta > 0)
min_vruntime = vruntime;
return min_vruntime;
}
-static inline void
-set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
+static inline u64
+min_vruntime(u64 min_vruntime, u64 vruntime)
{
- struct sched_entity *se;
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta < 0)
+ min_vruntime = vruntime;
- cfs_rq->rb_leftmost = leftmost;
- if (leftmost)
- se = rb_entry(leftmost, struct sched_entity, run_node);
+ return min_vruntime;
}
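
A minimal userspace sketch (not part of the patch; the name pick_max is invented) of why the signed-delta comparison introduced above stays correct once the unsigned vruntime clock wraps, where a plain `>` would pick the stale value:

	#include <stdint.h>
	#include <stdio.h>

	/* same idea as max_vruntime(): decide "newer" via a signed difference */
	static uint64_t pick_max(uint64_t min_vruntime, uint64_t vruntime)
	{
		int64_t delta = (int64_t)(vruntime - min_vruntime);

		return delta > 0 ? vruntime : min_vruntime;
	}

	int main(void)
	{
		/* vruntime has wrapped past zero, min_vruntime sits just below the wrap */
		uint64_t min_vr = UINT64_MAX - 100;
		uint64_t vr = 50;		/* logically 151 ns ahead of min_vr */

		/* a plain compare picks the stale value, the signed delta does not */
		printf("plain  : %llu\n",
		       (unsigned long long)(vr > min_vr ? vr : min_vr));
		printf("signed : %llu\n", (unsigned long long)pick_max(min_vr, vr));
		return 0;
	}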
static inline s64
* used):
*/
if (leftmost)
- set_leftmost(cfs_rq, &se->run_node);
+ cfs_rq->rb_leftmost = &se->run_node;
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node)
- set_leftmost(cfs_rq, rb_next(&se->run_node));
+ cfs_rq->rb_leftmost = rb_next(&se->run_node);
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
static u64 __sched_period(unsigned long nr_running)
{
u64 period = sysctl_sched_latency;
- unsigned long nr_latency =
- sysctl_sched_latency / sysctl_sched_min_granularity;
+ unsigned long nr_latency = sysctl_sched_nr_latency;
if (unlikely(nr_running > nr_latency)) {
period *= nr_running;
return period;
}
+static u64 __sched_vslice(unsigned long nr_running)
+{
+ unsigned long period = sysctl_sched_latency;
+ unsigned long nr_latency = sysctl_sched_nr_latency;
+
+ if (unlikely(nr_running > nr_latency))
+ nr_running = nr_latency;
+
+ period /= nr_running;
+
+ return (u64)period;
+}
+
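
A rough userspace sketch of the slice arithmetic above, assuming a 20 ms sysctl_sched_latency (the real default may differ); it only shows that the per-entity slice shrinks with load until it bottoms out at latency / nr_latency:

	#include <stdio.h>

	/* illustrative stand-ins for the sysctls; real defaults may differ */
	static const unsigned long sched_latency = 20000000UL;	/* 20 ms in ns */
	static const unsigned long sched_nr_latency = 20;

	/* mirrors __sched_vslice(): latency / min(nr_running, nr_latency) */
	static unsigned long vslice(unsigned long nr_running)
	{
		if (nr_running > sched_nr_latency)
			nr_running = sched_nr_latency;

		return sched_latency / nr_running;
	}

	int main(void)
	{
		unsigned long nr;

		/* the slice shrinks with load but bottoms out at latency / nr_latency */
		for (nr = 1; nr <= 40; nr *= 2)
			printf("nr_running=%2lu  vslice=%lu ns\n", nr, vslice(nr));
		return 0;
	}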
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
unsigned long delta_exec)
{
unsigned long delta_exec_weighted;
- u64 next_vruntime, min_vruntime;
+ u64 vruntime;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
* value tracking the leftmost vruntime in the tree.
*/
if (first_fair(cfs_rq)) {
- next_vruntime = __pick_next_entity(cfs_rq)->vruntime;
-
- /* min_vruntime() := !max_vruntime() */
- min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
- if (min_vruntime == next_vruntime)
- min_vruntime = curr->vruntime;
- else
- min_vruntime = next_vruntime;
+ vruntime = min_vruntime(curr->vruntime,
+ __pick_next_entity(cfs_rq)->vruntime);
} else
- min_vruntime = curr->vruntime;
+ vruntime = curr->vruntime;
cfs_rq->min_vruntime =
- max_vruntime(cfs_rq->min_vruntime, min_vruntime);
+ max_vruntime(cfs_rq->min_vruntime, vruntime);
}
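
An illustrative sketch (userspace, invented helper names) of the min_vruntime bookkeeping in the hunk above: the candidate is the smaller of 'current' and the leftmost queued entity, but the per-queue value is only ever allowed to move forward:

	#include <stdint.h>
	#include <stdio.h>

	/* wrap-safe max/min, mirroring max_vruntime()/min_vruntime() above */
	static uint64_t max_vr(uint64_t a, uint64_t b)
	{
		return (int64_t)(b - a) > 0 ? b : a;
	}

	static uint64_t min_vr(uint64_t a, uint64_t b)
	{
		return (int64_t)(b - a) < 0 ? b : a;
	}

	int main(void)
	{
		uint64_t cfs_min_vruntime = 1000;
		uint64_t curr = 1200, leftmost = 1100;

		/* candidate: the smaller of 'current' and the leftmost queued entity */
		uint64_t cand = min_vr(curr, leftmost);

		/* the per-queue value is only ever allowed to move forward */
		cfs_min_vruntime = max_vr(cfs_min_vruntime, cand);
		printf("min_vruntime = %llu\n", (unsigned long long)cfs_min_vruntime);
		return 0;
	}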
static void update_curr(struct cfs_rq *cfs_rq)
#endif
}
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+ if (d < 0)
+ d = -d;
+
+ if (d > 3*sysctl_sched_latency)
+ schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
- u64 min_runtime, latency;
+ u64 vruntime;
- min_runtime = cfs_rq->min_vruntime;
+ vruntime = cfs_rq->min_vruntime;
if (sched_feat(USE_TREE_AVG)) {
struct sched_entity *last = __pick_last_entity(cfs_rq);
if (last) {
- min_runtime = __pick_next_entity(cfs_rq)->vruntime;
- min_runtime += last->vruntime;
- min_runtime >>= 1;
+ vruntime += last->vruntime;
+ vruntime >>= 1;
}
- } else if (sched_feat(APPROX_AVG))
- min_runtime += sysctl_sched_latency/2;
+ } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+ vruntime += __sched_vslice(cfs_rq->nr_running)/2;
if (initial && sched_feat(START_DEBIT))
- min_runtime += sched_slice(cfs_rq, se);
-
- if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
- latency = sysctl_sched_latency;
- if (min_runtime > latency)
- min_runtime -= latency;
- else
- min_runtime = 0;
+ vruntime += __sched_vslice(cfs_rq->nr_running + 1);
+
+ if (!initial) {
+ if (sched_feat(NEW_FAIR_SLEEPERS))
+ vruntime -= sysctl_sched_latency;
+
+ vruntime = max_t(s64, vruntime, se->vruntime);
}
- se->vruntime = max(se->vruntime, min_runtime);
+ se->vruntime = vruntime;
+
}
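
A hedged sketch (plain C, with an assumed 20 ms latency constant) of the !initial path of place_entity() above: a waking sleeper is placed up to one latency period behind min_vruntime, but never behind its own previous vruntime, so a long sleep earns at most one latency period of credit:

	#include <stdint.h>
	#include <stdio.h>

	#define SCHED_LATENCY_NS 20000000LL	/* assumed 20 ms latency */

	/* illustrative version of the !initial path in place_entity() above */
	static int64_t place_sleeper(int64_t cfs_min_vruntime, int64_t se_vruntime)
	{
		int64_t vruntime = cfs_min_vruntime - SCHED_LATENCY_NS;

		/* never move an entity backwards in virtual time */
		if (vruntime < se_vruntime)
			vruntime = se_vruntime;

		return vruntime;
	}

	int main(void)
	{
		/* short sleeper: keeps its own vruntime, no extra credit */
		printf("short sleep: %lld\n",
		       (long long)place_sleeper(100000000LL, 95000000LL));
		/* long sleeper: gets at most one latency period of credit */
		printf("long sleep : %lld\n",
		       (long long)place_sleeper(100000000LL, 1000000LL));
		return 0;
	}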
static void
}
update_stats_enqueue(cfs_rq, se);
+ check_spread(cfs_rq, se);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
update_stats_dequeue(cfs_rq, se);
-#ifdef CONFIG_SCHEDSTATS
if (sleep) {
+#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
- }
#endif
+ }
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
resched_task(rq_of(cfs_rq)->curr);
}
-static inline void
+static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- /*
- * Any task has to be enqueued before it get to execute on
- * a CPU. So account for the time it spent waiting on the
- * runqueue.
- */
- update_stats_wait_end(cfs_rq, se);
+ /* 'current' is not kept within the tree. */
+ if (se->on_rq) {
+ /*
+	 * Any task has to be enqueued before it gets to execute on
+ * a CPU. So account for the time it spent waiting on the
+ * runqueue.
+ */
+ update_stats_wait_end(cfs_rq, se);
+ __dequeue_entity(cfs_rq, se);
+ }
+
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
{
struct sched_entity *se = __pick_next_entity(cfs_rq);
- /* 'current' is not kept within the tree. */
- if (se)
- __dequeue_entity(cfs_rq, se);
-
set_next_entity(cfs_rq, se);
return se;
update_stats_curr_end(cfs_rq, prev);
+ check_spread(cfs_rq, prev);
if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
- if (curr->se.cfs_rq == p->se.cfs_rq)
+ if (se->cfs_rq == pse->cfs_rq)
return 1;
return 0;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return se->parent;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
return 1;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return NULL;
+}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
+ wakeup = 1;
}
}
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
break;
+ sleep = 1;
}
}
*/
static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = &rq->cfs;
- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
struct sched_entity *rightmost, *se = &rq->curr->se;
- struct rb_node *parent;
/*
* Are we the only task in the tree?
-	 * Dequeue and enqueue the task to update its
-	 * position within the tree:
+	 * Update run-time statistics of the 'current'.
	 */
-	dequeue_entity(cfs_rq, se, 0);
-	enqueue_entity(cfs_rq, se, 0);
+	update_curr(cfs_rq);
return;
}
/*
* Find the rightmost entry in the rbtree:
*/
- do {
- parent = *link;
- link = &parent->rb_right;
- } while (*link);
-
- rightmost = rb_entry(parent, struct sched_entity, run_node);
+ rightmost = __pick_last_entity(cfs_rq);
/*
* Already in the rightmost position?
*/
- if (unlikely(rightmost == se))
+ if (unlikely(rightmost->vruntime < se->vruntime))
return;
/*
* Minimally necessary key value to be last in the tree:
+ * Upon rescheduling, sched_class::put_prev_task() will place
+ * 'current' within the tree based on its new key value.
*/
se->vruntime = rightmost->vruntime + 1;
-
- if (cfs_rq->rb_leftmost == &se->run_node)
- cfs_rq->rb_leftmost = rb_next(&se->run_node);
- /*
- * Relink the task to the rightmost position:
- */
- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
- rb_link_node(&se->run_node, parent, link);
- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
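
A tiny illustration (not kernel code) of what the new yield path relies on: writing a key one past the current rightmost vruntime is enough to sort 'current' last when put_prev_task() requeues it, so no rbtree relinking is needed here:

	#include <stdio.h>
	#include <stdlib.h>

	static int cmp_key(const void *a, const void *b)
	{
		unsigned long long x = *(const unsigned long long *)a;
		unsigned long long y = *(const unsigned long long *)b;

		return (x > y) - (x < y);
	}

	int main(void)
	{
		/* key[0] is the yielding task, the rest are queued entities */
		unsigned long long key[] = { 150, 100, 250, 400 };
		unsigned long long rightmost = 400;	/* largest queued vruntime */
		size_t i;

		/* minimal key value that sorts behind everything queued */
		key[0] = rightmost + 1;

		/* a put_prev_task()-style requeue is just ordering by key again */
		qsort(key, sizeof(key) / sizeof(key[0]), sizeof(key[0]), cmp_key);
		for (i = 0; i < sizeof(key) / sizeof(key[0]); i++)
			printf("%llu%c", key[i], i == 3 ? '\n' : ' ');
		return 0;
	}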
/*
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *se = &curr->se, *pse = &p->se;
+ s64 delta;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
resched_task(curr);
return;
}
- if (is_same_group(curr, p)) {
- s64 delta = curr->se.vruntime - p->se.vruntime;
- if (delta > (s64)sysctl_sched_wakeup_granularity)
- resched_task(curr);
+ while (!is_same_group(se, pse)) {
+ se = parent_entity(se);
+ pse = parent_entity(pse);
}
+
+ delta = se->vruntime - pse->vruntime;
+
+ if (delta > (s64)sysctl_sched_wakeup_granularity)
+ resched_task(curr);
}
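
A toy sketch of the group walk check_preempt_wakeup() now does under CONFIG_FAIR_GROUP_SCHED (struct entity here is invented, and like the patch it assumes both entities sit at the same depth): vruntimes are only compared once both sides have been walked up to a common cfs_rq:

	#include <stddef.h>
	#include <stdio.h>

	/* toy entity: just enough to show the "walk up to a common group" idea */
	struct entity {
		long vruntime;
		struct entity *parent;	/* NULL at the top level */
		void *cfs_rq;		/* queue identity: same pointer == same group */
	};

	static int same_group(const struct entity *a, const struct entity *b)
	{
		return a->cfs_rq == b->cfs_rq;
	}

	int main(void)
	{
		char rq_top, rq_a, rq_b;
		struct entity grp_a = { 500, NULL, &rq_top };
		struct entity grp_b = { 900, NULL, &rq_top };
		struct entity task_a = { 100, &grp_a, &rq_a };
		struct entity task_b = { 200, &grp_b, &rq_b };
		const struct entity *se = &task_a, *pse = &task_b;

		/* climb until both entities hang off the same cfs_rq ... */
		while (!same_group(se, pse)) {
			se = se->parent;
			pse = pse->parent;
		}

		/* ... only then are their vruntimes comparable */
		printf("delta at common level: %ld\n", se->vruntime - pse->vruntime);
		return 0;
	}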
static struct task_struct *pick_next_task_fair(struct rq *rq)
if (!cfs_rq->nr_running)
return MAX_PRIO;
- curr = __pick_next_entity(cfs_rq);
+ curr = cfs_rq->curr;
+ if (!curr)
+ curr = __pick_next_entity(cfs_rq);
+
p = task_of(curr);
return p->prio;
if (sysctl_sched_child_runs_first &&
curr->vruntime < se->vruntime) {
/*
- * Upon rescheduling, sched_class::put_prev_task() will place
- * 'current' within the tree based on its new key value.
- */
+ * Upon rescheduling, sched_class::put_prev_task() will place
+ * 'current' within the tree based on its new key value.
+ */
swap(curr->vruntime, se->vruntime);
}
update_stats_enqueue(cfs_rq, se);
+ check_spread(cfs_rq, se);
+ check_spread(cfs_rq, curr);
__enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
resched_task(rq->curr);
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
for_each_sched_entity(se)
set_next_entity(cfs_rq_of(se), se);
}
-#else
-static void set_curr_task_fair(struct rq *rq)
-{
- struct sched_entity *se = &rq->curr->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- cfs_rq->curr = se;
-}
-#endif
/*
* All the scheduling class methods:
{
struct cfs_rq *cfs_rq;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
+#endif
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
}