[Devel] [PATCH rh7] sched: add WARN_ON's to debug task boosting
Vladimir Davydov
vdavydov at virtuozzo.com
Wed Aug 3 05:55:14 PDT 2016
This patch ports:

 * diff-sched-add-WARN_ONs-to-debug-task-boosting
   Added to 042stab114_2
Assert that we never have a boosted entity under a throttled hierarchy.
Also, do not panic if, in set_next_entity, we find a boosted entity that
is not on the list - just warn and carry on as if nothing had happened.
https://jira.sw.ru/browse/PSBM-44475
https://jira.sw.ru/browse/PSBM-50077
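For illustration, the two kinds of checks this patch adds boil down to the
pattern in the minimal userspace sketch below. All names here (toy_entity,
dequeue_boosted, the crude WARN_ON and LIST_POISON1 stand-ins) are simplified
for demonstration only and are not the kernel's definitions:

/*
 * Minimal userspace sketch of the two kinds of checks added by this patch:
 * (1) warn if an entity is boosted while its hierarchy is throttled,
 * (2) warn and bail out instead of panicking when dequeueing an entity
 *     that is no longer on the boost list.
 */
#include <stdio.h>

struct list_node {
	struct list_node *next, *prev;
};

#define LIST_POISON1 ((struct list_node *)0x100)	/* stand-in poison value */

/* crude WARN_ON(): print a warning and report whether the condition held */
#define WARN_ON(cond) \
	({ int __c = !!(cond); if (__c) fprintf(stderr, "WARNING: %s\n", #cond); __c; })

struct toy_entity {
	struct list_node boost_node;
	int boosted;
	int throttled;		/* whether the entity's hierarchy is throttled */
};

static void list_del(struct list_node *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
	node->next = LIST_POISON1;	/* poisoned so a double removal is detectable */
}

/* Check #1: a boosted entity must never sit under a throttled hierarchy. */
static void check_boost_invariant(struct toy_entity *se)
{
	WARN_ON(se->boosted && se->throttled);
}

/*
 * Check #2: if the entity is already off the list (next is poisoned),
 * warn and return instead of crashing on the poisoned pointer.
 */
static void dequeue_boosted(struct toy_entity *se)
{
	if (WARN_ON(se->boost_node.next == LIST_POISON1))
		return;
	list_del(&se->boost_node);
}

int main(void)
{
	struct list_node head = { &head, &head };
	struct toy_entity se = { { &head, &head }, 1, 0 };

	head.next = head.prev = &se.boost_node;

	check_boost_invariant(&se);	/* silent: boosted but not throttled */
	dequeue_boosted(&se);		/* normal removal */
	dequeue_boosted(&se);		/* double removal only warns, no crash */
	return 0;
}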
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
Reviewed-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
kernel/sched/fair.c | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 515685f77217..e39ed4c17464 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -909,9 +909,10 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
#ifdef CONFIG_CFS_BANDWIDTH
static inline void update_entity_boost(struct sched_entity *se)
{
- if (!entity_is_task(se))
+ if (!entity_is_task(se)) {
se->boosted = cfs_rq_has_boosted_entities(group_cfs_rq(se));
- else {
+ WARN_ON(se->boosted && cfs_rq_throttled(group_cfs_rq(se)));
+ } else {
struct task_struct *p = task_of(se);
if (unlikely(p != current))
@@ -943,6 +944,8 @@ static inline void __enqueue_boosted_entity(struct cfs_rq *cfs_rq,
static inline void __dequeue_boosted_entity(struct cfs_rq *cfs_rq,
struct sched_entity *se)
{
+ if (WARN_ON(se->boost_node.next == LIST_POISON1))
+ return;
list_del(&se->boost_node);
}
@@ -953,8 +956,11 @@ static int enqueue_boosted_entity(struct cfs_rq *cfs_rq,
if (se != cfs_rq->curr)
__enqueue_boosted_entity(cfs_rq, se);
se->boosted = 1;
+ WARN_ON(!entity_is_task(se) &&
+ cfs_rq_throttled(group_cfs_rq(se)));
return 1;
- }
+ } else
+ WARN_ON(cfs_rq_throttled(group_cfs_rq(se)));
return 0;
}
@@ -3847,6 +3853,8 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
*/
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
+ WARN_ON(cfs_rq_has_boosted_entities(cfs_rq));
+
if (!cfs_bandwidth_used())
return;
@@ -4150,8 +4158,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
} else if (boost) {
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- if (!enqueue_boosted_entity(cfs_rq, se))
+ if (!enqueue_boosted_entity(cfs_rq, se)) {
+ WARN_ON(throttled_hierarchy(cfs_rq));
break;
+ }
if (cfs_rq_throttled(cfs_rq))
unthrottle_cfs_rq(cfs_rq);
}
@@ -4213,8 +4223,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
- if (cfs_rq_throttled(cfs_rq))
+ if (cfs_rq_throttled(cfs_rq)) {
+ WARN_ON(boosted);
break;
+ }
if (boosted)
boosted = dequeue_boosted_entity(cfs_rq, se);
--
2.1.4