[Devel] [PATCH rh7] Revert "ve/sched: introduce cond_resched_may_throttle"

Konstantin Khorenko khorenko at virtuozzo.com
Mon Feb 10 18:42:56 MSK 2020


This reverts commit 270b803e8bca2d5836e38be0e9ce55bfe0ba91c1.

We've rolled back the boosting hacks in commits:
  28a9251d7129c ("Revert "ve/sched: port boosting hacks against prio
		  inversion"")
  3ad70f5be8a36 ("Revert "sched: add WARN_ON's to debug task boosting"")

Thus the commit being reverted does nothing now,
so let's revert it as well.

https://jira.sw.ru/browse/PSBM-100188

Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
 include/linux/sched.h |  8 --------
 kernel/sched/core.c   | 23 ++++-------------------
 2 files changed, 4 insertions(+), 27 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89982455835e5..6d4106e44690d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1500,7 +1500,6 @@ struct task_struct {
 				 * execve */
 	unsigned in_iowait:1;
 	unsigned no_new_privs:1; /* task may not gain privileges */
-	unsigned may_throttle:1;
 
 	pid_t pid;
 	pid_t tgid;
@@ -3143,13 +3142,6 @@ extern int _cond_resched(void);
 	_cond_resched();			\
 })
 
-extern int _cond_resched_may_throttle(void);
-
-#define cond_resched_may_throttle() ({		\
-	__might_sleep(__FILE__, __LINE__, 0);	\
-	_cond_resched_may_throttle();		\
-})
-
 extern int __cond_resched_lock(spinlock_t *lock);
 
 #ifdef CONFIG_PREEMPT_COUNT
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f264c209b1e77..c8e1aaaeefcbb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5745,38 +5745,23 @@ static inline int should_resched(void)
 	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
 }
 
-static void __cond_resched(bool may_throttle)
+static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	if (may_throttle)
-		current->may_throttle = 1;
 	__schedule();
-	if (may_throttle)
-		current->may_throttle = 0;
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
 int __sched _cond_resched(void)
 {
 	if (should_resched()) {
-		__cond_resched(false);
+		__cond_resched();
 		return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
 
-int __sched _cond_resched_may_throttle(void)
-{
-	if (should_resched()) {
-		__cond_resched(true);
-		return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(_cond_resched_may_throttle);
-
-
 /*
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
@@ -5795,7 +5780,7 @@ int __cond_resched_lock(spinlock_t *lock)
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
-			__cond_resched(false);
+			__cond_resched();
 		else
 			cpu_relax();
 		ret = 1;
@@ -5811,7 +5796,7 @@ int __sched __cond_resched_softirq(void)
 
 	if (should_resched()) {
 		local_bh_enable();
-		__cond_resched(false);
+		__cond_resched();
 		local_bh_disable();
 		return 1;
 	}
-- 
2.15.1
