[Devel] [PATCH RHEL7 COMMIT] locking/rwsem-xadd: Add killable versions of rwsem_down_read_failed()

Konstantin Khorenko khorenko at virtuozzo.com
Mon May 25 17:55:56 MSK 2020


The commit is pushed to "branch-rh7-3.10.0-1127.8.2.vz7.161.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.8.2.vz7.161.1
------>
commit 3dfd3ee0794857407475c8fe9f7fb3bd8c3a9b0c
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date:   Mon May 25 17:55:56 2020 +0300

    locking/rwsem-xadd: Add killable versions of rwsem_down_read_failed()
    
    ms commit 83ced169d9a0
    
    Rename rwsem_down_read_failed() to __rwsem_down_read_failed_common()
    and teach it to abort waiting when a signal is pending and a killable
    sleep state was passed in.
    
    Note that we shouldn't wake anybody up on the EINTR path:
    
    We check (waiter.task) under the spinlock before taking the out_nolock
    path. The current task was not woken up, so either a writer owns the
    sem, or a writer is the first waiter. In both cases we shouldn't wake
    anybody up. If a writer owns the sem and we were the only waiter,
    remove RWSEM_WAITING_BIAS, as there are no waiters anymore.
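    
    A minimal sketch, not part of this commit, of how the ERR_PTR(-EINTR)
    return of the new killable slowpath is expected to be consumed by a
    fast-path helper (modelled on the asm-generic __down_read() pattern;
    the actual __down_read_killable() is introduced by a later patch in
    the series):
    
    	static inline int __down_read_killable(struct rw_semaphore *sem)
    	{
    		/* Reader fast path failed: a writer holds or waits for the sem. */
    		if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
    			if (IS_ERR(rwsem_down_read_failed_killable(sem)))
    				return -EINTR;	/* fatal signal received while waiting */
    
    		return 0;
    	}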
    
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    
    Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
    Cc: Linus Torvalds <torvalds at linux-foundation.org>
    Cc: Peter Zijlstra <peterz at infradead.org>
    Cc: Thomas Gleixner <tglx at linutronix.de>
    Cc: arnd at arndb.de
    Cc: avagin at virtuozzo.com
    Cc: davem at davemloft.net
    Cc: fenghua.yu at intel.com
    Cc: gorcunov at virtuozzo.com
    Cc: heiko.carstens at de.ibm.com
    Cc: hpa at zytor.com
    Cc: ink at jurassic.park.msu.ru
    Cc: mattst88 at gmail.com
    Cc: rth at twiddle.net
    Cc: schwidefsky at de.ibm.com
    Cc: tony.luck at intel.com
    Link: http://lkml.kernel.org/r/149789534632.9059.2901382369609922565.stgit@localhost.localdomain
    Signed-off-by: Ingo Molnar <mingo at kernel.org>
    
    =====================
    Patchset description:
    
    This patchset ports support for down_read_killable(), which is
    a prerequisite for pernet_ops_rwsem.
    
    https://jira.sw.ru/browse/PSBM-104158
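    
    For context, a hedged sketch of the eventual caller-side usage once
    down_read_killable() is wired up later in the series (the function
    name below is illustrative only):
    
    	static int example_read_section(struct rw_semaphore *sem)
    	{
    		if (down_read_killable(sem))
    			return -EINTR;	/* killed while sleeping; lock not taken */
    
    		/* ... read-side critical section ... */
    
    		up_read(sem);
    		return 0;
    	}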
---
 lib/rwsem.c | 39 ++++++++++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 7 deletions(-)

diff --git a/lib/rwsem.c b/lib/rwsem.c
index 0289921e3881c..053f3e6ae97a8 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -241,16 +241,14 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 /*
  * Wait for the read lock to be granted
  */
-__visible
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 {
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
 	WAKE_Q(wake_q);
 
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
@@ -288,17 +286,44 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 
 	/* wait to be given the lock */
 	while (true) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		set_current_state(state);
 		if (!waiter.task)
 			break;
+		if (signal_pending_state(state, current)) {
+			raw_spin_lock_irq(&sem->wait_lock);
+			if (waiter.task)
+				goto out_nolock;
+			raw_spin_unlock_irq(&sem->wait_lock);
+			break;
+		}
 		schedule();
 	}
 
-	__set_task_state(tsk, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 	return sem;
+out_nolock:
+	slist_del(&waiter.list, &sem->wait_list);
+	if (slist_empty(&sem->wait_list))
+		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+	raw_spin_unlock_irq(&sem->wait_lock);
+	__set_current_state(TASK_RUNNING);
+	return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
 
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
 /*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the
