[Devel] [PATCH RHEL7 COMMIT] ms/cfq-iosched: remove @gfp_mask from cfq_find_alloc_queue()

Konstantin Khorenko khorenko at virtuozzo.com
Mon Oct 12 03:18:49 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-229.7.2.vz7.8.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.8.5
------>
commit b7e9193a5cee21eeaaf2a2d39c8561e10eb39c21
Author: Maxim Patlasov <mpatlasov at virtuozzo.com>
Date:   Mon Oct 12 14:18:49 2015 +0400

    ms/cfq-iosched: remove @gfp_mask from cfq_find_alloc_queue()
    
    Backport 2da8de0bb from mainline. Original patch description:
    
        Even when allocations fail, cfq_find_alloc_queue() always returns a
        valid cfq_queue by falling back to the oom cfq_queue.  As such, there
        isn't much point in taking @gfp_mask and trying "harder" if __GFP_WAIT
        is set.  GFP_NOWAIT allocations don't fail often and even when they do
        the degraded behavior is acceptable and temporary.
    
        After all, the only reason get_request(), which ultimately determines
        the gfp_mask, cares about __GFP_WAIT is to guarantee request
        allocation, assuming IO forward progress, for callers which are
        willing to wait.  There's no reason for cfq_find_alloc_queue() to
        behave differently on __GFP_WAIT when it already has a fallback
        mechanism.
    
        Remove @gfp_mask from cfq_find_alloc_queue() and propagate the changes
        to its callers.  This simplifies the function quite a bit and will
        help making async queues per-cfq_group.
    
        v2: Updated to reflect GFP_ATOMIC -> GFP_NOWAIT.
    
        Signed-off-by: Tejun Heo <tj at kernel.org>
        Reviewed-by: Jeff Moyer <jmoyer at redhat.com>
        Cc: Vivek Goyal <vgoyal at redhat.com>
        Cc: Arianna Avanzini <avanzini.arianna at gmail.com>
        Signed-off-by: Jens Axboe <axboe at fb.com>
    
    https://jira.sw.ru/browse/PSBM-39956
    
    Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
    Acked-by: Dmitry Monakhov <dmonakhov at virtuozzo.com>
---
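For reference, after this change the allocation path collapses to a single
allocate-or-fall-back step. A minimal sketch of that shape, with simplified
stand-in names (queue_pool and init_queue are illustrative here, not the
actual cfq identifiers):

    /* Non-blocking allocation with a guaranteed static fallback. */
    struct cfq_queue *find_alloc_queue(struct cfq_data *cfqd)
    {
            struct cfq_queue *cfqq;

            /*
             * GFP_ATOMIC attempts rarely fail; when one does, falling
             * back to the preallocated oom queue is an acceptable and
             * temporary degradation.
             */
            cfqq = kmem_cache_alloc_node(queue_pool,
                                         GFP_ATOMIC | __GFP_ZERO,
                                         cfqd->queue->node);
            if (cfqq)
                    init_queue(cfqd, cfqq);  /* simplified init step */
            else
                    cfqq = &cfqd->oom_cfqq;  /* always-valid fallback */

            return cfqq;
    }

Because the fallback always succeeds, callers no longer need to pass a
gfp_mask, and the unlock/retry dance the old __GFP_WAIT path required
goes away entirely, as the diff below shows.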
 block/cfq-iosched.c | 46 ++++++++++------------------------------------
 1 file changed, 10 insertions(+), 36 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1d7fce9..987c0f9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -860,8 +860,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
-				       struct cfq_io_cq *cic, struct bio *bio,
-				       gfp_t gfp_mask);
+				       struct cfq_io_cq *cic, struct bio *bio);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -3569,8 +3568,7 @@ static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
-					 GFP_ATOMIC);
+		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
 		if (new_cfqq) {
 			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 			cfq_put_queue(cfqq);
@@ -3641,13 +3639,12 @@ static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) {
 
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-		     struct bio *bio, gfp_t gfp_mask)
+		     struct bio *bio)
 {
 	struct blkcg *blkcg;
-	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	struct cfq_queue *cfqq;
 	struct cfq_group *cfqg;
 
-retry:
 	rcu_read_lock();
 
 	blkcg = bio_blkcg(bio);
@@ -3659,27 +3656,9 @@ retry:
 	 * originally, since it should just be a temporary situation.
 	 */
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = NULL;
-		if (new_cfqq) {
-			cfqq = new_cfqq;
-			new_cfqq = NULL;
-		} else if (gfp_mask & __GFP_WAIT) {
-			rcu_read_unlock();
-			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool,
-					gfp_mask | __GFP_ZERO,
-					cfqd->queue->node);
-			spin_lock_irq(cfqd->queue->queue_lock);
-			if (new_cfqq)
-				goto retry;
-			else
-				return &cfqd->oom_cfqq;
-		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool,
-					gfp_mask | __GFP_ZERO,
-					cfqd->queue->node);
-		}
-
+		cfqq = kmem_cache_alloc_node(cfq_pool,
+					     GFP_ATOMIC | __GFP_ZERO,
+					     cfqd->queue->node);
 		if (cfqq) {
 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
 			cfq_init_prio_data(cfqq, cic);
@@ -3689,9 +3668,6 @@ retry:
 			cfqq = &cfqd->oom_cfqq;
 	}
 
-	if (new_cfqq)
-		kmem_cache_free(cfq_pool, new_cfqq);
-
 	rcu_read_unlock();
 	return cfqq;
 }
@@ -3716,7 +3692,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-	      struct bio *bio, gfp_t gfp_mask)
+	      struct bio *bio)
 {
 	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
@@ -3729,7 +3705,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	}
 
 	if (!cfqq)
-		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -4280,8 +4256,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
-
 	spin_lock_irq(q->queue_lock);
 
 	check_ioprio_changed(cic, bio);
@@ -4289,7 +4263,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	} else {
 		/*


