[Devel] [PATCH RHEL7 COMMIT] rh/bnxt: don't lock the tx queue from napi poll

Konstantin Khorenko khorenko at virtuozzo.com
Thu Dec 15 20:27:37 MSK 2022


The commit is pushed to "branch-rh7-3.10.0-1160.80.1.vz7.191.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1160.80.1.vz7.191.4
------>
commit 451e988eee7ac96b0075a15a09b9238f338c8e62
Author: Jakub Kicinski <kuba at kernel.org>
Date:   Thu Aug 12 14:42:39 2021 -0700

    rh/bnxt: don't lock the tx queue from napi poll
    
    We can't take the tx lock from the napi poll routine, because
    netpoll can poll napi at any moment, including with the tx lock
    already held.
    
    The tx lock is protecting against two paths - the disable
    path, and (as Michael points out) the NETDEV_TX_BUSY case
    which may occur if NAPI completions race with start_xmit
    and both decide to re-enable the queue.
    
    For the disable/ifdown path use synchronize_net() to make sure
    closing the device does not race with restarting the queues.
    Annotate accesses to dev_state against data races.
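    
    A minimal sketch of this close-vs-poll pattern, with illustrative
    names (struct ring, RING_CLOSING) standing in for the driver's
    bnxt_tx_ring_info and BNXT_DEV_STATE_CLOSING:
    
    	#include <linux/netdevice.h>	/* READ_ONCE/WRITE_ONCE, synchronize_net() */
    
    	#define RING_CLOSING	1
    
    	struct ring {
    		unsigned int dev_state;	/* written at close, read from napi poll */
    	};
    
    	static void ring_close(struct ring *r)
    	{
    		WRITE_ONCE(r->dev_state, RING_CLOSING);
    		/* A napi poll already in flight may still wake the
    		 * queue, but once synchronize_net() returns, every
    		 * new poll is guaranteed to observe RING_CLOSING.
    		 */
    		synchronize_net();
    	}
    
    	static void ring_napi_poll(struct ring *r, struct netdev_queue *txq)
    	{
    		if (READ_ONCE(r->dev_state) != RING_CLOSING)
    			netif_tx_wake_queue(txq);
    	}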
    
    For the NAPI cleanup vs start_xmit path - appropriate barriers
    are already in place in the main spot where the Tx queue is stopped,
    but we need to do the same careful dance in the TX_BUSY case.
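    
    For reference, the paired-barrier protocol that the new
    bnxt_txr_netif_try_stop_queue() helper below follows, sketched with
    illustrative names (ring_avail(), wake_thresh, cons/hw_cons are
    stand-ins for the driver's ring accounting):
    
    	/* xmit side, on TX_BUSY */
    	netif_tx_stop_queue(txq);
    	smp_mb();		/* publish "stopped" before rechecking space */
    	if (ring_avail(r) > wake_thresh)
    		netif_tx_wake_queue(txq);	/* a completion raced us */
    
    	/* completion side (bnxt_tx_int()) */
    	r->cons = hw_cons;	/* free the consumed descriptors */
    	smp_mb();		/* publish new cons before checking "stopped" */
    	if (netif_tx_queue_stopped(txq) && ring_avail(r) > wake_thresh)
    		netif_tx_wake_queue(txq);
    
    Each side publishes its update before checking the other side's, so
    at least one of the two rechecks observes the final state and the
    queue can never be left stopped while descriptors are free.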
    
    mFixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
    Reviewed-by: Michael Chan <michael.chan at broadcom.com>
    Reviewed-by: Edwin Peer <edwin.peer at broadcom.com>
    Signed-off-by: Jakub Kicinski <kuba at kernel.org>
    
    The patch is backported from RHEL7.9 kernel 3.10.0-1160.81.1.el7:
    https://access.redhat.com/labs/rhcb/RHEL-7.9/kernel-3.10.0-1160.81.1.el7/patches/blob/1499-netdrv-bnxt-don-t-lock-the-tx-queue-from-napi-poll.patch
    
      From: Jamie Bainbridge <jbainbri at redhat.com>
      Date: Thu, 12 Aug 2021 14:42:39 -0700
      Subject: [PATCH 1499/1510] [netdrv] bnxt: don't lock the tx queue from
               napi poll
      Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2110869
    
    https://jira.sw.ru/browse/PSBM-144136
    (cherry picked from ms commit 3c603136c9f82833813af77185618de5af67676c)
    Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 54 ++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 351ab994b773..328eef7b7ef2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -340,6 +340,26 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
 	return md_dst->u.port_info.port_id;
 }
 
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+					  struct bnxt_tx_ring_info *txr,
+					  struct netdev_queue *txq)
+{
+	netif_tx_stop_queue(txq);
+
+	/* netif_tx_stop_queue() must be done before checking
+	 * tx index in bnxt_tx_avail() below, because in
+	 * bnxt_tx_int(), we update tx index before checking for
+	 * netif_tx_queue_stopped().
+	 */
+	smp_mb();
+	if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+		netif_tx_wake_queue(txq);
+		return false;
+	}
+
+	return true;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -367,8 +387,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	free_size = bnxt_tx_avail(bp, txr);
 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
-		netif_tx_stop_queue(txq);
-		return NETDEV_TX_BUSY;
+		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+			return NETDEV_TX_BUSY;
 	}
 
 	length = skb->len;
@@ -579,16 +599,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (skb->xmit_more && !tx_buf->is_push)
 			bnxt_db_write(bp, &txr->tx_db, prod);
 
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * tx index in bnxt_tx_avail() below, because in
-		 * bnxt_tx_int(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
-			netif_tx_wake_queue(txq);
+		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
 	}
 	return NETDEV_TX_OK;
 
@@ -672,14 +683,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	smp_mb();
 
 	if (unlikely(netif_tx_queue_stopped(txq)) &&
-	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) &&
-		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
-		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
-			netif_tx_wake_queue(txq);
-		__netif_tx_unlock(txq);
-	}
+	    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+		netif_tx_wake_queue(txq);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -7791,9 +7797,11 @@ void bnxt_tx_disable(struct bnxt *bp)
 	if (bp->tx_ring) {
 		for (i = 0; i < bp->tx_nr_rings; i++) {
 			txr = &bp->tx_ring[i];
-			txr->dev_state = BNXT_DEV_STATE_CLOSING;
+			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
 		}
 	}
+	/* Make sure napi polls see @dev_state change */
+	synchronize_net();
 	/* Drop carrier first to prevent TX timeout */
 	netif_carrier_off(bp->dev);
 	/* Stop all TX queues */
@@ -7807,8 +7815,10 @@ void bnxt_tx_enable(struct bnxt *bp)
 
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		txr = &bp->tx_ring[i];
-		txr->dev_state = 0;
+		WRITE_ONCE(txr->dev_state, 0);
 	}
+	/* Make sure napi polls see @dev_state change */
+	synchronize_net();
 	netif_tx_wake_all_queues(bp->dev);
 	if (bp->link_info.link_up)
 		netif_carrier_on(bp->dev);

