[Devel] [PATCH rh7] ms/qed/qede: Fix scheduling while atomic
Konstantin Khorenko
khorenko at virtuozzo.com
Thu Jul 20 21:54:47 MSK 2023
From: Manish Chopra <manishc at marvell.com>
Statistics read through a bond interface via sysfs causes the
bug and traces below, as it triggers the bonding module to
collect the slave device statistics while holding its spinlock;
beneath that, the qede->qed driver statistics flow gets scheduled
out due to the usleep_range() used in the PTT acquire logic.
[ 3673.988874] Hardware name: HPE ProLiant DL365 Gen10 Plus/ProLiant DL365 Gen10 Plus, BIOS A42 10/29/2021
[ 3673.988878] Call Trace:
[ 3673.988891] dump_stack_lvl+0x34/0x44
[ 3673.988908] __schedule_bug.cold+0x47/0x53
[ 3673.988918] __schedule+0x3fb/0x560
[ 3673.988929] schedule+0x43/0xb0
[ 3673.988932] schedule_hrtimeout_range_clock+0xbf/0x1b0
[ 3673.988937] ? __hrtimer_init+0xc0/0xc0
[ 3673.988950] usleep_range+0x5e/0x80
[ 3673.988955] qed_ptt_acquire+0x2b/0xd0 [qed]
[ 3673.988981] _qed_get_vport_stats+0x141/0x240 [qed]
[ 3673.989001] qed_get_vport_stats+0x18/0x80 [qed]
[ 3673.989016] qede_fill_by_demand_stats+0x37/0x400 [qede]
[ 3673.989028] qede_get_stats64+0x19/0xe0 [qede]
[ 3673.989034] dev_get_stats+0x5c/0xc0
[ 3673.989045] netstat_show.constprop.0+0x52/0xb0
[ 3673.989055] dev_attr_show+0x19/0x40
[ 3673.989065] sysfs_kf_seq_show+0x9b/0xf0
[ 3673.989076] seq_read_iter+0x120/0x4b0
[ 3673.989087] new_sync_read+0x118/0x1a0
[ 3673.989095] vfs_read+0xf3/0x180
[ 3673.989099] ksys_read+0x5f/0xe0
[ 3673.989102] do_syscall_64+0x3b/0x90
[ 3673.989109] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 3673.989115] RIP: 0033:0x7f8467d0b082
[ 3673.989119] Code: c0 e9 b2 fe ff ff 50 48 8d 3d ca 05 08 00 e8 35 e7 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
[ 3673.989121] RSP: 002b:00007ffffb21fd08 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
[ 3673.989127] RAX: ffffffffffffffda RBX: 000000000100eca0 RCX: 00007f8467d0b082
[ 3673.989128] RDX: 00000000000003ff RSI: 00007ffffb21fdc0 RDI: 0000000000000003
[ 3673.989130] RBP: 00007f8467b96028 R08: 0000000000000010 R09: 00007ffffb21ec00
[ 3673.989132] R10: 00007ffffb27b170 R11: 0000000000000246 R12: 00000000000000f0
[ 3673.989134] R13: 0000000000000003 R14: 00007f8467b92000 R15: 0000000000045a05
[ 3673.989139] CPU: 30 PID: 285188 Comm: read_all Kdump: loaded Tainted: G W OE
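A hypothetical, distilled sketch of the failure mode (not the actual
bonding/qed code; all names below are made up for illustration): a
callback that may sleep is invoked while the caller holds a spinlock,
i.e. in atomic context.

	#include <linux/spinlock.h>
	#include <linux/delay.h>

	static DEFINE_SPINLOCK(stats_lock);

	/* Stands in for the qed statistics flow: the PTT acquire logic
	 * polls with usleep_range(), which may sleep.
	 */
	static void collect_hw_stats(void)
	{
		usleep_range(100, 200);
	}

	/* Stands in for the bonding stats path: slave statistics are
	 * collected with a spinlock held, i.e. in atomic context, so
	 * the sleep above triggers "BUG: scheduling while atomic".
	 */
	static void bond_like_stats_read(void)
	{
		spin_lock(&stats_lock);
		collect_hw_stats();
		spin_unlock(&stats_lock);
	}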
Fix this by collecting the statistics asynchronously from a periodic
delayed work scheduled at the default stats coalescing interval, and
return the most recent copy of the statistics from .ndo_get_stats64().
Also add the ability to configure/retrieve the stats coalescing
interval using the commands below:
ethtool -C ethx stats-block-usecs <val>
ethtool -c ethx
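For orientation, a distilled sketch of the approach the diff below
implements (simplified; the function names here are illustrative
stand-ins for qede_periodic_task()/qede_get_stats64()): a delayed work
item refreshes the cached vport statistics in process context, where
sleeping in the PTT acquire logic is allowed, while .ndo_get_stats64()
only copies the cached counters under a spinlock.

	/* Process context: safe to sleep while acquiring the PTT. */
	static void stats_periodic_task(struct work_struct *work)
	{
		struct qede_dev *edev = container_of(work, struct qede_dev,
						     periodic_task.work);

		qede_fill_by_demand_stats(edev);	/* refreshes edev->stats */
		schedule_delayed_work(&edev->periodic_task,
				      edev->stats_coal_ticks);
	}

	/* .ndo_get_stats64: never sleeps, only copies the snapshot. */
	static void stats_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
	{
		struct qede_dev *edev = netdev_priv(dev);

		spin_lock(&edev->stats_lock);
		stats->rx_packets = edev->stats.common.rx_ucast_pkts; /* etc. */
		spin_unlock(&edev->stats_lock);
	}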
Fixes: 133fac0eedc3 ("qede: Add basic ethtool support")
Cc: Sudarsana Kalluru <skalluru at marvell.com>
Cc: David Miller <davem at davemloft.net>
Signed-off-by: Manish Chopra <manishc at marvell.com>
Link: https://lore.kernel.org/r/20230605112600.48238-1-manishc@marvell.com
Signed-off-by: Paolo Abeni <pabeni at redhat.com>
https://jira.vzint.dev/browse/PSBM-146761
(cherry picked from ms commit 42510dffd0e2c27046905f742172ed6662af5557)
Cherry-pick notes: drop hunks related to setting
supported_coalesce_params, as vz7 kernel does not have corresponding
infrastructure yet.
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 +-
drivers/net/ethernet/qlogic/qede/qede.h | 7 +++-
.../net/ethernet/qlogic/qede/qede_ethtool.c | 18 ++++++++++
drivers/net/ethernet/qlogic/qede/qede_main.c | 34 ++++++++++++++++++-
4 files changed, 58 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index b3937935d2a8..f15ffd9a2850 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1927,7 +1927,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
- if (!cdev) {
+ if (!cdev || cdev->recov_in_prog) {
memset(stats, 0, sizeof(*stats));
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 1132531dc466..87f07761a432 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -261,7 +261,12 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
- struct bpf_prog *xdp_prog;
+ struct bpf_prog *xdp_prog;
+
+ struct delayed_work periodic_task;
+ unsigned long stats_coal_ticks;
+ u32 stats_coal_usecs;
+ spinlock_t stats_lock; /* lock for vport stats access */
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 5abdf0e45031..bf9c3fa67106 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -356,6 +356,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
}
+ spin_lock(&edev->stats_lock);
+
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
@@ -365,6 +367,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
buf++;
}
+ spin_unlock(&edev->stats_lock);
+
__qede_unlock(edev);
}
@@ -846,6 +850,7 @@ static int qede_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
+ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
return rc;
}
@@ -858,6 +863,19 @@ static int qede_set_coalesce(struct net_device *dev,
int i, rc = 0;
u16 rxc, txc;
+ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+ if (edev->stats_coal_usecs) {
+ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+ schedule_delayed_work(&edev->periodic_task, 0);
+
+ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+ edev->stats_coal_ticks);
+ } else {
+ cancel_delayed_work_sync(&edev->periodic_task);
+ }
+ }
+
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a482d93177be..0e0de29573bf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -326,6 +326,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
+ spin_lock(&edev->stats_lock);
+
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -423,6 +425,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
+
+ spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@@ -431,9 +435,10 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
- qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
+ spin_lock(&edev->stats_lock);
+
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -453,6 +458,8 @@ static void qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
+
+ spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@@ -974,6 +981,23 @@ void qede_unlock(struct qede_dev *edev)
rtnl_unlock();
}
+static void qede_periodic_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ periodic_task.work);
+
+ qede_fill_by_demand_stats(edev);
+ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+ spin_lock_init(&edev->stats_lock);
+ edev->stats_coal_usecs = USEC_PER_SEC;
+ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -986,6 +1010,7 @@ static void qede_sp_task(struct work_struct *work)
*/
if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+ cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
@@ -1155,6 +1180,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ qede_init_periodic_task(edev);
rc = register_netdev(edev->ndev);
if (rc) {
@@ -1179,6 +1205,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
+
+ /* retain user config (for example - after recovery) */
+ if (edev->stats_coal_usecs)
+ schedule_delayed_work(&edev->periodic_task, 0);
+
return 0;
err4:
@@ -1243,6 +1274,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
+ cancel_delayed_work_sync(&edev->periodic_task);
edev->ops->common->set_power_state(cdev, PCI_D0);
--
2.24.3