[Devel] [PATCH RH7] net: fix false-positive deadlock detection for sk_receive_queue lock
Pavel Tikhomirov
ptikhomirov at virtuozzo.com
Tue May 21 13:15:17 MSK 2019
Here is the code path that produces the false-positive deadlock report:
CPU1                                          CPU2

__udp_enqueue_schedule_skb
 spin_lock(sk->sk_receive_queue.lock) <- udp
                                              ipoib_start_xmit
                                               spin_lock_irqsave(((struct rdma_netdev *)netdev_priv(net_device))->clnt_priv->lock)
                                               path_rec_start
                                                ...
                                                 __netlink_sendskb
                                                  skb_queue_tail
                                                   spin_lock_irqsave(sk->sk_receive_queue.lock) <- netlink
 do_IRQ
  ...
   ipoib_cm_rx_event_handler
    spin_lock_irqsave(((struct rdma_netdev *)netdev_priv(net_device))->clnt_priv->lock)
The sk->sk_receive_queue.lock taken on CPU1 belongs to a UDP socket, while
the one taken on CPU2 belongs to a netlink socket. Since the sockets use
different protocols, these are different locks, so no real deadlock can
happen: CPU2 simply takes its own lock and continues instead of waiting.
Lockdep, however, keys all sk_receive_queue locks to the same class and
therefore reports a circular dependency. So mark the UDP receive queue
lock with a separate lockdep class to suppress the false positive.
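For context, the usual way to give a lock its own lockdep class is to
initialise it against a dedicated static struct lock_class_key, which is
what the hunks below do for UDP receive queues via
skb_queue_head_init_class(). A minimal sketch of the pattern (the names
my_proto_rx_class and my_proto_init_rx_queue are illustrative only, not
part of this patch):

	#include <linux/lockdep.h>
	#include <linux/skbuff.h>

	/* A static lock_class_key defines a lockdep class of its own; every
	 * lock reassigned to it with lockdep_set_class() is tracked
	 * separately from locks left in the default class used by
	 * skb_queue_head_init().
	 */
	static struct lock_class_key my_proto_rx_class;

	static void my_proto_init_rx_queue(struct sk_buff_head *q)
	{
		skb_queue_head_init(q);
		/* move the queue lock out of the shared default class */
		lockdep_set_class(&q->lock, &my_proto_rx_class);
	}

With UDP receive queue locks in their own class, the two chains above
("udp rx lock -> priv->lock" on CPU1 and "priv->lock -> netlink rx lock"
on CPU2) no longer share a lock class, so lockdep no longer sees a cycle.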
https://jira.sw.ru/browse/HCI-223
Signed-off-by: Pavel Tikhomirov <ptikhomirov at virtuozzo.com>
---
 include/linux/skbuff.h | 3 ++-
 include/net/sock.h     | 1 +
 net/core/sock.c        | 4 ++--
 net/ipv4/udp.c         | 3 +++
 4 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 28a112d06ece..a850fc9285fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1608,7 +1608,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
 	skb_queue_head_init(list);
-	lockdep_set_class(&list->lock, class);
+	if (class)
+		lockdep_set_class(&list->lock, class);
 }
 
 /*
diff --git a/include/net/sock.h b/include/net/sock.h
index 6ac59b86b3a2..f90941ef02a2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1083,6 +1083,7 @@ struct proto {
 	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
 	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
+	struct lock_class_key	*lockdep_class;
 };
 
 /*
diff --git a/net/core/sock.c b/net/core/sock.c
index 97fdfa5da687..ffd747cc30a4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1556,7 +1556,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		 */
 		atomic_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
-		skb_queue_head_init(&newsk->sk_receive_queue);
+		skb_queue_head_init_class(&newsk->sk_receive_queue, newsk->sk_prot->lockdep_class);
 		skb_queue_head_init(&newsk->sk_write_queue);
 
 		rwlock_init(&newsk->sk_callback_lock);
@@ -2412,7 +2412,7 @@ EXPORT_SYMBOL(sk_stop_timer);
 
 void sock_init_data(struct socket *sock, struct sock *sk)
 {
-	skb_queue_head_init(&sk->sk_receive_queue);
+	skb_queue_head_init_class(&sk->sk_receive_queue, sk->sk_prot->lockdep_class);
 	skb_queue_head_init(&sk->sk_write_queue);
 	skb_queue_head_init(&sk->sk_error_queue);
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index a35aea149358..9c3efcb04a0c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2315,6 +2315,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 }
 EXPORT_SYMBOL(udp_poll);
 
+static struct lock_class_key udp_sk_class;
+
 struct proto udp_prot = {
 	.name		   = "UDP",
 	.owner		   = THIS_MODULE,
@@ -2351,6 +2353,7 @@ struct proto udp_prot = {
 	.destroy_cgroup	   = udp_destroy_cgroup,
 	.proto_cgroup	   = udp_proto_cgroup,
 #endif
+	.lockdep_class	   = &udp_sk_class,
 };
 EXPORT_SYMBOL(udp_prot);
 
--
2.20.1