[Devel] [PATCH VZ9 18/20] fuse: pcs: new rpc affinity mode - RSS
Alexey Kuznetsov
kuznet at virtuozzo.com
Fri Oct 6 13:44:08 MSK 2023
The mode aligns socket I/O jobs to RSS: receive/transmit jobs
are scheduled on the CPUs to which RSS maps the rpc socket.
Precondition is multiqueue device with enabled RSS and XPS.
If RSS and XPS are enabled, sockets are entirely localized
to one CPU; they are not accessed from other CPUs, which
minimizes lock contention and keeps perfect cache locality
for socket data. Nevertheless, there is a price to pay: data
prepared by the user on some CPU is forced to be accessed
from the RSS CPU, which can be a problem, especially with
NUMA. This has to be measured.
When it does not work:
1. Single queue devices. It is not recommended to use such dumb
hardware with vstorage.
2. RDMA. Not yet supported. It requires some experimenting to
figure out how RDMA completion jobs are scheduled.
3. IPsec without binding CSes to separate IP addresses.
Signed-off-by: Alexey Kuznetsov <kuznet at acronis.com>
---
fs/fuse/kio/pcs/pcs_rpc.c | 8 ++++++--
fs/fuse/kio/pcs/pcs_rpc.h | 3 +++
fs/fuse/kio/pcs/pcs_sock_io.c | 4 +++-
3 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/fs/fuse/kio/pcs/pcs_rpc.c b/fs/fuse/kio/pcs/pcs_rpc.c
index 4fbb25d..2b045ed 100644
--- a/fs/fuse/kio/pcs/pcs_rpc.c
+++ b/fs/fuse/kio/pcs/pcs_rpc.c
@@ -36,7 +36,7 @@
#include "fuse_ktrace.h"
-static unsigned int rpc_affinity_mode = RPC_AFFINITY_RETENT;
+unsigned int rpc_affinity_mode = RPC_AFFINITY_RETENT;
module_param(rpc_affinity_mode, uint, 0644);
MODULE_PARM_DESC(rpc_affinity_mode, "RPC affinity mode");
@@ -730,6 +730,10 @@ static void pcs_rpc_affinity(struct pcs_rpc *ep, bool was_idle)
ep->cpu = WORK_CPU_UNBOUND;
}
break;
+ case RPC_AFFINITY_RSS:
+ if (!(ep->flags & PCS_RPC_F_LOCAL) && ep->addr.type != PCS_ADDRTYPE_RDMA)
+ break;
+ fallthrough;
case RPC_AFFINITY_RETENT:
/* Naive socket-to-cpu binding approach */
if (time_is_before_jiffies(ep->cpu_stamp) && was_idle) {
@@ -744,7 +748,7 @@ static void pcs_rpc_affinity(struct pcs_rpc *ep, bool was_idle)
}
break;
default:
- pr_err("Unknown affninity mode: %u\n", rpc_affinity_mode);
+ pr_err("Unknown affinity mode: %u\n", rpc_affinity_mode);
}
}
diff --git a/fs/fuse/kio/pcs/pcs_rpc.h b/fs/fuse/kio/pcs/pcs_rpc.h
index 2ff4494..ef4ab26 100644
--- a/fs/fuse/kio/pcs/pcs_rpc.h
+++ b/fs/fuse/kio/pcs/pcs_rpc.h
@@ -39,8 +39,11 @@ enum {
RPC_AFFINITY_NONE = 0,
RPC_AFFINITY_RETENT = 1,
RPC_AFFINITY_SPREAD = 2,
+ RPC_AFFINITY_RSS = 3,
};
+extern unsigned int rpc_affinity_mode;
+
struct pcs_rpc_params
{
unsigned int alloc_hdr_size;
diff --git a/fs/fuse/kio/pcs/pcs_sock_io.c b/fs/fuse/kio/pcs/pcs_sock_io.c
index fc76e32..46de102 100644
--- a/fs/fuse/kio/pcs/pcs_sock_io.c
+++ b/fs/fuse/kio/pcs/pcs_sock_io.c
@@ -548,7 +548,9 @@ static void pcs_sk_kick_queue(struct sock *sk)
sio = rcu_dereference_sk_user_data(sk);
if (sio) {
struct pcs_rpc *ep = sio->netio.parent;
- TRACE(PEER_FMT" queue\n", PEER_ARGS(ep));
+ TRACE(PEER_FMT" queue cpu=%d\n", PEER_ARGS(ep), smp_processor_id());
+ if (rpc_affinity_mode == RPC_AFFINITY_RSS && !(ep->flags & PCS_RPC_F_LOCAL))
+ ep->cpu = smp_processor_id();
pcs_rpc_kick_queue(ep);
}
rcu_read_unlock();
--
1.8.3.1
More information about the Devel
mailing list