From 524fc5ee3edabf137b7b27c343f88189c35ddb5b Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Tue, 23 Aug 2022 10:46:46 -0700
Subject: net: Fix data-races around netdev_max_backlog.

From: Kuniyuki Iwashima <kuniyu@amazon.com>

[ Upstream commit 5dcd08cd19912892586c6082d56718333e2d19db ]

While reading netdev_max_backlog, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

While at it, we remove the unnecessary spaces in the doc.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 Documentation/admin-guide/sysctl/net.rst | 2 +-
 net/core/dev.c                           | 4 ++--
 net/core/gro_cells.c                     | 2 +-
 net/xfrm/espintcp.c                      | 2 +-
 net/xfrm/xfrm_input.c                    | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index fcd650bdbc7e2..01d9858197832 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
 netdev_max_backlog
 ------------------
 
-Maximum number of packets, queued on the INPUT side, when the interface 
+Maximum number of packets, queued on the INPUT side, when the interface
 receives packets faster than kernel can process them.
 
 netdev_rss_key
diff --git a/net/core/dev.c b/net/core/dev.c
index 2a7d81cd9e2ea..e1496e626a532 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4623,7 +4623,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	struct softnet_data *sd;
 	unsigned int old_flow, new_flow;
 
-	if (qlen < (netdev_max_backlog >> 1))
+	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
 		return false;
 
 	sd = this_cpu_ptr(&softnet_data);
@@ -4671,7 +4671,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 541c7a72a28a4..21619c70a82b7 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 
 	cell = this_cpu_ptr(gcells->cells);
 
-	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
 drop:
 		dev_core_stats_rx_dropped_inc(dev);
 		kfree_skb(skb);
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 82d14eea1b5ad..974eb97b77d22 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 {
 	struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
-	if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
+	if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 70a8c36f0ba6e..b2f4ec9c537f0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 
 	trans = this_cpu_ptr(&xfrm_trans_tasklet);
 
-	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
-- 
2.35.1
