From 2270bbd9723fffdb4b9822fa716b765f3ae3177b Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Tue, 23 Aug 2022 10:46:49 -0700
Subject: net: Fix data-races around sysctl_optmem_max.

From: Kuniyuki Iwashima <kuniyu@amazon.com>

[ Upstream commit 7de6d09f51917c829af2b835aba8bb5040f8e86a ]

While reading sysctl_optmem_max, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/core/bpf_sk_storage.c |  5 +++--
 net/core/filter.c         |  9 +++++----
 net/core/sock.c           |  8 +++++---
 net/ipv4/ip_sockglue.c    |  6 +++---
 net/ipv6/ipv6_sockglue.c  |  4 ++--
 5 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 1b7f385643b4c..94374d529ea42 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
 static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
 				 void *owner, u32 size)
 {
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
 	struct sock *sk = (struct sock *)owner;
 
 	/* same check as in sock_kmalloc() */
-	if (size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+	if (size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
 		atomic_add(size, &sk->sk_omem_alloc);
 		return 0;
 	}
diff --git a/net/core/filter.c b/net/core/filter.c
index 60c854e7d98ba..063176428086b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	u32 filter_size = bpf_prog_size(fp->prog->len);
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
 
 	/* same check as in sock_kmalloc() */
-	if (filter_size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+	if (filter_size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
 		atomic_add(filter_size, &sk->sk_omem_alloc);
 		return true;
 	}
@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
-	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
+	if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
 		err = -ENOMEM;
 	else
 		err = reuseport_attach_prog(sk, prog);
@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
 		}
 	} else {
 		/* BPF_PROG_TYPE_SOCKET_FILTER */
-		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
+		if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
 			err = -ENOMEM;
 			goto err_prog_put;
 		}
diff --git a/net/core/sock.c b/net/core/sock.c
index 62f69bc3a0e6e..d672e63a5c2d4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2535,7 +2535,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
 
 	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
-	    sysctl_optmem_max)
+	    READ_ONCE(sysctl_optmem_max))
 		return NULL;
 
 	skb = alloc_skb(size, priority);
@@ -2553,8 +2553,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
-	if ((unsigned int)size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+	if ((unsigned int)size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
 		void *mem;
 		/* First do the add, to avoid the race if kmalloc
 		 * might sleep.
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index a8a323ecbb54b..e49a61a053a68 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
 
 	if (optlen < GROUP_FILTER_SIZE(0))
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max)
+	if (optlen > READ_ONCE(sysctl_optmem_max))
 		return -ENOBUFS;
 
 	gsf = memdup_sockptr(optval, optlen);
@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < size0)
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max - 4)
+	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
 		return -ENOBUFS;
 
 	p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
 
 		if (optlen < IP_MSFILTER_SIZE(0))
 			goto e_inval;
-		if (optlen > sysctl_optmem_max) {
+		if (optlen > READ_ONCE(sysctl_optmem_max)) {
 			err = -ENOBUFS;
 			break;
 		}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 222f6bf220ba0..e0dcc7a193df2 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < GROUP_FILTER_SIZE(0))
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max)
+	if (optlen > READ_ONCE(sysctl_optmem_max))
 		return -ENOBUFS;
 
 	gsf = memdup_sockptr(optval, optlen);
@@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < size0)
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max - 4)
+	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
 		return -ENOBUFS;
 
 	p = kmalloc(optlen + 4, GFP_KERNEL);
-- 
2.35.1
