// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

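/* Hash the 4-tuple of an established IPv4 socket into the ehash table.
 * The secret is lazily initialized on first use, and net_hash_mix() adds
 * a per-netns component so chain layout differs between namespaces.
 */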
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev    = l3mdev;
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   unsigned short port, int l3mdev,
				   const struct sock *sk)
{
	write_pnet(&tb->ib_net, net);
	tb->l3mdev = l3mdev;
	tb->port = port;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	else
#endif
		tb->rcv_saddr = sk->sk_rcv_saddr;
	INIT_HLIST_HEAD(&tb->owners);
	hlist_add_head(&tb->node, &head->chain);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   unsigned short port,
						   int l3mdev,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb)
		inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk);

	return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind2_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, port,
						       l3mdev, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

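/* Score a candidate listener for an incoming packet: -1 means no match
 * (wrong netns, port, local address or bound device).  A usable listener
 * scores 1, or 2 when it is bound to a specific matching device, plus one
 * point for a native AF_INET socket and one more when sk_incoming_cpu
 * matches the CPU doing the lookup.
 */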
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
	    !ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
					    struct sk_buff *skb, int doff,
					    __be32 saddr, __be16 sport,
					    __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}

/*
 * Here are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = lookup_reuseport(net, sk, skb, doff,
						  saddr, sport, daddr, hnum);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

static inline struct sock *inet_lookup_run_bpf(struct net *net,
					       struct inet_hashinfo *hashinfo,
					       struct sk_buff *skb, int doff,
					       __be32 saddr, __be16 sport,
					       __be32 daddr, u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (hashinfo != &tcp_hashinfo)
		return NULL; /* only TCP is supported */

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

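/* Listener lookup order: a BPF sk_lookup program, when attached, gets the
 * first chance to pick a socket; otherwise the lhash2 bucket for the exact
 * local address is scanned, falling back to the INADDR_ANY bucket.
 */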
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
					     saddr, sport, daddr, hnum, dif);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

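/* Check whether the 4-tuple chosen for an outgoing connection is still
 * unique in the established hash.  A conflicting TIME_WAIT entry may be
 * recycled when twsk_unique() allows it; on success the socket is hashed
 * into ehash under the bucket lock and any recycled timewait socket is
 * removed from the chain.
 */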
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a matching socket already exists, sk is not inserted
 * and the found_dup_sk parameter is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

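/* Join an existing reuseport group if the listen bucket already contains a
 * compatible listener (same family, bound device, bind bucket, owning uid
 * and local address); otherwise allocate a new group for this socket.
 */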
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
	else
#endif
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr addr_any = {};

	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
	else
#endif
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	u32 hash;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr addr_any = {};

	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

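/* Re-key the socket in the bhash2 (port, address) table, typically after
 * its bound source address has changed: remove it from the bucket for the
 * previous address, if any, and add it to the bucket for the current one,
 * using a pre-allocated bucket so the table cannot be left inconsistent.
 */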
int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	struct inet_bind_hashbucket *head2;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2)
		return -ENOMEM;

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	if (prev_saddr) {
		spin_lock_bh(&prev_saddr->lock);
		__sk_del_bind2_node(sk);
		inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
					  inet_csk(sk)->icsk_bind2_hash);
		spin_unlock_bh(&prev_saddr->lock);
	}

	spin_lock_bh(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk);
	}
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
	spin_unlock_bh(&head2->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by clever attacker.
 * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
 * attacks were since demonstrated, thus we use 65536 instead to really
 * give more isolation and privacy, at the expense of 256kB of kernel
 * memory.
 */
#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
static u32 *table_perturb;

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	int l3mdev;
	u32 index;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	net_get_random_once(table_perturb,
			    INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, port, l3mdev, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random here so that
	 * on low contention the randomness is maximal and on high contention
	 * it may be inexistent.
	 */
	i = max_t(int, i, (prandom_u32() & 7) * 2);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	spin_unlock(&head2->lock);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);
	spin_unlock(&head->lock);
	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	spin_unlock_bh(&head->lock);
	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);