/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/dccp.h>
#include <net/netns/netfilter.h>
#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;


#define NETDEV_HASHBITS		8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

struct net {
	/* The first cache line can often be dirtied.
	 * Do not place read-mostly fields here.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	refcount_t		count;		/* To decide when the network
						 * namespace should be shut down.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_unreg_count;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	int			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Used to link dead nets to
						 * invoke the pernet exit methods
						 * (pernet_ops_rwsem read locked),
						 * or to unregister pernet ops
						 * (pernet_ops_rwsem write locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	struct idr		netns_ids;

	struct ns_common	ns;

	struct list_head	dev_base_head;
	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;			/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;		/* uevent socket */

	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	struct raw_notifier_head	netdev_chain;

	/* Note that @hash_mix can be read millions of times per second,
	 * so it is critical that it is on a read_mostly cache line.
	 */
	u32			hash_mix;

	struct net_device	*loopback_dev;		/* The loopback */

	/* core fib_rules */
	struct list_head	rules_ops;

	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
	struct netns_unix	unx;
	struct netns_nexthop	nexthop;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
	struct netns_dccp	dccp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
	struct netns_xt		xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	struct netns_nf_frag	nf_frag;
	struct ctl_table_header	*nf_frag_frags_hdr;
#endif
	struct sock		*nfnl;
	struct sock		*nfnl_stash;
#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
	struct list_head	nfnl_acct_list;
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	struct list_head	nfct_timeout_list;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	struct bpf_prog __rcu	*flow_dissector_prog;

	/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif

	atomic64_t		net_cookie;	/* written once */

#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
#ifdef CONFIG_XDP_SOCKETS
	struct netns_xdp	xdp;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	struct sock		*crypto_nlsk;
#endif
	struct sock		*diag_nlsk;
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */


extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);

#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know struct net exists but we
	 * aren't guaranteed a previous reference count
	 * exists. If the reference count is zero this
	 * function fails and returns NULL.
	 */
	if (!refcount_inc_not_zero(&net->count))
		net = NULL;
	return net;
}

static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->count))
		__put_net(net);
}
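
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * taking and then dropping a namespace reference when the caller only
 * holds a weak pointer.  "candidate" below is a hypothetical pointer.
 *
 *	struct net *net = maybe_get_net(candidate);
 *
 *	if (net) {
 *		... net->count is held here, so the namespace cannot
 *		... be shut down until the matching put_net() ...
 *		put_net(net);
 *	}
 */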

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->count) != 0;
}

void net_drop_ns(void *);

u64 net_gen_cookie(struct net *net);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

static inline u64 net_gen_cookie(struct net *net)
{
	return 0;
}

#define net_drop_ns NULL
#endif


typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}
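
/*
 * Illustrative sketch (not part of this header): possible_net_t is meant
 * to be embedded in objects that must remember their namespace without
 * paying for the pointer when CONFIG_NET_NS is disabled.  "struct foo"
 * and its helpers below are hypothetical.
 *
 *	struct foo {
 *		possible_net_t net;
 *	};
 *
 *	static void foo_init(struct foo *f, struct net *net)
 *	{
 *		write_pnet(&f->net, net);
 *	}
 *
 *	static struct net *foo_net(const struct foo *f)
 *	{
 *		return read_pnet(&f->net);
 *	}
 */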

/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)
#define for_each_net_continue_reverse(VAR)		\
	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
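
/*
 * Illustrative sketch (not part of this header): walking the namespace
 * list under RCU.  The loop body is hypothetical.
 *
 *	struct net *net;
 *
 *	rcu_read_lock();
 *	for_each_net_rcu(net) {
 *		... inspect net, but do not sleep or take references
 *		... without maybe_get_net() ...
 *	}
 *	rcu_read_unlock();
 *
 * for_each_net() and for_each_net_continue_reverse() instead require the
 * caller to hold net_rwsem, as noted above.
 */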

#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);

struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks.
	 * More than one net may be constructed and destructed
	 * in parallel on several cpus. Every pernet_operations
	 * instance has to take all other pernet_operations into
	 * account and introduce locking if they share common resources.
	 *
	 * The only time they are called with an exclusive lock is
	 * from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods using blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Then, destruction of a group of nets requires a single
	 * synchronize_rcu() related to these pernet_operations,
	 * instead of a separate synchronize_rcu() for every net.
	 * Please avoid synchronize_rcu() altogether where possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	unsigned int *id;
	size_t size;
};

/*
 * Use these carefully. If you implement a network device and it
 * needs per network namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called so no new packets will arrive during
 * and after the cleanup functions have been called. dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup. So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);

struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
					     struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
	const char *path, struct ctl_table *table)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif
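
/*
 * Illustrative sketch (not part of this header): registering a
 * per-namespace sysctl table, typically from a pernet init() method.
 * The "foo" names, the "net/foo" path and foo_sysctl_bar (an int) are
 * hypothetical.
 *
 *	static struct ctl_table foo_sysctl_table[] = {
 *		{
 *			.procname	= "bar",
 *			.data		= &foo_sysctl_bar,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 *
 *	hdr = register_net_sysctl(net, "net/foo", foo_sysctl_table);
 *	if (!hdr)
 *		return -ENOMEM;
 *	...
 *	unregister_net_sysctl_table(hdr);
 *
 * Per-net users typically kmemdup() the template table and point .data at
 * fields of their per-namespace state before registering it.
 */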

static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}

static inline int fnhe_genid(const struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#endif /* __NET_NET_NAMESPACE_H */