/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/netfilter.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
#include <net/netns/smc.h>
#include <net/netns/bpf.h>
#include <net/netns/mctp.h>
#include <net/net_trackers.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;


#define NETDEV_HASHBITS    8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

struct net {
	/* The first cache line can often be dirtied.
	 * Do not place read-mostly fields here.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_unreg_count;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	int			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Used to link dead nets so the
						 * pernet exit methods can be
						 * called on them (with
						 * pernet_ops_rwsem read locked),
						 * or to unregister pernet ops
						 * (pernet_ops_rwsem write locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	struct idr		netns_ids;

	struct ns_common	ns;
	struct ref_tracker_dir	refcnt_tracker;

	struct list_head	dev_base_head;
	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;		/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;	/* uevent socket */

	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	struct raw_notifier_head	netdev_chain;

	/* Note that @hash_mix can be read millions of times per second,
	 * so it is critical that it sits on a read-mostly cache line.
	 */
	u32			hash_mix;

	struct net_device	*loopback_dev;	/* The loopback */

	/* core fib_rules */
	struct list_head	rules_ops;

	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
	struct netns_unix	unx;
	struct netns_nexthop	nexthop;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	/* Used to store attached BPF programs */
	struct netns_bpf	bpf;

	/* Note: following structs are cache line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif

	u64			net_cookie;	/* written once */

#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
#ifdef CONFIG_XDP_SOCKETS
	struct netns_xdp	xdp;
#endif
#if IS_ENABLED(CONFIG_MCTP)
	struct netns_mctp	mctp;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	struct sock		*crypto_nlsk;
#endif
	struct sock		*diag_nlsk;
#if IS_ENABLED(CONFIG_SMC)
	struct netns_smc	smc;
#endif
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);

struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}

static inline struct ns_common *get_net_ns(struct ns_common *ns)
{
	return ERR_PTR(-EINVAL);
}

static inline struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_NET_NS */


extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);

#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->ns.count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know struct net exists but we
	 * aren't guaranteed a previous reference count
	 * exists.  If the reference count is zero this
	 * function fails and returns NULL.
	 */
	if (!refcount_inc_not_zero(&net->ns.count))
		net = NULL;
	return net;
}
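
/*
 * Illustrative sketch (not part of this header's API; weak_net_ptr and
 * do_something() are made-up names): asynchronous code that only holds a
 * weak pointer to a netns, e.g. a timer or workqueue callback, can use
 * maybe_get_net() to take a real reference and bail out if the namespace
 * is already being torn down:
 *
 *	struct net *net = maybe_get_net(weak_net_ptr);
 *
 *	if (!net)
 *		return;		refcount already hit zero, netns is dying
 *	do_something(net);
 *	put_net(net);
 */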

/* Try using put_net_track() instead */
static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->ns.count))
		__put_net(net);
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->ns.count) != 0;
}

void net_drop_ns(void *);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif


static inline void netns_tracker_alloc(struct net *net,
				       netns_tracker *tracker, gfp_t gfp)
{
#ifdef CONFIG_NET_NS_REFCNT_TRACKER
	ref_tracker_alloc(&net->refcnt_tracker, tracker, gfp);
#endif
}

static inline void netns_tracker_free(struct net *net,
				      netns_tracker *tracker)
{
#ifdef CONFIG_NET_NS_REFCNT_TRACKER
	ref_tracker_free(&net->refcnt_tracker, tracker);
#endif
}

static inline struct net *get_net_track(struct net *net,
					netns_tracker *tracker, gfp_t gfp)
{
	get_net(net);
	netns_tracker_alloc(net, tracker, gfp);
	return net;
}

static inline void put_net_track(struct net *net, netns_tracker *tracker)
{
	netns_tracker_free(net, tracker);
	put_net(net);
}
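
/*
 * Illustrative sketch (struct foo and its fields are made up): a long-lived
 * object that pins a network namespace can pair the reference with a
 * netns_tracker, so leaked references show up in the ref_tracker reports
 * when CONFIG_NET_NS_REFCNT_TRACKER is enabled:
 *
 *	struct foo {
 *		struct net	*net;
 *		netns_tracker	ns_tracker;
 *	};
 *
 *	foo->net = get_net_track(net, &foo->ns_tracker, GFP_KERNEL);
 *	...
 *	put_net_track(foo->net, &foo->ns_tracker);
 *
 * With the tracker config disabled this degrades to plain
 * get_net()/put_net(), as the helpers above show.
 */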

typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}
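
/*
 * Illustrative sketch (struct foo_rule is made up): objects that may be
 * scoped to a namespace embed a possible_net_t rather than a bare pointer,
 * so the field effectively compiles away when CONFIG_NET_NS is disabled
 * while the accessors above keep working either way:
 *
 *	struct foo_rule {
 *		possible_net_t	fr_net;
 *	};
 *
 *	write_pnet(&rule->fr_net, net);
 *	if (net_eq(read_pnet(&rule->fr_net), net))
 *		...
 */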

/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)
#define for_each_net_continue_reverse(VAR)		\
	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
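
/*
 * Illustrative sketch: walkers of net_namespace_list either hold net_rwsem
 * (for the plain for_each_net() variants) or run under RCU with
 * for_each_net_rcu(), e.g.:
 *
 *	rcu_read_lock();
 *	for_each_net_rcu(net)
 *		do_something(net);
 *	rcu_read_unlock();
 *
 * do_something() here is a placeholder, not a real helper.
 */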

#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);

struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks.
	 * More than one net may be constructed and destroyed
	 * in parallel on several cpus.  Every pernet_operations
	 * implementation has to keep all other pernet_operations in
	 * mind and introduce its own locking if they share common
	 * resources.
	 *
	 * The only time they are called with an exclusive lock held is
	 * from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods using blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Then the destruction of a group of nets requires a single
	 * synchronize_rcu() related to these pernet_operations,
	 * instead of a separate synchronize_rcu() for every net.
	 * Please avoid synchronize_rcu() altogether wherever possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	unsigned int *id;
	size_t size;
};

/*
 * Use these carefully.  If you implement a network device and it
 * needs per network namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called, so no new packets will arrive during
 * and after the cleanup functions have been called.  dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup.  So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
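
/*
 * Illustrative sketch of a typical registration (foo_net, foo_net_id and
 * foo_net_ops are made-up names; per-netns private data additionally needs
 * net_generic() from <net/netns/generic.h>):
 *
 *	struct foo_net {
 *		int		some_setting;
 *	};
 *
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_setting = 1;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		...	release per-netns resources here
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init	= foo_net_init,
 *		.exit	= foo_net_exit,
 *		.id	= &foo_net_id,
 *		.size	= sizeof(struct foo_net),
 *	};
 *
 *	err = register_pernet_subsys(&foo_net_ops);
 */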

struct ctl_table;

#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
					     struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
	const char *path, struct ctl_table *table)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif
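
/*
 * Illustrative sketch (the "net/foo" path, foo_table and the hdr field are
 * made up): per-netns sysctls are usually registered from a pernet init
 * method and removed in the corresponding exit method.  In real users the
 * .data pointers are normally redirected at the per-netns fields, often by
 * duplicating a template table for each net:
 *
 *	static struct ctl_table foo_table[] = {
 *		{
 *			.procname	= "foo_enabled",
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 *
 *	net->foo.hdr = register_net_sysctl(net, "net/foo", foo_table);
 *	if (!net->foo.hdr)
 *		return -ENOMEM;
 *	...
 *	unregister_net_sysctl_table(net->foo.hdr);
 */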

static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
	return atomic_read(&net->ipv6.fib6_sernum);
}
#endif

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}

static inline int fnhe_genid(const struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#ifdef CONFIG_NET
void net_ns_init(void);
#else
static inline void net_ns_init(void) {}
#endif

#endif /* __NET_NET_NAMESPACE_H */