/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Alan Cox, <[email protected]>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
#include <linux/static_key.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
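
/*
 * ip_hdrlen() returns the IPv4 header length in bytes: ihl counts 32-bit
 * words, so a minimal option-less header has ihl = 5, i.e. 20 bytes.
 * Illustrative sketch (assumed caller code, not a declaration from this
 * header) for reaching the transport header by hand:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	unsigned char *th = skb_network_header(skb) + ip_hdrlen(skb);
 *
 * Any IP options occupy ip_hdrlen(skb) - sizeof(struct iphdr) bytes.
 */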

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			protocol;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
	ipcm->addr = inet->inet_saddr;
	ipcm->protocol = inet->inet_num;
}
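
/*
 * Illustrative sketch (modelled loosely on a datagram sendmsg path; the
 * surrounding names are assumptions): a protocol seeds the cookie with
 * per-socket defaults, then lets control messages override them.
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (err < 0)
 *			goto out;
 *	}
 */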

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}
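
/*
 * Illustrative sketch (assumed receive-path caller, not part of this
 * header): when a packet arrived through an L3 master (VRF) device,
 * inet_sdif() yields the enslaved ifindex so a socket lookup can be
 * scoped to the VRF; otherwise it is 0 and sdif is ignored.
 *
 *	int sdif = inet_sdif(skb);
 *	int dif = inet_iif(skb);
 *
 *	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
 *			       dif, sdif, udptable, skb);
 */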

/* Special input handler for packets caught by the router alert option.
   They are selected only by protocol field, and then processed like
   local ones; but only if someone wants them!  Otherwise, a router
   not running rsvpd will kill RSVP.

   What user level does with them is a user-level problem.
   I have no idea how it will masquerade or NAT them (it is a joke,
   joke :-)), but the receiver should be clever enough, e.g., to forward
   mtrace requests sent to a multicast group so they reach the
   destination designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void		(*destructor)(struct sock *);
		struct sock	*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
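
/*
 * Illustrative sketch (modelled on the ip_do_fragment() fast path, with
 * error handling trimmed): walking a ready-made frag list, fixing up each
 * fragment's IP header and emitting it.
 *
 *	struct ip_fraglist_iter iter;
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */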

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

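/*
 * Illustrative sketch (modelled on the ip_do_fragment() slow path, with
 * error handling trimmed; 'df' - whether fragments keep the DF bit - is
 * an assumed local): fragments are carved off one at a time until
 * state.left reaches zero.
 *
 *	struct ip_frag_state state;
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		struct sk_buff *skb2 = ip_frag_next(skb, &state);
 *
 *		if (IS_ERR(skb2)) {
 *			err = PTR_ERR(skb2);
 *			break;
 *		}
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */
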
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
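
/*
 * Illustrative sketch (modelled on the UDP send path, simplified):
 * ip_make_skb() builds a fully-formed datagram on a private queue
 * without touching the socket's pending frames, so uncorked sends need
 * no socket-wide cork state; the result is then finished and pushed via
 * ip_send_skb().  udp_send_skb() below stands in for a protocol routine
 * that ends in ip_send_skb(); see net/ipv4/udp.c for the real thing.
 *
 *	skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
 *			  sizeof(struct udphdr), &ipc, &rt,
 *			  &cork, msg->msg_flags);
 *	err = PTR_ERR(skb);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = udp_send_skb(skb, fl4, &cork);
 */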

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time, u32 txhash);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}

unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif
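
/*
 * Illustrative sketch (assumed /proc show routine): each SNMP mib is a
 * per-cpu array of counters, and snmp_fold_field() sums one counter,
 * identified by its word offset, across all possible CPUs.
 *
 *	seq_printf(seq, "%lu",
 *		   snmp_fold_field(net->mib.net_statistics,
 *				   LINUX_MIB_TCPTIMEOUTS));
 *
 * The *64 variants additionally take the offset of a u64_stats_sync so
 * 64-bit counters can be read consistently on 32-bit hosts.
 */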

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(const struct net *net, int *low, int *high);
void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
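
/*
 * ip_is_fragment() is true for every piece of a fragmented datagram:
 * non-final fragments have IP_MF set, and all fragments but the first
 * carry a non-zero offset.  Illustrative sketch (assumed caller code)
 * for singling out the first fragment, whose offset is zero but which
 * still has IP_MF set:
 *
 *	if (ip_is_fragment(iph) && !(iph->frag_off & htons(IP_OFFSET)))
 *		;	first fragment: transport header is present
 */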

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}

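/*
 * The checksum update above is the RFC 1141/1624 incremental trick:
 * decrementing TTL subtracts 0x0100 from the header data (TTL is the
 * high byte of its 16-bit word), so the complemented checksum grows by
 * htons(0x0100), and the (check >= 0xFFFF) term folds the end-around
 * carry.  A minimal sanity check, assuming the header's checksum was
 * valid beforehand:
 *
 *	ip_send_check(iph);	full recompute, iph->check now valid
 *	ip_decrease_ttl(iph);	incremental update
 *	WARN_ON(ip_fast_csum((u8 *)iph, iph->ihl));	still sums to zero
 */
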
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
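
/*
 * Worked example: the low 23 bits of the group address land under the
 * fixed 01:00:5e prefix (RFC 1112, section 6.4), so 224.1.2.3 becomes
 * 01:00:5e:01:02:03; because the top 9 address bits are dropped,
 * 225.129.2.3 maps to that same MAC.
 */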

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
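
/*
 * Illustrative sketch (assumed lookup code; 'table' and 'mask' are made
 * up): hashing address and port together selects a socket hash bucket,
 * and mixing in net_hash_mix() makes the distribution differ per network
 * namespace.
 *
 *	u32 hash = ipv4_portaddr_hash(net, inet->inet_rcv_saddr,
 *				      inet->inet_num);
 *	unsigned int slot = hash & table->mask;
 */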

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
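
/*
 * Illustrative sketch (modelled on the conntrack defrag hook): the enum
 * leaves a USHRT_MAX-wide window after each conntrack user so that a
 * zone id can be folded into 'user', and range checks recover the class.
 *
 *	u32 user = IP_DEFRAG_CONNTRACK_IN + zone_id;
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		;	came from the conntrack input hook
 */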

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int do_ip_getsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
void __ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */