/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP protocol.
 *
 * Version:	@(#)udp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <[email protected]>
 */
#ifndef _LINUX_UDP_H
#define _LINUX_UDP_H

#include <net/inet_sock.h>
#include <linux/skbuff.h>
#include <net/netns/hash.h>
#include <uapi/linux/udp.h>

static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	return (struct udphdr *)skb_transport_header(skb);
}
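
/*
 * Illustrative sketch (not part of this header): once the transport
 * header offset of an skb has been set, udp_hdr() returns a typed
 * pointer to the UDP header, so a receive-path handler can do e.g.
 *
 *	struct udphdr *uh = udp_hdr(skb);
 *	unsigned int dport = ntohs(uh->dest);
 *
 * The header fields are in network byte order, hence the ntohs().
 */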

#define UDP_HTABLE_SIZE_MIN	(CONFIG_BASE_SMALL ? 128 : 256)

static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
	return (num + net_hash_mix(net)) & mask;
}

enum {
	UDP_FLAGS_CORK,			/* Cork is required */
	UDP_FLAGS_NO_CHECK6_TX,		/* Send zero UDP6 checksums on TX? */
	UDP_FLAGS_NO_CHECK6_RX,		/* Allow zero UDP6 checksums on RX? */
	UDP_FLAGS_GRO_ENABLED,		/* Request GRO aggregation */
	UDP_FLAGS_ACCEPT_FRAGLIST,	/* Accept SKB_GSO_FRAGLIST packets */
	UDP_FLAGS_ACCEPT_L4,		/* Accept SKB_GSO_UDP_L4 packets */
	UDP_FLAGS_ENCAP_ENABLED,	/* This socket enabled encap */
};

struct udp_sock {
	/* inet_sock has to be the first member */
	struct inet_sock inet;
#define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node

	unsigned long	udp_flags;

	int		pending;	/* Any pending frames ? */
	__u8		encap_type;	/* Is this an Encapsulation socket? */

	/* indicator bits used by pcflag: */
#define UDPLITE_BIT	0x1		/* set by udplite proto init function */
#define UDPLITE_SEND_CC	0x2		/* set via udplite setsockopt */
#define UDPLITE_RECV_CC	0x4		/* set via udplite setsockopt */
	__u8		pcflag;		/* marks socket as UDP-Lite if > 0 */
	/*
	 * Following member retains the information to create a UDP header
	 * when the socket is uncorked.
	 */
	__u16		len;		/* total length of pending frames */
	__u16		gso_size;
	/*
	 * Fields specific to UDP-Lite.
	 */
	__u16		pcslen;
	__u16		pcrlen;
	/*
	 * For encapsulation sockets.
	 */
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
	void (*encap_destroy)(struct sock *sk);

	/* GRO functions for UDP socket */
	struct sk_buff *	(*gro_receive)(struct sock *sk,
					       struct list_head *head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sock *sk,
						struct sk_buff *skb,
						int nhoff);

	/* udp_recvmsg() tries to use this before splicing sk_receive_queue */
	struct sk_buff_head	reader_queue ____cacheline_aligned_in_smp;

	/* This field is dirtied by udp_recvmsg() */
	int		forward_deficit;
};

#define udp_test_bit(nr, sk)			\
	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_set_bit(nr, sk)			\
	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_test_and_set_bit(nr, sk)		\
	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_clear_bit(nr, sk)			\
	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_assign_bit(nr, sk, val)		\
	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
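
/*
 * Usage sketch: the accessors above take the flag name without its
 * UDP_FLAGS_ prefix, so for example
 *
 *	udp_set_bit(CORK, sk);
 *
 * expands to
 *
 *	set_bit(UDP_FLAGS_CORK, &udp_sk(sk)->udp_flags);
 *
 * i.e. an atomic bit operation on the socket's flag word, usable without
 * holding the socket lock.
 */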

#define UDP_MAX_SEGMENTS	(1 << 6UL)

static inline struct udp_sock *udp_sk(const struct sock *sk)
{
	return (struct udp_sock *)sk;
}

static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_TX, sk, val);
}

static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_RX, sk, val);
}

static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_TX, sk);
}

static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_RX, sk);
}
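
/*
 * These helpers back the UDP_NO_CHECK6_TX / UDP_NO_CHECK6_RX socket
 * options from uapi/linux/udp.h, which let IPv6 UDP tunnel endpoints
 * send and accept zero checksums. Illustrative userspace sketch (not a
 * complete program; "fd" is assumed to be an IPv6 UDP tunnel socket):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &on, sizeof(on));
 *
 * IPPROTO_UDP and SOL_UDP share the value 17, so either spelling of the
 * level works.
 */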

static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
				 struct sk_buff *skb)
{
	int gso_size;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		gso_size = skb_shinfo(skb)->gso_size;
		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
	}
}
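
/*
 * For context: when UDP GRO is enabled on a socket, the aggregated
 * segment size is reported to userspace via the SOL_UDP/UDP_GRO cmsg
 * filled in above. Illustrative userspace sketch (UDP_GRO is the option
 * value from the uapi header; "fd" is assumed, error handling omitted):
 *
 *	int on = 1, gso_size = 0;
 *	char data[65535];
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == IPPROTO_UDP && cm->cmsg_type == UDP_GRO)
 *			memcpy(&gso_size, CMSG_DATA(cm), sizeof(gso_size));
 */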

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif

static inline bool udp_encap_needed(void)
{
	if (static_branch_unlikely(&udp_encap_needed_key))
		return true;

#if IS_ENABLED(CONFIG_IPV6)
	if (static_branch_unlikely(&udpv6_encap_needed_key))
		return true;
#endif

	return false;
}

static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return false;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
	    !udp_test_bit(ACCEPT_L4, sk))
		return true;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
		return true;

	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
	 * land in a tunnel as the socket check in udp_gro_receive cannot be
	 * foolproof.
	 */
	if (udp_encap_needed() &&
	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
	    !(skb_shinfo(skb)->gso_type &
	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
		return true;

	return false;
}

static inline void udp_allow_gso(struct sock *sk)
{
	udp_set_bit(ACCEPT_L4, sk);
	udp_set_bit(ACCEPT_FRAGLIST, sk);
}

#define udp_portaddr_for_each_entry(__sk, list) \
	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)

#define udp_portaddr_for_each_entry_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)

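/*
 * Usage sketch for the iterators above (kernel-internal; the caller must
 * hold the bucket lock, or the RCU read lock for the _rcu variant). In
 * the style of the port/address lookups in net/ipv4/udp.c, with "hslot2"
 * and "hash" standing in for a secondary-hash bucket and its hash value:
 *
 *	struct sock *sk2;
 *
 *	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
 *		if (udp_sk(sk2)->udp_portaddr_hash == hash)
 *			...;
 *	}
 */
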
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)

#endif	/* _LINUX_UDP_H */