/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP protocol.
 *
 * Version:	@(#)udp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <[email protected]>
 */
#ifndef _LINUX_UDP_H
#define _LINUX_UDP_H

#include <net/inet_sock.h>
#include <linux/skbuff.h>
#include <net/netns/hash.h>
#include <uapi/linux/udp.h>

static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	return (struct udphdr *)skb_transport_header(skb);
}

#define UDP_HTABLE_SIZE_MIN	(CONFIG_BASE_SMALL ? 128 : 256)

static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
	return (num + net_hash_mix(net)) & mask;
}

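/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * bind/lookup paths pick a primary hash chain roughly like
 *
 *	slot = udp_hashfn(net, snum, udptable->mask);
 *	hslot = &udptable->hash[slot];
 *
 * net_hash_mix() folds the network namespace into the hash, so the same
 * port number does not land in the same chain in every namespace.
 */
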
enum {
	UDP_FLAGS_CORK,		/* Cork is required */
	UDP_FLAGS_NO_CHECK6_TX,	/* Send zero UDP6 checksums on TX? */
	UDP_FLAGS_NO_CHECK6_RX,	/* Allow zero UDP6 checksums on RX? */
	UDP_FLAGS_GRO_ENABLED,	/* Request GRO aggregation */
	UDP_FLAGS_ACCEPT_FRAGLIST,
	UDP_FLAGS_ACCEPT_L4,
	UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
};

struct udp_sock {
	/* inet_sock has to be the first member */
	struct inet_sock inet;
#define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node

	unsigned long	 udp_flags;

	int		 pending;	/* Any pending frames ? */
	__u8		 encap_type;	/* Is this an Encapsulation socket? */

/* indicator bits used by pcflag: */
#define UDPLITE_BIT      0x1	/* set by udplite proto init function */
#define UDPLITE_SEND_CC  0x2	/* set via udplite setsockopt */
#define UDPLITE_RECV_CC  0x4	/* set via udplite setsockopt */
	__u8		 pcflag;	/* marks socket as UDP-Lite if > 0 */
	/*
	 * Following member retains the information to create a UDP header
	 * when the socket is uncorked.
	 */
	__u16		 len;		/* total length of pending frames */
	__u16		 gso_size;
	/*
	 * Fields specific to UDP-Lite.
	 */
	__u16		 pcslen;
	__u16		 pcrlen;
	/*
	 * For encapsulation sockets.
	 */
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
	void (*encap_destroy)(struct sock *sk);

	/* GRO functions for UDP socket */
	struct sk_buff *	(*gro_receive)(struct sock *sk,
					       struct list_head *head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sock *sk,
						struct sk_buff *skb,
						int nhoff);

	/* udp_recvmsg() tries to use this before splicing sk_receive_queue */
	struct sk_buff_head	reader_queue ____cacheline_aligned_in_smp;

	/* This field is dirtied by udp_recvmsg() */
	int		forward_deficit;
};

#define udp_test_bit(nr, sk)			\
	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_set_bit(nr, sk)			\
	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_test_and_set_bit(nr, sk)		\
	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_clear_bit(nr, sk)			\
	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_assign_bit(nr, sk, val)		\
	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)

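/*
 * Usage sketch: callers pass the flag name without its UDP_FLAGS_ prefix,
 * so udp_set_bit(CORK, sk) expands to
 * set_bit(UDP_FLAGS_CORK, &udp_sk(sk)->udp_flags), i.e. an atomic bit
 * operation on the udp_flags word of the socket.
 */
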
#define UDP_MAX_SEGMENTS	(1 << 6UL)

static inline struct udp_sock *udp_sk(const struct sock *sk)
{
	return (struct udp_sock *)sk;
}

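/*
 * The cast in udp_sk() is only valid because struct inet_sock (which embeds
 * struct sock) is the first member of struct udp_sock, so a struct sock
 * pointer for a UDP socket also points at the enclosing struct udp_sock.
 */
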
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_TX, sk, val);
}

static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_RX, sk, val);
}

static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_TX, sk);
}

static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_RX, sk);
}

static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
				 struct sk_buff *skb)
{
	int gso_size;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		gso_size = skb_shinfo(skb)->gso_size;
		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
	}
}

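/*
 * Userspace sketch (assumes a socket with the UDP_GRO socket option enabled;
 * the msg, cmsg and gso_size variables are hypothetical): after recvmsg(),
 * the segment size delivered by udp_cmsg_recv() above can be read back as
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) {
 *			memcpy(&gso_size, CMSG_DATA(cmsg), sizeof(gso_size));
 *			break;
 *		}
 *	}
 */
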
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif

static inline bool udp_encap_needed(void)
{
	if (static_branch_unlikely(&udp_encap_needed_key))
		return true;

#if IS_ENABLED(CONFIG_IPV6)
	if (static_branch_unlikely(&udpv6_encap_needed_key))
		return true;
#endif

	return false;
}

static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return false;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
	    !udp_test_bit(ACCEPT_L4, sk))
		return true;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
		return true;

	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
	 * land in a tunnel as the socket check in udp_gro_receive cannot be
	 * foolproof.
	 */
	if (udp_encap_needed() &&
	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
	    !(skb_shinfo(skb)->gso_type &
	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
		return true;

	return false;
}

static inline void udp_allow_gso(struct sock *sk)
{
	udp_set_bit(ACCEPT_L4, sk);
	udp_set_bit(ACCEPT_FRAGLIST, sk);
}

#define udp_portaddr_for_each_entry(__sk, list) \
	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)

#define udp_portaddr_for_each_entry_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)

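/*
 * Illustrative sketch (hslot2 is a hypothetical pointer into the secondary,
 * port+address hash table): the lookup path walks a chain under RCU roughly
 * like
 *
 *	rcu_read_lock();
 *	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head)
 *		...compare addresses, ports and bound device...
 *	rcu_read_unlock();
 */
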
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)

#endif	/* _LINUX_UDP_H */