// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handshake request lifetime events
 *
 * Author: Chuck Lever <[email protected]>
 *
 * Copyright (c) 2023, Oracle and/or its affiliates.
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/fdtable.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>

#include <uapi/linux/handshake.h>
#include "handshake.h"

#include <trace/events/handshake.h>

/*
 * We need both a handshake_req -> sock mapping, and a sock ->
 * handshake_req mapping. Both are one-to-one.
 *
 * To avoid adding another pointer field to struct sock, net/handshake
 * maintains a hash table, indexed by the memory address of @sock, to
 * find the struct handshake_req outstanding for that socket. The
 * reverse direction uses a simple pointer field in the handshake_req
 * struct.
 */

static struct rhashtable handshake_rhashtbl ____cacheline_aligned_in_smp;

static const struct rhashtable_params handshake_rhash_params = {
	.key_len		= sizeof_field(struct handshake_req, hr_sk),
	.key_offset		= offsetof(struct handshake_req, hr_sk),
	.head_offset		= offsetof(struct handshake_req, hr_rhash),
	.automatic_shrinking	= true,
};

int handshake_req_hash_init(void)
{
	return rhashtable_init(&handshake_rhashtbl, &handshake_rhash_params);
}

void handshake_req_hash_destroy(void)
{
	rhashtable_destroy(&handshake_rhashtbl);
}

struct handshake_req *handshake_req_hash_lookup(struct sock *sk)
{
	return rhashtable_lookup_fast(&handshake_rhashtbl, &sk,
				      handshake_rhash_params);
}

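/*
 * Insert @req into the hash table only if no handshake_req is already
 * hashed for the socket in @req->hr_sk; returns false if one is. This
 * enforces at most one pending handshake per socket.
 */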
static bool handshake_req_hash_add(struct handshake_req *req)
{
	int ret;

	ret = rhashtable_lookup_insert_fast(&handshake_rhashtbl,
					    &req->hr_rhash,
					    handshake_rhash_params);
	return ret == 0;
}

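/*
 * Release all resources associated with @req: let the protocol clean
 * up via ->hp_destroy, unhash the request, then free it.
 */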
static void handshake_req_destroy(struct handshake_req *req)
{
	if (req->hr_proto->hp_destroy)
		req->hr_proto->hp_destroy(req);
	rhashtable_remove_fast(&handshake_rhashtbl, &req->hr_rhash,
			       handshake_rhash_params);
	kfree(req);
}

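/*
 * Installed as @sk's sk_destruct callback while a handshake is
 * outstanding. Destroys any handshake_req still associated with @sk,
 * then invokes the socket's original destructor.
 */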
static void handshake_sk_destruct(struct sock *sk)
{
	void (*sk_destruct)(struct sock *sk);
	struct handshake_req *req;

	req = handshake_req_hash_lookup(sk);
	if (!req)
		return;

	trace_handshake_destruct(sock_net(sk), req, sk);
	sk_destruct = req->hr_odestruct;
	handshake_req_destroy(req);
	if (sk_destruct)
		sk_destruct(sk);
}

/**
 * handshake_req_alloc - Allocate a handshake request
 * @proto: security protocol
 * @flags: memory allocation flags
 *
 * Returns an initialized handshake_req or NULL.
 */
struct handshake_req *handshake_req_alloc(const struct handshake_proto *proto,
					  gfp_t flags)
{
	struct handshake_req *req;

	if (!proto)
		return NULL;
	if (proto->hp_handler_class <= HANDSHAKE_HANDLER_CLASS_NONE)
		return NULL;
	if (proto->hp_handler_class >= HANDSHAKE_HANDLER_CLASS_MAX)
		return NULL;
	if (!proto->hp_accept || !proto->hp_done)
		return NULL;

	req = kzalloc(struct_size(req, hr_priv, proto->hp_privsize), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->hr_list);
	req->hr_proto = proto;
	return req;
}
EXPORT_SYMBOL(handshake_req_alloc);

/**
 * handshake_req_private - Get per-handshake private data
 * @req: handshake arguments
 *
 * Returns a pointer to the requester's private data area, which was
 * allocated at the end of @req by handshake_req_alloc().
 */
void *handshake_req_private(struct handshake_req *req)
{
	return (void *)&req->hr_priv;
}
EXPORT_SYMBOL(handshake_req_private);

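/* Caller must hold hn->hn_lock */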
static bool __add_pending_locked(struct handshake_net *hn,
				 struct handshake_req *req)
{
	if (WARN_ON_ONCE(!list_empty(&req->hr_list)))
		return false;
	hn->hn_pending++;
	list_add_tail(&req->hr_list, &hn->hn_requests);
	return true;
}

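/* Caller must hold hn->hn_lock */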
static void __remove_pending_locked(struct handshake_net *hn,
				    struct handshake_req *req)
{
	hn->hn_pending--;
	list_del_init(&req->hr_list);
}

/*
 * Returns %true if the request was found on @hn's pending list,
 * otherwise %false.
 *
 * If @req was on a pending list, it has not yet been accepted.
 */
static bool remove_pending(struct handshake_net *hn, struct handshake_req *req)
{
	bool ret = false;

	spin_lock(&hn->hn_lock);
	if (!list_empty(&req->hr_list)) {
		__remove_pending_locked(hn, req);
		ret = true;
	}
	spin_unlock(&hn->hn_lock);

	return ret;
}

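/*
 * Dequeue the next pending handshake request whose protocol uses
 * handler class @class, or return NULL if none is queued.
 */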
struct handshake_req *handshake_req_next(struct handshake_net *hn, int class)
{
	struct handshake_req *req, *pos;

	req = NULL;
	spin_lock(&hn->hn_lock);
	list_for_each_entry(pos, &hn->hn_requests, hr_list) {
		if (pos->hr_proto->hp_handler_class != class)
			continue;
		__remove_pending_locked(hn, pos);
		req = pos;
		break;
	}
	spin_unlock(&hn->hn_lock);

	return req;
}

/**
 * handshake_req_submit - Submit a handshake request
 * @sock: open socket on which to perform the handshake
 * @req: handshake arguments
 * @flags: memory allocation flags
 *
 * Return values:
 *   %0: Request queued
 *   %-EINVAL: Invalid argument
 *   %-EBUSY: A handshake is already under way for this socket
 *   %-ESRCH: No handshake agent is available
 *   %-EAGAIN: Too many pending handshake requests
 *   %-ENOMEM: Failed to allocate memory
 *   %-EMSGSIZE: Failed to construct notification message
 *   %-EOPNOTSUPP: Handshake module not initialized
 *
 * A zero return value from handshake_req_submit() means that
 * exactly one subsequent completion callback is guaranteed.
 *
 * A negative return value from handshake_req_submit() means that
 * no completion callback will be done and that @req has been
 * destroyed.
 */
int handshake_req_submit(struct socket *sock, struct handshake_req *req,
			 gfp_t flags)
{
	struct handshake_net *hn;
	struct net *net;
	int ret;

	if (!sock || !req || !sock->file) {
		kfree(req);
		return -EINVAL;
	}

	req->hr_sk = sock->sk;
	if (!req->hr_sk) {
		kfree(req);
		return -EINVAL;
	}
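	/* Take over sk_destruct so that any handshake_req still
	 * attached to this socket is released when the socket is
	 * destroyed.
	 */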
	req->hr_odestruct = req->hr_sk->sk_destruct;
	req->hr_sk->sk_destruct = handshake_sk_destruct;

	ret = -EOPNOTSUPP;
	net = sock_net(req->hr_sk);
	hn = handshake_pernet(net);
	if (!hn)
		goto out_err;

	ret = -EAGAIN;
	if (READ_ONCE(hn->hn_pending) >= hn->hn_pending_max)
		goto out_err;

	spin_lock(&hn->hn_lock);
	ret = -EOPNOTSUPP;
	if (test_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags))
		goto out_unlock;
	ret = -EBUSY;
	if (!handshake_req_hash_add(req))
		goto out_unlock;
	if (!__add_pending_locked(hn, req))
		goto out_unlock;
	spin_unlock(&hn->hn_lock);

	ret = handshake_genl_notify(net, req->hr_proto, flags);
	if (ret) {
		trace_handshake_notify_err(net, req, req->hr_sk, ret);
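		/* If @req is no longer on the pending list, an agent
		 * has already accepted it; treat the submission as
		 * successful and let the completion path run normally.
		 */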
		if (remove_pending(hn, req))
			goto out_err;
	}

	/* Prevent socket release while a handshake request is pending */
	sock_hold(req->hr_sk);

	trace_handshake_submit(net, req, req->hr_sk);
	return 0;

out_unlock:
	spin_unlock(&hn->hn_lock);
out_err:
	trace_handshake_submit_err(net, req, req->hr_sk, ret);
	handshake_req_destroy(req);
	return ret;
}
EXPORT_SYMBOL(handshake_req_submit);

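/*
 * Mark @req as completed and invoke its ->hp_done callback. The
 * HANDSHAKE_F_REQ_COMPLETED bit ensures that the callback and the
 * matching sock_put() run at most once, even if completion races
 * with handshake_req_cancel().
 */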
void handshake_complete(struct handshake_req *req, unsigned int status,
			struct genl_info *info)
{
	struct sock *sk = req->hr_sk;
	struct net *net = sock_net(sk);

	if (!test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
		trace_handshake_complete(net, req, sk, status);
		req->hr_proto->hp_done(req, status, info);

		/* Handshake request is no longer pending */
		sock_put(sk);
	}
}

/**
 * handshake_req_cancel - Cancel an in-progress handshake
 * @sk: socket on which there is an ongoing handshake
 *
 * Request cancellation races with request completion. To determine
 * who won, callers examine the return value from this function.
 *
 * Return values:
 *   %true - Uncompleted handshake request was canceled
 *   %false - Handshake request already completed or not found
 */
bool handshake_req_cancel(struct sock *sk)
{
	struct handshake_req *req;
	struct handshake_net *hn;
	struct net *net;

	net = sock_net(sk);
	req = handshake_req_hash_lookup(sk);
	if (!req) {
		trace_handshake_cancel_none(net, req, sk);
		return false;
	}

	hn = handshake_pernet(net);
	if (hn && remove_pending(hn, req)) {
		/* Request hadn't been accepted */
		goto out_true;
	}
	if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
		/* Request already completed */
		trace_handshake_cancel_busy(net, req, sk);
		return false;
	}

out_true:
	trace_handshake_cancel(net, req, sk);

	/* Handshake request is no longer pending */
	sock_put(sk);
	return true;
}
EXPORT_SYMBOL(handshake_req_cancel);