/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <[email protected]>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The memory management works as follows:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data is only
 * tracked by the kernel. Upon receipt of a recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
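
/*
 * A minimal user-space usage sketch (illustration only: error handling is
 * omitted, and the algorithm name, key length and the "key" variable are
 * assumptions made for this example, not requirements of the interface):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	int tfmfd, opfd;
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * The cipher operation itself is then driven by sendmsg(2) carrying
 * ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages plus
 * the AAD and plaintext as payload, followed by recvmsg(2), which for an
 * encryption operation returns the AAD, the ciphertext and the tag.
 */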

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tsgl {
	struct list_head list;
	unsigned int cur;		/* Last processed SG entry */
	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
};

struct aead_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
	size_t sg_num_bytes;		/* Bytes of data in that SGL */
};

struct aead_async_req {
	struct kiocb *iocb;
	struct sock *sk;

	struct aead_rsgl first_rsgl;	/* First RX SG */
	struct list_head rsgl_list;	/* Track RX SGs */

	struct scatterlist *tsgl;	/* priv. TX SGL of buffers to process */
	unsigned int tsgl_entries;	/* number of entries in priv. TX SGL */

	unsigned int outlen;		/* Filled output buf length */

	unsigned int areqlen;		/* Length of this data struct */
	struct aead_request aead_req;	/* req ctx trails this struct */
};

struct aead_tfm {
	struct crypto_aead *aead;
	bool has_key;
};

struct aead_ctx {
	struct list_head tsgl_list;	/* Link to TX SGL */

	void *iv;
	size_t aead_assoclen;

	struct af_alg_completion completion;	/* sync work queue */

	size_t used;		/* TX bytes sent to kernel */
	size_t rcvused;		/* total RX bytes to be processed by kernel */

	bool more;		/* More data to be expected? */
	bool merge;		/* Merge new data into existing SG */
	bool enc;		/* Crypto operation: enc, dec */

	unsigned int len;	/* Length of allocated memory for this struct */
};

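/*
 * Maximum number of SG entries per TX SGL block: one page minus the
 * struct aead_tsgl header, divided by the size of one SG entry, with one
 * entry reserved for chaining to the next block.
 */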
#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
		      sizeof(struct scatterlist) - 1)

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline int aead_rcvbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->rcvused, 0);
}

static inline bool aead_readable(struct sock *sk)
{
	return PAGE_SIZE <= aead_rcvbuf(sk);
}

static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

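/*
 * Append a new TX SGL block to the socket's TX SGL list if the list is
 * empty or the tail block is full, chaining it through the tail block's
 * reserved last SG entry.
 */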
static int aead_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}

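/*
 * Count the number of SG entries at the head of the TX SGL list that are
 * needed to hold the given number of bytes of queued data.
 */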
static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl, *tmp;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
		struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			sgl_count++;
			if (sg[i].length >= bytes)
				return sgl_count;

			bytes -= sg[i].length;
		}
	}

	return sgl_count;
}

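/*
 * Extract @used bytes from the head of the TX SGL list. If @dst is
 * non-NULL, the page references are handed over to @dst -- which the
 * caller must have sized via aead_count_tsgl() -- otherwise the pages
 * are released. Fully consumed TX SGL blocks are freed.
 */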
static void aead_pull_tsgl(struct sock *sk, size_t used,
			   struct scatterlist *dst)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created aead_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst)
				sg_set_page(dst + i, page, plen, sg[i].offset);

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;

			if (!dst)
				put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
				      (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

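/*
 * Release all RX SGLs attached to a request, returning their byte counts
 * to the RX buffer accounting, and free the request's private TX SGL
 * together with the page references it holds.
 */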
static void aead_free_areq_sgls(struct aead_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		ctx->rcvused -= rsgl->sg_num_bytes;
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}

	if (areq->tsgl && areq->tsgl_entries)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}

static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

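/*
 * Queue user-space data into the TX SGL. An optional control message
 * selects encryption or decryption and carries the IV and the AAD length;
 * no cipher operation is triggered here -- that happens in recvmsg.
 */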
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	struct aead_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = 0;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used) {
		err = -EINVAL;
		goto unlock;
	}

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		size_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct aead_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			err = aead_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));

		err = aead_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		do {
			unsigned int i = sgl->cur;

			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			if (!sg_page(sg + i)) {
				err = -ENOMEM;
				goto unlock;
			}

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

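/*
 * Zero-copy variant of aead_sendmsg(): instead of copying the data, take
 * a reference on the provided page and link it into the TX SGL directly.
 */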
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		err = aead_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = aead_alloc_tsgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct aead_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	lock_sock(sk);

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	aead_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);
	__sock_put(sk);

	iocb->ki_complete(iocb, err ? err : resultlen, 0);

	release_sock(sk);
}

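/*
 * Perform one cipher operation: map the caller's output buffers into the
 * RX SGL, extract the matching amount of TX data, and invoke the AEAD
 * operation either synchronously or, for AIO, asynchronously.
 */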
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int areqlen =
		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
	struct aead_async_req *areq;
	struct aead_rsgl *last_rsgl = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining the kernel integrity. We need this
	 * check here too in case user space decides not to honor the error
	 * returned by sendmsg/sendpage and still calls recvmsg. This check
	 * here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed, resulting in only the
	 * plaintext being returned to the caller, without a buffer for the
	 * tag.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;
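
	/*
	 * Worked example (assuming AES-GCM with a 16 byte tag): if user
	 * space queued 16 bytes of AAD and 32 bytes of plaintext for
	 * encryption, used is 48 and outlen is 48 + 16 = 64 -- recvmsg
	 * returns the AAD, the ciphertext and the tag.
	 */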

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
	if (unlikely(!areq))
		return -ENOMEM;
	areq->areqlen = areqlen;
	areq->sk = sk;
	INIT_LIST_HEAD(&areq->rsgl_list);
	areq->tsgl = NULL;
	areq->tsgl_entries = 0;

	/* convert iovecs of output buffers into RX SGL */
	while (outlen > usedpages && msg_data_left(msg)) {
		struct aead_rsgl *rsgl;
		size_t seglen;

		/* limit the amount of readable buffers */
		if (!aead_readable(sk))
			break;

		if (!ctx->used) {
			err = aead_wait_for_data(sk, flags);
			if (err)
				goto free;
		}

		seglen = min_t(size_t, (outlen - usedpages),
			       msg_data_left(msg));

		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}

		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		usedpages += err;
		ctx->rcvused += err;
		rsgl->sg_num_bytes = err;
		iov_iter_advance(&msg->msg_iter, err);
	}

	/*
	 * Ensure the output buffer is sufficiently large. If the caller
	 * provides less buffer space, only process the amount of input data
	 * that fits into the provided output buffer. This allows AIO
	 * operation where the caller sent all data to be processed and the
	 * AIO operation performs the operation on the different chunks of
	 * the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	processed = used + ctx->aead_assoclen;
	areq->tsgl_entries = aead_count_tsgl(sk, processed);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	aead_pull_tsgl(sk, processed, areq->tsgl);

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		areq->iocb = msg->msg_iocb;
		aead_request_set_callback(&areq->aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  aead_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
				 crypto_aead_decrypt(&areq->aead_req);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_complete, &ctx->completion);
		err = af_alg_wait_for_completion(ctx->enc ?
				crypto_aead_encrypt(&areq->aead_req) :
				crypto_aead_decrypt(&areq->aead_req),
				&ctx->completion);
	}

	/* AIO operation in progress */
	if (err == -EINPROGRESS) {
		sock_hold(sk);

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		return -EIOCBQUEUED;
	}

free:
	aead_free_areq_sgls(areq);
	if (areq)
		sock_kfree_s(sk, areq, areqlen);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

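/*
 * Verify that the parent transform has been provided with a key before
 * permitting I/O on a socket obtained via the nokey path; the first
 * successful check accounts a reference against the parent socket.
 */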
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendpage(sock, page, offset, size, flags);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	tfm->aead = aead;

	return tfm;
}

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	kfree(tfm);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;
	int err;

	err = crypto_aead_setkey(tfm->aead, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	aead_pull_tsgl(sk, ctx->used, NULL);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	ctx->rcvused = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (!tfm->has_key)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);

	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");