/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>

#define REG_STATE_NEW           0x0
#define REG_STATE_REGISTERED    0x1
#define REG_STATE_UNREGISTERED  0x2
#define REG_STATE_UNUSED        0x3

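/* A xdp_rxq_info registration moves between these states:
 *
 *   NEW or UNREGISTERED -> REGISTERED     via xdp_rxq_info_reg()
 *   REGISTERED          -> UNREGISTERED   via xdp_rxq_info_unreg()
 *   any state           -> UNUSED         via xdp_rxq_info_unused()
 *
 * Registering an UNUSED rxq is rejected; see xdp_rxq_info_reg() below.
 */
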
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

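/* One entry per registered allocator: mem.id is both the rhashtable key
 * and the value carried around in xdp_mem_info, so the return path can
 * map an ID back to its allocator (e.g. page_pool) with a single lookup.
 */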
struct xdp_mem_allocator {
        struct xdp_mem_info mem;
        union {
                void *allocator;
                struct page_pool *page_pool;
        };
        struct rhash_head node;
        struct rcu_head rcu;
};

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
        const u32 *k = data;
        const u32 key = *k;

        BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
                     != sizeof(u32));

        /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
        return key << RHT_HASH_RESERVED_SPACE;
}
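
/* Note on the identity hash above: IDs from the cyclic allocator are
 * unique and densely packed, so the ID itself already spreads entries
 * across buckets; the shift only keeps clear of the low bits that
 * rhashtable reserves for itself (RHT_HASH_RESERVED_SPACE).
 */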

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
                          const void *ptr)
{
        const struct xdp_mem_allocator *xa = ptr;
        u32 mem_id = *(u32 *)arg->key;

        return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
        .nelem_hint = 64,
        .head_offset = offsetof(struct xdp_mem_allocator, node),
        .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
        .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
        .max_size = MEM_ID_MAX,
        .min_size = 8,
        .automatic_shrinking = true,
        .hashfn    = xdp_mem_id_hashfn,
        .obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
        struct xdp_mem_allocator *xa;

        xa = container_of(rcu, struct xdp_mem_allocator, rcu);

        /* Allow this ID to be reused */
        ida_simple_remove(&mem_id_pool, xa->mem.id);

        /* Note: the driver is expected to free the *allocator*,
         * e.g. the page_pool, and MUST also use RCU to free it.
         */

        /* Poison memory */
        xa->mem.id = 0xFFFF;
        xa->mem.type = 0xF0F0;
        xa->allocator = (void *)0xDEAD9001;

        kfree(xa);
}
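
/* Driver-side sketch (hypothetical, not part of this file): a driver
 * embedding a rcu_head next to its allocator pointer could satisfy the
 * "MUST also use RCU to free it" rule above like this:
 *
 *      static void my_rxq_rcu_free(struct rcu_head *rcu)
 *      {
 *              struct my_rxq *q = container_of(rcu, struct my_rxq, rcu);
 *
 *              my_allocator_free(q->allocator);
 *      }
 *      ...
 *      call_rcu(&q->rcu, my_rxq_rcu_free);
 *
 * All my_* names are placeholders for whatever the driver uses.
 */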

static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
        struct xdp_mem_allocator *xa;
        int id = xdp_rxq->mem.id;
        int err;

        if (id == 0)
                return;

        mutex_lock(&mem_id_lock);

        xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
        if (!xa) {
                mutex_unlock(&mem_id_lock);
                return;
        }

        err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
        WARN_ON(err);

        call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

        mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
        /* Simplify driver cleanup code paths, allow unreg "unused" */
        if (xdp_rxq->reg_state == REG_STATE_UNUSED)
                return;

        WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

        __xdp_rxq_info_unreg_mem_model(xdp_rxq);

        xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
        xdp_rxq->dev = NULL;

        /* Reset mem info to defaults */
        xdp_rxq->mem.id = 0;
        xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
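
/* Usage sketch (hypothetical driver, field names assumed): on queue
 * teardown the rxq is unregistered first, and only afterwards is the
 * allocator itself freed (RCU-deferred, per the rule above):
 *
 *      xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *      ... RCU-deferred free of rxq's page_pool / allocator ...
 */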

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
        memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
                     struct net_device *dev, u32 queue_index)
{
        if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
                WARN(1, "Driver promised not to register this");
                return -EINVAL;
        }

        if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
                WARN(1, "Missing unregister, handled but fix driver");
                xdp_rxq_info_unreg(xdp_rxq);
        }

        if (!dev) {
                WARN(1, "Missing net_device from driver");
                return -ENODEV;
        }

        /* State either UNREGISTERED or NEW */
        xdp_rxq_info_init(xdp_rxq);
        xdp_rxq->dev = dev;
        xdp_rxq->queue_index = queue_index;

        xdp_rxq->reg_state = REG_STATE_REGISTERED;
        return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
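
/* Usage sketch (hypothetical driver, names assumed): register once per
 * RX queue during setup, before attaching a memory model:
 *
 *      err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->queue_index);
 *      if (err)
 *              return err;
 */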

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
        xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
        return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
        struct rhashtable *rht;
        int ret;

        if (unlikely(mem_id_init))
                return 0;

        rht = kzalloc(sizeof(*rht), GFP_KERNEL);
        if (!rht)
                return -ENOMEM;

        ret = rhashtable_init(rht, &mem_id_rht_params);
        if (ret < 0) {
                kfree(rht);
                return ret;
        }
        mem_id_ht = rht;
        smp_mb(); /* mutex lock should provide enough pairing */
        mem_id_init = true;

        return 0;
}
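
/* The init above is double-checked locking: callers test mem_id_init
 * locklessly, take mem_id_lock, and this function re-checks under the
 * lock. The smp_mb() orders the mem_id_ht store before the mem_id_init
 * store, so a lockless reader that sees mem_id_init also sees the table.
 */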

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
        int retries = 1;
        int id;

again:
        id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
        if (id < 0) {
                if (id == -ENOSPC) {
                        /* Cyclic allocator, reset next id */
                        if (retries--) {
                                mem_id_next = MEM_ID_MIN;
                                goto again;
                        }
                }
                return id; /* errno */
        }
        mem_id_next = id + 1;

        return id;
}
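
/* Worked example: with mem_id_next near MEM_ID_MAX and that upper range
 * exhausted, ida_simple_get() returns -ENOSPC; the single retry then
 * restarts the search at MEM_ID_MIN, reusing IDs already released via
 * ida_simple_remove() in __xdp_mem_allocator_rcu_free().
 */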

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
        if (type == MEM_TYPE_PAGE_POOL)
                return is_page_pool_compiled_in();

        if (type >= MEM_TYPE_MAX)
                return false;

        return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                               enum xdp_mem_type type, void *allocator)
{
        struct xdp_mem_allocator *xdp_alloc;
        gfp_t gfp = GFP_KERNEL;
        int id, errno, ret;
        void *ptr;

        if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
                WARN(1, "Missing register, driver bug");
                return -EFAULT;
        }

        if (!__is_supported_mem_type(type))
                return -EOPNOTSUPP;

        xdp_rxq->mem.type = type;

        if (!allocator) {
                if (type == MEM_TYPE_PAGE_POOL)
                        return -EINVAL; /* Setup time check page_pool req */
                return 0;
        }

        /* Delay init of rhashtable to save memory if feature isn't used */
        if (!mem_id_init) {
                mutex_lock(&mem_id_lock);
                ret = __mem_id_init_hash_table();
                mutex_unlock(&mem_id_lock);
                if (ret < 0) {
                        WARN_ON(1);
                        return ret;
                }
        }

        xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
        if (!xdp_alloc)
                return -ENOMEM;

        mutex_lock(&mem_id_lock);
        id = __mem_id_cyclic_get(gfp);
        if (id < 0) {
                errno = id;
                goto err;
        }
        xdp_rxq->mem.id = id;
        xdp_alloc->mem = xdp_rxq->mem;
        xdp_alloc->allocator = allocator;

        /* Insert allocator into ID lookup table */
        ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
        if (IS_ERR(ptr)) {
                errno = PTR_ERR(ptr);
                goto err;
        }

        mutex_unlock(&mem_id_lock);

        return 0;
err:
        mutex_unlock(&mem_id_lock);
        kfree(xdp_alloc);
        return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
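
/* Usage sketch (hypothetical driver, names assumed): attach a page_pool
 * as the RX memory model right after xdp_rxq_info_reg():
 *
 *      err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *                                       MEM_TYPE_PAGE_POOL, rxq->page_pool);
 *      if (err) {
 *              xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *              return err;
 *      }
 */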

static void xdp_return(void *data, struct xdp_mem_info *mem)
{
        struct xdp_mem_allocator *xa;
        struct page *page;

        switch (mem->type) {
        case MEM_TYPE_PAGE_POOL:
                rcu_read_lock();
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
                if (xa)
                        page_pool_put_page(xa->page_pool, page);
                else
                        put_page(page);
                rcu_read_unlock();
                break;
        case MEM_TYPE_PAGE_SHARED:
                page_frag_free(data);
                break;
        case MEM_TYPE_PAGE_ORDER0:
                page = virt_to_page(data); /* Assumes order-0 page */
                put_page(page);
                break;
        default:
                /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
                break;
        }
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
        xdp_return(xdpf->data, &xdpf->mem);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_buff(struct xdp_buff *xdp)
{
        xdp_return(xdp->data, &xdp->rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
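
/* Usage sketch (hypothetical): code holding a converted xdp_frame, e.g.
 * a transmit-completion path, returns it with xdp_return_frame(xdpf);
 * code that must release a still-live xdp_buff uses xdp_return_buff(xdp),
 * which routes via the memory model recorded in xdp->rxq->mem instead.
 */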