// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;
	struct rcu_head rcu;
};

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	/* Notice, driver is expected to free the *allocator,
	 * e.g. page_pool, and MUST also use RCU free.
	 */

	/* Poison memory */
	xa->mem.id = 0xFFFF;
	xa->mem.type = 0xF0F0;
	xa->allocator = (void *)0xDEAD9001;

	kfree(xa);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
	    xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
		return;
	}

	if (id == 0)
		return;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
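
/* A minimal sketch of the intended driver pairing, for orientation only
 * (my_rxq and my_netdev are hypothetical names, not part of this API):
 *
 *	err = xdp_rxq_info_reg(&my_rxq->xdp_rxq, my_netdev, my_rxq->index);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	// on queue teardown, in reverse order:
 *	xdp_rxq_info_unreg(&my_rxq->xdp_rxq);
 */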

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
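
/* Worked example of the single retry above: ida_simple_get()'s upper
 * bound is exclusive, so IDs run from MEM_ID_MIN to MEM_ID_MAX - 1.
 * Once mem_id_next passes MEM_ID_MAX - 1, the next call returns -ENOSPC,
 * the retry resets mem_id_next to MEM_ID_MIN, and allocation continues
 * from the low end where previously removed IDs are free again (or
 * fails with -ENOSPC if the entire ID space is genuinely exhausted).
 */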

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	mutex_unlock(&mem_id_lock);

	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
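
/* A sketch of pairing the above with a page_pool allocator (names like
 * my_rxq are hypothetical; page_pool_params abbreviated; the helper for
 * freeing a pool on error differs across kernel versions, so that line
 * is pseudo-code):
 *
 *	struct page_pool_params pp_params = { .pool_size = ..., ... };
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	err = xdp_rxq_info_reg_mem_model(&my_rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err)
 *		... free the pool again and unwind ...
 */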

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa) {
			napi_direct &= !xdp_return_frame_no_direct();
			page_pool_put_page(xa->page_pool, page, napi_direct);
		} else {
			put_page(page);
		}
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
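
/* Rule of thumb for choosing between the two variants above (see the
 * @napi_direct comment before __xdp_return()): xdp_return_frame_rx_napi()
 * is only safe from the driver's own NAPI poll context, e.g. when a
 * frame is dropped inline because an XDP_TX or ndo_xdp_xmit() queue is
 * full; every other context must use xdp_return_frame().
 */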

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
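
/* The three attachment helpers above are designed to be used together
 * from a driver's ndo_bpf() callback. A sketch (my_priv->xdp is a
 * hypothetical struct xdp_attachment_info member):
 *
 *	switch (bpf->command) {
 *	case XDP_SETUP_PROG:
 *		if (!xdp_attachment_flags_ok(&my_priv->xdp, bpf))
 *			return -EBUSY;
 *		... install bpf->prog in the datapath ...
 *		xdp_attachment_setup(&my_priv->xdp, bpf);
 *		return 0;
 *	case XDP_QUERY_PROG:
 *		return xdp_attachment_query(&my_priv->xdp, bpf);
 *	default:
 *		return -EINVAL;
 *	}
 */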

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
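
/* Usage note: the expected caller is convert_to_xdp_frame() in
 * include/net/xdp.h, which dispatches here when xdp->rxq->mem.type is
 * MEM_TYPE_ZERO_COPY, i.e. when an AF_XDP zero-copy buff must outlive
 * the RX context (e.g. XDP_REDIRECT into a devmap/cpumap). The copy is
 * unavoidable, as the umem buffer backing the buff belongs to user
 * space and is handed back to its allocator via the xdp_return_buff()
 * call above.
 */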