// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

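/* Add an AF_XDP socket to the umem's list of bound sockets. The socket is
 * added with list_add_rcu() under xsk_list_lock so that readers can walk
 * xsk_list under RCU protection.
 */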
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

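/* Remove a socket from the umem's xsk_list. For a zero-copy umem, wait
 * for an RCU grace period so that any reader still walking the list is
 * done with the departing socket before the caller tears it down.
 */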
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        if (xs->dev) {
                spin_lock_irqsave(&umem->xsk_list_lock, flags);
                list_del_rcu(&xs->list);
                spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

                if (umem->zc)
                        synchronize_net();
        }
}

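/* Bind the umem to a queue of a network device. If the driver implements
 * ndo_bpf and ndo_xsk_async_xmit, the umem is handed to the driver and
 * zero-copy is enabled; otherwise we fall back to copy mode. XDP_ZEROCOPY
 * turns a failed driver setup into an error, while XDP_COPY skips the
 * driver setup entirely.
 */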
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u32 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err;

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (force_copy)
                return 0;

        dev_hold(dev);

        if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) {
                bpf.command = XDP_QUERY_XSK_UMEM;

                rtnl_lock();
                err = dev->netdev_ops->ndo_bpf(dev, &bpf);
                rtnl_unlock();

                if (err) {
                        dev_put(dev);
                        return force_zc ? -ENOTSUPP : 0;
                }

                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = umem;
                bpf.xsk.queue_id = queue_id;

                rtnl_lock();
                err = dev->netdev_ops->ndo_bpf(dev, &bpf);
                rtnl_unlock();

                if (err) {
                        dev_put(dev);
                        return force_zc ? err : 0; /* fail or fallback */
                }

                umem->dev = dev;
                umem->queue_id = queue_id;
                umem->zc = true;
                return 0;
        }

        dev_put(dev);
        return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
}

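/* Undo xdp_umem_assign_dev(): ask the driver to drop its umem state for
 * the queue and release the device reference taken at bind time.
 */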
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
        struct netdev_bpf bpf;
        int err;

        if (umem->dev) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;

                rtnl_lock();
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
                rtnl_unlock();

                if (err)
                        WARN(1, "failed to disable umem!\n");

                dev_put(umem->dev);
                umem->dev = NULL;
        }
}

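/* Unpin the user pages backing the umem. The pages are marked dirty
 * before being put, since packet data may have been written to them
 * through the kernel mapping.
 */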
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                struct page *page = umem->pgs[i];

                set_page_dirty_lock(page);
                put_page(page);
        }

        kfree(umem->pgs);
        umem->pgs = NULL;
}

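/* Give back the pages that were charged against the owning user's
 * RLIMIT_MEMLOCK accounting in xdp_umem_account_pages().
 */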
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}

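/* Final teardown of a umem: detach it from the device, destroy the fill
 * and completion queues, unpin and unaccount the user pages, and free
 * the umem itself. Runs from a workqueue once the last reference is
 * dropped.
 */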
static void xdp_umem_release(struct xdp_umem *umem)
{
        struct task_struct *task;
        struct mm_struct *mm;

        xdp_umem_clear_dev(umem);

        if (umem->fq) {
                xskq_destroy(umem->fq);
                umem->fq = NULL;
        }

        if (umem->cq) {
                xskq_destroy(umem->cq);
                umem->cq = NULL;
        }

        xdp_umem_unpin_pages(umem);

        task = get_pid_task(umem->pid, PIDTYPE_PID);
        put_pid(umem->pid);
        if (!task)
                goto out;
        mm = get_task_mm(task);
        put_task_struct(task);
        if (!mm)
                goto out;

        mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;

        xdp_umem_unaccount_pages(umem);
out:
        kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
        struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

        xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
        refcount_inc(&umem->users);
}

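/* Drop a reference to the umem. The actual release takes rtnl_lock and
 * may sleep, so it is deferred to a workqueue instead of running in the
 * caller's context.
 */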
void xdp_put_umem(struct xdp_umem *umem)
{
        if (!umem)
                return;

        if (refcount_dec_and_test(&umem->users)) {
                INIT_WORK(&umem->work, xdp_umem_release_deferred);
                schedule_work(&umem->work);
        }
}

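/* Pin the user memory area with get_user_pages() so that the packet
 * buffers stay resident while the umem is in use. A partial pin is
 * treated as failure: the pinned pages are released and -ENOMEM is
 * returned.
 */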
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
        unsigned int gup_flags = FOLL_WRITE;
        long npgs;
        int err;

        umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
                            GFP_KERNEL | __GFP_NOWARN);
        if (!umem->pgs)
                return -ENOMEM;

        down_write(&current->mm->mmap_sem);
        npgs = get_user_pages(umem->address, umem->npgs,
                              gup_flags, &umem->pgs[0], NULL);
        up_write(&current->mm->mmap_sem);

        if (npgs != umem->npgs) {
                if (npgs >= 0) {
                        umem->npgs = npgs;
                        err = -ENOMEM;
                        goto out_pin;
                }
                err = npgs;
                goto out_pgs;
        }
        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_pgs:
        kfree(umem->pgs);
        umem->pgs = NULL;
        return err;
}

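/* Charge the umem's pages against the owner's RLIMIT_MEMLOCK limit,
 * unless the task has CAP_IPC_LOCK. locked_vm is updated with a cmpxchg
 * loop so no extra lock is needed.
 */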
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
        unsigned long lock_limit, new_npgs, old_npgs;

        if (capable(CAP_IPC_LOCK))
                return 0;

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        umem->user = get_uid(current_user());

        do {
                old_npgs = atomic_long_read(&umem->user->locked_vm);
                new_npgs = old_npgs + umem->npgs;
                if (new_npgs > lock_limit) {
                        free_uid(umem->user);
                        umem->user = NULL;
                        return -ENOBUFS;
                }
        } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
                                     new_npgs) != old_npgs);
        return 0;
}

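/* Validate an XDP_UMEM_REG request and initialize the umem: the memory
 * area must be page aligned, the chunk size a power of two between
 * XDP_UMEM_MIN_CHUNK_SIZE and PAGE_SIZE, the chunk count a multiple of
 * the chunks that fit in one page, and each chunk large enough to hold
 * the requested headroom plus XDP_PACKET_HEADROOM. On success the pages
 * are accounted, pinned and their kernel addresses cached in
 * umem->pages[].
 */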
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        unsigned int chunks, chunks_per_page;
        u64 addr = mr->addr, size = mr->len;
        int size_chk, err, i;

        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
                 * - huge pages, or
                 * - using an IOMMU, or
                 * - making sure the memory area is consecutive
                 * but for now, we simply say "computer says no".
                 */
                return -EINVAL;
        }

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr)) {
                /* Memory area has to be page size aligned. For
                 * simplicity, this might change.
                 */
                return -EINVAL;
        }

        if ((addr + size) < addr)
                return -EINVAL;

        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;

        chunks_per_page = PAGE_SIZE / chunk_size;
        if (chunks < chunks_per_page || chunks % chunks_per_page)
                return -EINVAL;

        headroom = ALIGN(headroom, 64);

        size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
        if (size_chk < 0)
                return -EINVAL;

        umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->props.chunk_mask = ~((u64)chunk_size - 1);
        umem->props.size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
        umem->npgs = size / PAGE_SIZE;
        umem->pgs = NULL;
        umem->user = NULL;
        INIT_LIST_HEAD(&umem->xsk_list);
        spin_lock_init(&umem->xsk_list_lock);

        refcount_set(&umem->users, 1);

        err = xdp_umem_account_pages(umem);
        if (err)
                goto out;

        err = xdp_umem_pin_pages(umem);
        if (err)
                goto out_account;

        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
                goto out_pin;
        }

        for (i = 0; i < umem->npgs; i++)
                umem->pages[i].addr = page_address(umem->pgs[i]);

        return 0;

out_pin:
        /* Undo the page pinning if the page address cache cannot be
         * allocated, otherwise the pinned pages would leak.
         */
        xdp_umem_unpin_pages(umem);
out_account:
        xdp_umem_unaccount_pages(umem);
out:
        put_pid(umem->pid);
        return err;
}

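/* Allocate a umem and register it from an XDP_UMEM_REG request. Returns
 * the umem with a single reference held, or an ERR_PTR() on failure.
 */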
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
        struct xdp_umem *umem;
        int err;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        err = xdp_umem_reg(umem, mr);
        if (err) {
                kfree(umem);
                return ERR_PTR(err);
        }

        return umem;
}

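/* A umem is only usable once both its fill ring and completion ring
 * have been created.
 */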
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
        return umem->fq && umem->cq;
}