/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
};

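/* A zero-copy driver may wrap the xdp_buff it is handed in a private type of
 * its own, with the extra members landing in the cb[] scratch area above. The
 * macro below build-checks that such a type still fits. Illustrative sketch
 * only; the "mydrv" names are made up:
 *
 *	struct mydrv_xdp_buff {
 *		struct xdp_buff xdp;
 *		const void *rx_desc;
 *	};
 *
 *	XSK_CHECK_PRIV_TYPE(struct mydrv_xdp_buff);
 */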
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))

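/* One DMA mapping of the umem pages, shared by every pool that binds the same
 * umem to the same device. xp_dma_map() reuses an existing mapping and bumps
 * the users refcount instead of mapping the pages again; the mapping is torn
 * down only when the last user calls xp_dma_unmap(). See
 * net/xdp/xsk_buff_pool.c for the exact rules.
 */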
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

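/* The buffer pool ties a umem to one netdev queue. fq is the fill ring the
 * pool consumes buffer addresses from, cq is the completion ring that TX
 * completions are posted to, and free_heads[] is a stack of the currently
 * unused xdp_buff_xsk heads. Rough orientation only; the authoritative
 * per-field semantics live with the code in net/xdp/xsk_buff_pool.c and
 * net/xdp/xsk.c that uses them.
 */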
struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
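
/* Bit 0 of each pool->dma_pages[] entry is borrowed for the flag above: it is
 * set when the next umem page is also contiguous in DMA address space, which
 * is what allows an unaligned-mode buffer to straddle that page boundary. The
 * flag has to be masked off before an entry is used as an address, as
 * xp_init_xskb_dma() below does.
 */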

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
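
/* Note on the two DMA addresses set up above: frame_dma points at the start of
 * the chunk, while dma is offset by the pool headroom plus XDP_PACKET_HEADROOM
 * and is what a driver posts to its Rx ring, i.e. where packet data is meant
 * to land. xp_get_dma() and xp_get_frame_dma() below return these values.
 */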

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
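
/* The inline wrappers above are the fast path: when the mapping does not need
 * syncing (dma_need_sync is false) they cost a single branch, and only
 * otherwise do they call into the *_slow() variants that issue the real
 * dma_sync_single_*() operations. A minimal sketch of how a zero-copy driver
 * Rx path uses this through the xsk_buff_* wrappers in <net/xdp_sock_drv.h>
 * (illustrative only, error handling and ring bookkeeping omitted):
 *
 *	struct xdp_buff *xdp = xsk_buff_alloc(pool);
 *	dma_addr_t dma = xsk_buff_xdp_get_dma(xdp);
 *
 *	// post dma to the Rx descriptor ring and let the NIC fill the buffer
 *
 *	xsk_buff_dma_sync_for_cpu(xdp, pool);
 *	// run the XDP program on xdp / hand the frame to the AF_XDP socket
 */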
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
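
/* Worked example for the helper above, assuming 4 KiB pages: a descriptor with
 * addr = 0x0ff0 and len = 0x100 covers [0x0ff0, 0x10f0) and so crosses from
 * page 0 into page 1. It only counts as crossing a non-contiguous page (true
 * returned) if the pool is DMA mapped and page 0 lacks the XSK_NEXT_PG_CONTIG
 * flag, i.e. page 1 does not follow page 0 in DMA address space.
 */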

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
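
/* In unaligned chunk mode a 64-bit ring address carries two fields: the low
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT (48) bits hold the base address of the buffer
 * within the umem and the upper 16 bits hold an offset into it. Worked
 * example: addr = (7ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x3000 extracts
 * to base 0x3000 and offset 7, and xp_unaligned_add_offset_to_addr() folds
 * them back into the absolute umem address 0x3007.
 */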

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
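
/* xp_release() only has work to do in unaligned mode, where the mapping from
 * ring address to buffer head is not fixed and released heads therefore go
 * back on the free_heads[] stack. In aligned mode the head for a given chunk
 * is found directly from the address via xp_aligned_extract_idx(), so there is
 * nothing to return.
 */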

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
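
/* xp_get_handle() produces the address posted to the AF_XDP Rx ring: the
 * buffer's current data offset within its chunk (including the pool headroom)
 * is combined with the original fill-ring address, by plain addition in
 * aligned mode or by encoding the offset in the upper 16 bits in unaligned
 * mode, so that user space can find the payload inside its umem.
 */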

#endif /* XSK_BUFF_POOL_H_ */