/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
#include <net/xdp.h>

/* 0 - Reserved to indicate value not set
 * 1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

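/* Illustrative sketch (not part of the original header): because the ID
 * space is partitioned as above, a single range check tells whether
 * skb->napi_id currently holds a real NAPI ID or a sender_cpu value (the
 * two share storage in struct sk_buff):
 *
 *	if (skb->napi_id >= MIN_NAPI_ID)
 *		sk_mark_napi_id(sk, skb);
 */
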
#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);

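/* Illustrative sketch (hypothetical caller, not part of the original
 * header): napi_busy_loop() repeatedly polls one NAPI context with the
 * given budget until loop_end() returns true. loop_end() receives
 * loop_end_arg plus a timestamp captured once at entry, so a typical
 * callback combines a data-ready test with a timeout check:
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		return ctx->data_ready || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, ctx, false, BUSY_POLL_BUDGET);
 */
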
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* local_clock() is in ns; >> 10 approximates a division by 1000,
	 * giving a cheap roughly-microsecond timestamp.
	 */
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

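/* Illustrative sketch (condensed from the poll/select busy-poll pattern;
 * variable names are approximate): the caller snapshots the clock once,
 * then keeps respinning only until the global budget elapses:
 *
 *	if (!busy_start)
 *		busy_start = busy_loop_current_time();
 *	if (can_busy_loop && !busy_loop_timeout(busy_start))
 *		continue;
 *
 * Once busy_loop_timeout() fires, the caller falls back to sleeping.
 */
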
static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}

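/* Illustrative sketch (mirrors the datagram receive pattern): a receive
 * path can spin on the socket's NAPI context before sleeping. For
 * non-blocking calls loop_end is NULL, so a single budgeted poll is done:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */
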
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid
	 * overwriting it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}

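/* Illustrative sketch (hypothetical driver, not part of the original
 * header): a NIC's NAPI poll routine tags each skb with its NAPI context
 * before handing it to the stack, letting the stack map flows back to
 * this queue for busy polling. build_rx_skb() is a made-up helper:
 *
 *	skb = build_rx_skb(rx_ring, desc);
 *	skb_mark_napi_id(skb, napi);
 *	napi_gro_receive(napi, skb);
 */
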
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}

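/* Illustrative sketch (hypothetical protocol handler, not part of the
 * original header): once lookup has matched the skb to an established
 * socket, the receive path refreshes the socket's NAPI ID so later busy
 * polling spins on the queue that actually delivers the flow.
 * proto_lookup_established() is a made-up lookup:
 *
 *	sk = proto_lookup_established(net, skb);
 *	if (sk)
 *		sk_mark_napi_id(sk, skb);
 */
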
/* Variant of sk_mark_napi_id() for passive flow setup,
 * as both sk->sk_napi_id and sk->sk_rx_queue_mapping
 * need to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}

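/* Illustrative sketch (modeled on the AF_XDP receive path; xs is a
 * hypothetical struct xdp_sock pointer): the first frame delivered to an
 * XDP socket pins its NAPI ID from the receive queue recorded in the
 * xdp_buff:
 *
 *	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
 */
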
#endif /* _LINUX_NET_BUSY_POLL_H */