/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
#include <net/xdp.h>

/* 0 - Reserved to indicate value not set
 * 1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

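/* Default number of packets a single busy-poll pass may process when the
 * socket has not set its own budget via sk->sk_busy_poll_budget (see
 * sk_busy_loop() below).
 */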
#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
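/* Both values are in microseconds: net.core.busy_read seeds the per-socket
 * busy-read time for new sockets, while net.core.busy_poll bounds busy
 * polling from poll() and select().
 */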
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

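/* Busy poll the NAPI context identified by @napi_id until @loop_end
 * (called with @loop_end_arg and the poll start time) returns true;
 * a NULL @loop_end limits the loop to a single pass.
 */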
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
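	/* local_clock() is in nanoseconds; >> 10 is a cheap approximate
	 * division by 1000, yielding roughly microseconds.
	 */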
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

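	/* A NULL loop_end callback makes napi_busy_loop() do a single
	 * pass, which is all a nonblocking caller wants.
	 */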
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
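
/* Illustrative sketch (hypothetical, not part of this header): a protocol's
 * recvmsg path might busy poll before sleeping on an empty receive queue:
 *
 *	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */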

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid
	 * overwriting it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}
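
/* Illustrative sketch (hypothetical, not part of this header): a driver's
 * RX path would typically mark each received skb before handing it to the
 * stack; example_rx, example_ring and its fields are made-up names:
 *
 *	static void example_rx(struct example_ring *ring, struct sk_buff *skb)
 *	{
 *		skb_mark_napi_id(skb, &ring->napi);
 *		napi_gro_receive(&ring->napi, skb);
 *	}
 */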

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
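	/* Checking first avoids dirtying the sk->sk_napi_id cache line
	 * when the id is unchanged, which is the common case.
	 */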
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}

/* Variant of sk_mark_napi_id() for passive flow setup, where both
 * sk->sk_napi_id and sk->sk_rx_queue_mapping need to be set
 * unconditionally.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

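/* Set sk->sk_napi_id only if it has not been recorded yet. */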
static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}

#endif /* _LINUX_NET_BUSY_POLL_H */