blob: ebc9a8521765faf90201f7804d0d231c1b708f5f [file] [log] [blame]
Thomas Gleixner1802d0b2019-05-27 08:55:21 +02001// SPDX-License-Identifier: GPL-2.0-only
2/*
Alexander Aringc2085102014-10-26 09:37:05 +01003 *
4 * Authors:
5 * Alexander Aring <aar@pengutronix.de>
6 *
7 * Based on: net/mac80211/util.c
8 */
9
10#include "ieee802154_i.h"
Alexander Aringc4227c82015-06-24 11:36:34 +020011#include "driver-ops.h"
Alexander Aringc2085102014-10-26 09:37:05 +010012
/* privid for wpan_phys to determine whether they belong to us or not;
 * the variable's own address is used as the value, which is guaranteed
 * unique, so a pointer comparison suffices for the ownership check.
 */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
15
/**
 * ieee802154_wake_queue - wake ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we had to stop the queue to
 * avoid new skbs coming in during the transmission. The queue then needs to be
 * woken up after the operation.
 */
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	/* Mark the phy's queue as running again before waking the
	 * per-interface netdev queues.
	 */
	clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Interfaces without a backing netdev have no queue to wake. */
		if (!sdata->dev)
			continue;

		netif_wake_queue(sdata->dev);
	}
	rcu_read_unlock();
}
Alexander Aringc2085102014-10-26 09:37:05 +010041
/**
 * ieee802154_stop_queue - stop ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we need to tell upper layers to
 * stop giving us new skbs while we are busy with the transmitted one. The queue
 * must then be stopped before transmitting.
 */
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Interfaces without a backing netdev have no queue to stop. */
		if (!sdata->dev)
			continue;

		netif_stop_queue(sdata->dev);
	}
	rcu_read_unlock();
}
Miquel Raynal20a19d12022-05-19 17:05:10 +020066
/**
 * ieee802154_hold_queue - hold ieee802154 queue
 * @local: main mac object
 *
 * Hold a queue by incrementing the hold_txs counter; the underlying queue
 * is only actually stopped on the 0 -> 1 transition, so holds nest and
 * only the first caller stops traffic.
 */
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* queue_lock serializes the counter transition with the queue
	 * stop/wake so concurrent hold/release callers cannot interleave
	 * them in the wrong order.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
76
/**
 * ieee802154_release_queue - release ieee802154 queue
 * @local: main mac object
 *
 * Undoes one ieee802154_hold_queue() call. Only the release that brings
 * the hold_txs counter back to zero actually wakes the queue.
 */
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* queue_lock pairs with ieee802154_hold_queue() so that the
	 * counter transition and the queue stop/wake stay atomic with
	 * respect to each other.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
Alexander Aringc2085102014-10-26 09:37:05 +010086
/**
 * ieee802154_disable_queue - disable ieee802154 queue
 * @local: main mac object
 *
 * Disable the netdev queue of every interface. There is no hold counter
 * involved here, so this bypasses the hold/release pairing.
 * NOTE(review): unlike netif_stop_queue(), netif_tx_disable() also
 * serializes against a concurrent start_xmit — presumably why it is used
 * on this path; confirm against the callers (teardown/suspend).
 */
void ieee802154_disable_queue(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Interfaces without a backing netdev have no queue. */
		if (!sdata->dev)
			continue;

		netif_tx_disable(sdata->dev);
	}
	rcu_read_unlock();
}
100
/* Interframe-spacing (IFS) timer expiry callback: the mandatory gap after
 * the previous transmission has elapsed, so the queue may run again.
 */
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	/* The timer is embedded in struct ieee802154_local; recover it. */
	struct ieee802154_local *local =
		container_of(timer, struct ieee802154_local, ifs_timer);

	/* Drop one queue hold — presumably the one taken when the frame
	 * was handed to the driver (TODO confirm against the tx path).
	 */
	ieee802154_release_queue(local);

	/* One-shot: re-armed by ieee802154_xmit_complete() as needed. */
	return HRTIMER_NORESTART;
}
110
111void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
112 bool ifs_handling)
113{
Miquel Raynal337e2f82022-04-07 12:08:56 +0200114 struct ieee802154_local *local = hw_to_local(hw);
115
116 local->tx_result = IEEE802154_SUCCESS;
117
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100118 if (ifs_handling) {
Alexander Aring3f3c4bb2015-03-04 21:19:59 +0100119 u8 max_sifs_size;
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100120
Alexander Aring3f3c4bb2015-03-04 21:19:59 +0100121 /* If transceiver sets CRC on his own we need to use lifs
122 * threshold len above 16 otherwise 18, because it's not
123 * part of skb->len.
124 */
125 if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
126 max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
127 IEEE802154_FCS_LEN;
128 else
129 max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
130
131 if (skb->len > max_sifs_size)
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100132 hrtimer_start(&local->ifs_timer,
Thomas Gleixner8b0e1952016-12-25 12:30:41 +0100133 hw->phy->lifs_period * NSEC_PER_USEC,
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100134 HRTIMER_MODE_REL);
135 else
136 hrtimer_start(&local->ifs_timer,
Thomas Gleixner8b0e1952016-12-25 12:30:41 +0100137 hw->phy->sifs_period * NSEC_PER_USEC,
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100138 HRTIMER_MODE_REL);
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100139 } else {
Miquel Raynal20a19d12022-05-19 17:05:10 +0200140 ieee802154_release_queue(local);
Alexander Aring61f2dcb2014-11-12 19:51:56 +0100141 }
Alexander Aring3862eba2015-05-17 21:44:56 +0200142
143 dev_consume_skb_any(skb);
Alexander Aring6c1c78d2022-06-13 00:37:35 -0400144 if (atomic_dec_and_test(&hw->phy->ongoing_txs))
Miquel Raynalf0feb342022-05-19 17:05:13 +0200145 wake_up(&hw->phy->sync_txq);
Alexander Aringc2085102014-10-26 09:37:05 +0100146}
147EXPORT_SYMBOL(ieee802154_xmit_complete);
Alexander Aringc4227c82015-06-24 11:36:34 +0200148
/**
 * ieee802154_xmit_error - the driver failed to transmit a frame
 * @hw: main hardware object
 * @skb: buffer that could not be sent
 * @reason: error status, stored in local->tx_result for the upper layer
 *
 * Records the failure, lets the queue run again, drops the skb and wakes
 * anyone waiting for all in-flight transmissions to finish.
 */
void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
			   int reason)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = reason;
	ieee802154_release_queue(local);
	/* Error path: the skb is dropped (kfree), not consumed. */
	dev_kfree_skb_any(skb);
	/* Last outstanding transmission accounted for -> wake waiters. */
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);
161
/**
 * ieee802154_xmit_hw_error - frame could not be offloaded to the hardware
 * @hw: main hardware object
 * @skb: buffer that could not be sent
 *
 * Convenience wrapper around ieee802154_xmit_error() reporting a generic
 * IEEE802154_SYSTEM_ERROR, for drivers that cannot tell exactly why the
 * transmission failed.
 */
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);
167
/**
 * ieee802154_stop_device - stop the underlying device
 * @local: main mac object
 *
 * Drains the workqueue and cancels a possibly armed IFS timer before
 * asking the driver to stop the hardware.
 */
void ieee802154_stop_device(struct ieee802154_local *local)
{
	flush_workqueue(local->workqueue);
	hrtimer_cancel(&local->ifs_timer);
	drv_stop(local);
}
173}