blob: c3e0e23e0161ac7daac34190c3b363b99fc94399 [file] [log] [blame]
Ryder Lee0e3d6772019-07-24 16:58:20 +08001// SPDX-License-Identifier: ISC
Felix Fietkau17f1de52017-11-21 10:50:52 +01002/*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
Felix Fietkau17f1de52017-11-21 10:50:52 +01004 */
5
6#include "mt76.h"
Lorenzo Bianconib92158a2023-10-20 12:30:48 +02007#include "dma.h"
Felix Fietkau17f1de52017-11-21 10:50:52 +01008#include "trace.h"
9
10static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
11{
12 u32 val;
13
Felix Fietkaud908d4e2019-03-23 15:24:56 +010014 val = readl(dev->mmio.regs + offset);
Felix Fietkau17f1de52017-11-21 10:50:52 +010015 trace_reg_rr(dev, offset, val);
16
17 return val;
18}
19
/* Write @val to the 32-bit register at @offset in the device MMIO window.
 * The access is traced before it is issued to the bus.
 */
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	trace_reg_wr(dev, offset, val);
	writel(val, dev->mmio.regs + offset);
}
25
26static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
27{
28 val |= mt76_mmio_rr(dev, offset) & ~mask;
29 mt76_mmio_wr(dev, offset, val);
30 return val;
31}
32
Lorenzo Bianconi35e4ebea2019-07-13 17:09:06 +020033static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
34 const void *data, int len)
Felix Fietkau17f1de52017-11-21 10:50:52 +010035{
Felix Fietkau850e8f62019-07-01 13:15:07 +020036 __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
Felix Fietkau17f1de52017-11-21 10:50:52 +010037}
38
Lorenzo Bianconi35e4ebea2019-07-13 17:09:06 +020039static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
40 void *data, int len)
41{
42 __ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
43}
44
Lorenzo Bianconi13fd2d22018-09-28 13:38:50 +020045static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
46 const struct mt76_reg_pair *data, int len)
47{
48 while (len > 0) {
49 mt76_mmio_wr(dev, data->reg, data->value);
50 data++;
51 len--;
52 }
53
54 return 0;
55}
56
57static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
58 struct mt76_reg_pair *data, int len)
59{
60 while (len > 0) {
61 data->value = mt76_mmio_rr(dev, data->reg);
62 data++;
63 len--;
64 }
65
66 return 0;
67}
68
Lorenzo Bianconi9220f692019-02-28 17:54:31 +010069void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
70 u32 clear, u32 set)
71{
72 unsigned long flags;
73
74 spin_lock_irqsave(&dev->mmio.irq_lock, flags);
75 dev->mmio.irqmask &= ~clear;
76 dev->mmio.irqmask |= set;
Felix Fietkauf68d6762021-12-06 13:45:54 +010077 if (addr) {
78 if (mtk_wed_device_active(&dev->mmio.wed))
79 mtk_wed_device_irq_set_mask(&dev->mmio.wed,
80 dev->mmio.irqmask);
81 else
82 mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
83 }
Lorenzo Bianconi9220f692019-02-28 17:54:31 +010084 spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
85}
86EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
87
Lorenzo Bianconib92158a2023-10-20 12:30:48 +020088#ifdef CONFIG_NET_MEDIATEK_SOC_WED
89void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
90{
91 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
92 int i;
93
94 for (i = 0; i < dev->rx_token_size; i++) {
95 struct mt76_txwi_cache *t;
96
97 t = mt76_rx_token_release(dev, i);
98 if (!t || !t->ptr)
99 continue;
100
101 mt76_put_page_pool_buf(t->ptr, false);
102 t->ptr = NULL;
103
104 mt76_put_rxwi(dev, t);
105 }
106
107 mt76_free_pending_rxwi(dev);
108}
109EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
110
/* Populate @size entries of the WED rx buffer ring: for each descriptor,
 * allocate a page-pool buffer, map it for device DMA, and register it
 * under an rx token so it can be matched on completion.
 *
 * Returns 0 on success.  On allocation/token failure, unwinds everything
 * allocated so far and returns -ENOMEM.
 */
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
	struct mt76_txwi_cache *t = NULL;

	for (i = 0; i < size; i++) {
		enum dma_data_direction dir;
		dma_addr_t addr;
		u32 offset;
		int token;
		void *buf;

		t = mt76_get_rxwi(dev);
		if (!t)
			goto unmap;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			goto unmap;

		/* page pool pre-maps the page; derive the buffer's DMA
		 * address from the head page plus the in-page offset
		 */
		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		desc->buf0 = cpu_to_le32(addr);
		token = mt76_rx_token_consume(dev, buf, t, addr);
		if (token < 0) {
			mt76_put_page_pool_buf(buf, false);
			goto unmap;
		}

		token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		/* high DMA address bits travel in the token field */
		token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
		desc->token |= cpu_to_le32(token);
		desc++;
	}

	return 0;

unmap:
	/* a rxwi fetched this iteration but not yet token-registered must
	 * be returned by hand; everything else is released via the tokens
	 */
	if (t)
		mt76_put_rxwi(dev, t);
	mt76_mmio_wed_release_rx_buf(wed);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
Lorenzo Bianconi5f607352023-10-20 12:30:49 +0200163
/* Hand the token range above wlan.token_start over to WED: shrink the
 * driver's token space, then wait up to 1s for in-flight tx tokens to
 * drain.
 *
 * Returns 0 once wed_token_count reaches zero, non-zero on timeout
 * (wait_event_timeout() returns 0 on timeout, inverted here).
 */
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = wed->wlan.token_start;
	spin_unlock_bh(&dev->token_lock);

	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
175
/* Undo mt76_mmio_wed_offload_enable(): restore the driver's full token
 * space from the per-driver default.
 */
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = dev->drv->token_size;
	spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
Lorenzo Bianconid4b85af2023-10-20 12:30:58 +0200185
/* WED callback: signal waiters that the WED reset has finished. */
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
Lorenzo Bianconib92158a2023-10-20 12:30:48 +0200193#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
194
Felix Fietkau17f1de52017-11-21 10:50:52 +0100195void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
196{
197 static const struct mt76_bus_ops mt76_mmio_ops = {
198 .rr = mt76_mmio_rr,
199 .rmw = mt76_mmio_rmw,
200 .wr = mt76_mmio_wr,
Lorenzo Bianconi35e4ebea2019-07-13 17:09:06 +0200201 .write_copy = mt76_mmio_write_copy,
202 .read_copy = mt76_mmio_read_copy,
Lorenzo Bianconi13fd2d22018-09-28 13:38:50 +0200203 .wr_rp = mt76_mmio_wr_rp,
204 .rd_rp = mt76_mmio_rd_rp,
Stanislaw Gruszkac50479f2018-10-04 12:04:53 +0200205 .type = MT76_BUS_MMIO,
Felix Fietkau17f1de52017-11-21 10:50:52 +0100206 };
207
208 dev->bus = &mt76_mmio_ops;
Lorenzo Bianconi27db1ad2018-09-09 23:57:58 +0200209 dev->mmio.regs = regs;
Lorenzo Bianconif7bbb802018-09-09 23:57:57 +0200210
Lorenzo Bianconi957068c2018-09-28 13:38:47 +0200211 spin_lock_init(&dev->mmio.irq_lock);
Felix Fietkau17f1de52017-11-21 10:50:52 +0100212}
213EXPORT_SYMBOL_GPL(mt76_mmio_init);