// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"
#include "dma.h"
#include "trace.h"

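/* Basic MMIO bus accessors: every register read/write is recorded via the
 * reg_rr/reg_wr tracepoints around the access to the mapped register window.
 */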
static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
{
	u32 val;

	val = readl(dev->mmio.regs + offset);
	trace_reg_rr(dev, offset, val);

	return val;
}

static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	trace_reg_wr(dev, offset, val);
	writel(val, dev->mmio.regs + offset);
}

static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76_mmio_rr(dev, offset) & ~mask;
	mt76_mmio_wr(dev, offset, val);
	return val;
}

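/* Copy a buffer to/from the register window in 32-bit units; the length is
 * rounded up to the next multiple of four bytes.
 */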
static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
				 const void *data, int len)
{
	__iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
}

static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
				void *data, int len)
{
	__ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
}

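/* Write or read a list of register/value pairs one entry at a time; the
 * base argument is unused for memory-mapped access.
 */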
static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
			   const struct mt76_reg_pair *data, int len)
{
	while (len > 0) {
		mt76_mmio_wr(dev, data->reg, data->value);
		data++;
		len--;
	}

	return 0;
}

static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
			   struct mt76_reg_pair *data, int len)
{
	while (len > 0) {
		data->value = mt76_mmio_rr(dev, data->reg);
		data++;
		len--;
	}

	return 0;
}

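/* Update the cached interrupt mask under mmio.irq_lock. If a mask register
 * address is given, the new mask is programmed either through the WED block
 * (when active) or directly via MMIO.
 */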
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
		       u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mmio.irq_lock, flags);
	dev->mmio.irqmask &= ~clear;
	dev->mmio.irqmask |= set;
	if (addr) {
		if (mtk_wed_device_active(&dev->mmio.wed))
			mtk_wed_device_irq_set_mask(&dev->mmio.wed,
						    dev->mmio.irqmask);
		else
			mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
	}
	spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
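/* Return all rx buffers owned by WED (Wireless Ethernet Dispatch): release
 * every rx token, hand the buffers back to the page pool and free the
 * associated rxwi entries, including any still pending.
 */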
void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	int i;

	for (i = 0; i < dev->rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(dev, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
	}

	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);

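/* Populate the WED rx buffer ring: for each descriptor allocate an rxwi and
 * a page-pool buffer, sync it for the device and store the DMA address plus
 * rx token in the descriptor. On allocation failure everything is undone
 * via mt76_mmio_wed_release_rx_buf() and -ENOMEM is returned.
 */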
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
	struct mt76_txwi_cache *t = NULL;

	for (i = 0; i < size; i++) {
		enum dma_data_direction dir;
		dma_addr_t addr;
		u32 offset;
		int token;
		void *buf;

		t = mt76_get_rxwi(dev);
		if (!t)
			goto unmap;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			goto unmap;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		desc->buf0 = cpu_to_le32(addr);
		token = mt76_rx_token_consume(dev, buf, t, addr);
		if (token < 0) {
			mt76_put_page_pool_buf(buf, false);
			goto unmap;
		}

		token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
		desc->token |= cpu_to_le32(token);
		desc++;
	}

	return 0;

unmap:
	if (t)
		mt76_put_rxwi(dev, t);
	mt76_mmio_wed_release_rx_buf(wed);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);

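/* Shrink the driver tx token space to wed->wlan.token_start and wait up to
 * one second for outstanding WED tokens to be released; returns non-zero if
 * the wait times out.
 */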
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = wed->wlan.token_start;
	spin_unlock_bh(&dev->token_lock);

	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);

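/* Restore the driver's default tx token space when WED offload is disabled. */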
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = dev->drv->token_size;
	spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);

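/* Signal completion of a WED-requested reset to any waiter on
 * mmio.wed_reset_complete.
 */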
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

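/* Install the memory-mapped bus ops and register base address and set up the
 * interrupt mask lock for a newly probed device.
 */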
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
	static const struct mt76_bus_ops mt76_mmio_ops = {
		.rr = mt76_mmio_rr,
		.rmw = mt76_mmio_rmw,
		.wr = mt76_mmio_wr,
		.write_copy = mt76_mmio_write_copy,
		.read_copy = mt76_mmio_read_copy,
		.wr_rp = mt76_mmio_wr_rp,
		.rd_rp = mt76_mmio_rd_rp,
		.type = MT76_BUS_MMIO,
	};

	dev->bus = &mt76_mmio_ops;
	dev->mmio.regs = regs;

	spin_lock_init(&dev->mmio.irq_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);