// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include <dt-bindings/memory/mtk-memory-port.h>

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL_GEN2			0x02c
#define REG_MMU_INV_SEL_GEN1			0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_MISC_CTRL			0x048
#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS				0x050
#define F_MMU_DCM				BIT(8)

#define REG_MMU_WR_LEN_CTRL			0x054
#define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_INVAL_VA_31_12_MASK		GENMASK(31, 12)
#define F_MMU_INVAL_VA_34_32_MASK		GENMASK(11, 9)
#define F_MMU_INVAL_PA_34_32_MASK		GENMASK(8, 6)
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_COMM_ID_EXT(a)		(((a) >> 10) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a)		(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			256
#define MTK_IOMMU_BANK_SZ			0x1000

#define PERICFG_IOMMU_1				0x714

#define HAS_4GB_MODE			BIT(0)
/* HW will use the EMI clock if there is no "bclk". */
#define HAS_BCLK			BIT(1)
#define HAS_VLD_PA_RNG			BIT(2)
#define RESET_AXI			BIT(3)
#define OUT_ORDER_WR_EN			BIT(4)
#define HAS_SUB_COMM_2BITS		BIT(5)
#define HAS_SUB_COMM_3BITS		BIT(6)
#define WR_THROT_EN			BIT(7)
#define HAS_LEGACY_IVRP_PADDR		BIT(8)
#define IOVA_34_EN			BIT(9)
#define SHARE_PGTABLE			BIT(10)	/* 2 HW share pgtable */
#define DCM_DISABLE			BIT(11)
#define STD_AXI_MODE			BIT(12)	/* For non-MM iommu */
/* 2 bits: iommu type */
#define MTK_IOMMU_TYPE_MM		(0x0 << 13)
#define MTK_IOMMU_TYPE_INFRA		(0x1 << 13)
#define MTK_IOMMU_TYPE_MASK		(0x3 << 13)
/* PM and clock always on, e.g. infra iommu */
#define PM_CLK_AO			BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT		BIT(16)
#define PGTABLE_PA_35_EN		BIT(17)
#define TF_PORT_TO_ADDR_MT8173		BIT(18)

#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask)	\
				((((pdata)->flags) & (mask)) == (_x))

#define MTK_IOMMU_HAS_FLAG(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x)
#define MTK_IOMMU_IS_TYPE(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\
							MTK_IOMMU_TYPE_MASK)

#define MTK_INVALID_LARBID		MTK_LARB_NR_MAX

#define MTK_LARB_COM_MAX	8
#define MTK_LARB_SUBCOM_MAX	8

#define MTK_IOMMU_GROUP_MAX	8
#define MTK_IOMMU_BANK_MAX	5

| 158 | enum mtk_iommu_plat { |
| 159 | M4U_MT2712, |
| 160 | M4U_MT6779, |
AngeloGioacchino Del Regno | 717ec15e | 2022-09-13 17:11:48 +0200 | [diff] [blame] | 161 | M4U_MT6795, |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 162 | M4U_MT8167, |
| 163 | M4U_MT8173, |
| 164 | M4U_MT8183, |
Yong Wu | e8d7cca | 2022-05-03 15:14:27 +0800 | [diff] [blame] | 165 | M4U_MT8186, |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 166 | M4U_MT8192, |
| 167 | M4U_MT8195, |
| 168 | }; |
| 169 | |
| 170 | struct mtk_iommu_iova_region { |
| 171 | dma_addr_t iova_base; |
| 172 | unsigned long long size; |
| 173 | }; |
| 174 | |
Yong Wu | 6a513de | 2022-05-03 15:14:18 +0800 | [diff] [blame] | 175 | struct mtk_iommu_suspend_reg { |
| 176 | u32 misc_ctrl; |
| 177 | u32 dcm_dis; |
| 178 | u32 ctrl_reg; |
Yong Wu | 6a513de | 2022-05-03 15:14:18 +0800 | [diff] [blame] | 179 | u32 vld_pa_rng; |
| 180 | u32 wr_len_ctrl; |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 181 | |
| 182 | u32 int_control[MTK_IOMMU_BANK_MAX]; |
| 183 | u32 int_main_control[MTK_IOMMU_BANK_MAX]; |
| 184 | u32 ivrp_paddr[MTK_IOMMU_BANK_MAX]; |
Yong Wu | 6a513de | 2022-05-03 15:14:18 +0800 | [diff] [blame] | 185 | }; |
| 186 | |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 187 | struct mtk_iommu_plat_data { |
| 188 | enum mtk_iommu_plat m4u_plat; |
| 189 | u32 flags; |
| 190 | u32 inv_sel_reg; |
| 191 | |
| 192 | char *pericfg_comp_str; |
| 193 | struct list_head *hw_list; |
| 194 | unsigned int iova_region_nr; |
| 195 | const struct mtk_iommu_iova_region *iova_region; |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 196 | |
| 197 | u8 banks_num; |
| 198 | bool banks_enable[MTK_IOMMU_BANK_MAX]; |
Yong Wu | 57fb481 | 2022-05-03 15:14:23 +0800 | [diff] [blame] | 199 | unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX]; |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 200 | unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX]; |
| 201 | }; |
| 202 | |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 203 | struct mtk_iommu_bank_data { |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 204 | void __iomem *base; |
| 205 | int irq; |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 206 | u8 id; |
| 207 | struct device *parent_dev; |
| 208 | struct mtk_iommu_data *parent_data; |
| 209 | spinlock_t tlb_lock; /* lock for tlb range flush */ |
| 210 | struct mtk_iommu_domain *m4u_dom; /* Each bank has a domain */ |
| 211 | }; |
| 212 | |
| 213 | struct mtk_iommu_data { |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 214 | struct device *dev; |
| 215 | struct clk *bclk; |
| 216 | phys_addr_t protect_base; /* protect memory base */ |
| 217 | struct mtk_iommu_suspend_reg reg; |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 218 | struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX]; |
| 219 | bool enable_4GB; |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 220 | |
| 221 | struct iommu_device iommu; |
| 222 | const struct mtk_iommu_plat_data *plat_data; |
| 223 | struct device *smicomm_dev; |
| 224 | |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 225 | struct mtk_iommu_bank_data *bank; |
| 226 | |
Yong Wu | 9485a04 | 2022-05-03 15:14:17 +0800 | [diff] [blame] | 227 | struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ |
| 228 | struct regmap *pericfg; |
| 229 | |
| 230 | struct mutex mutex; /* Protect m4u_group/m4u_dom above */ |
| 231 | |
| 232 | /* |
| 233 | * In the sharing pgtable case, list data->list to the global list like m4ulist. |
| 234 | * In the non-sharing pgtable case, list data->list to the itself hw_list_head. |
| 235 | */ |
| 236 | struct list_head *hw_list; |
| 237 | struct list_head hw_list_head; |
| 238 | struct list_head list; |
| 239 | struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; |
| 240 | }; |
| 241 | |
struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct mtk_iommu_bank_data	*bank;
	struct iommu_domain		domain;

	struct mutex			mutex; /* Protect "data" in this structure */
};

static int mtk_iommu_bind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_unbind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static const struct iommu_ops mtk_iommu_ops;

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid);

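/*
 * Pack an (up to 35bit) iova for the range invalidate registers: bits[31:12]
 * keep their position, bits[34:32] are carried in the low bits.
 */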
#define MTK_IOMMU_TLB_ADDR(iova) ({					\
	dma_addr_t _addr = iova;					\
	((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
})

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 *  =============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U; for regions 'B'/'C'/'D',
 * bit32 of the CPU physical address always needs to be set, and for region
 * 'E', the CPU physical address is kept as is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data, head)  list_for_each_entry(data, head, list)

static const struct mtk_iommu_iova_region single_domain[] = {
	{.iova_base = 0,		.size = SZ_4G},
};

static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
	{ .iova_base = 0x0,		.size = SZ_4G},		/* 0 ~ 4G */
#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
	{ .iova_base = SZ_4G,		.size = SZ_4G},		/* 4G ~ 8G */
	{ .iova_base = SZ_4G * 2,	.size = SZ_4G},		/* 8G ~ 12G */
	{ .iova_base = SZ_4G * 3,	.size = SZ_4G},		/* 12G ~ 16G */

	{ .iova_base = 0x240000000ULL,	.size = 0x4000000},	/* CCU0 */
	{ .iova_base = 0x244000000ULL,	.size = 0x4000000},	/* CCU1 */
#endif
};

/* If 2 M4Us share a domain (use the same hwlist), put the corresponding info in the first data. */
static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
{
	return list_first_entry(hwlist, struct mtk_iommu_data, list);
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	/* TLB flush-all is always done through bank0. */
	struct mtk_iommu_bank_data *bank = &data->bank[0];
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->tlb_lock, flags);
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
	writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all is done */
	spin_unlock_irqrestore(&bank->tlb_lock, flags);
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   struct mtk_iommu_bank_data *bank)
{
	struct list_head *head = bank->parent_data->hw_list;
	struct mtk_iommu_bank_data *curbank;
	struct mtk_iommu_data *data;
	bool check_pm_status;
	unsigned long flags;
	void __iomem *base;
	int ret;
	u32 tmp;

	for_each_m4u(data, head) {
		/*
		 * To avoid resuming the iommu device frequently when it is not
		 * active, don't always call pm_runtime_get here; the tlb flush
		 * then relies on the tlb flush-all done in runtime resume.
		 *
		 * There are 2 special cases:
		 *
		 * Case1: The iommu dev doesn't have a power domain but has bclk.
		 * This case should also avoid the tlb flush while the dev is not
		 * active, to mute the tlb timeout log, e.g. on mt8173.
		 *
		 * Case2: The power/clock of the infra iommu is always on, and it
		 * doesn't have a device link with the master devices. This case
		 * should skip the PM status check.
		 */
		check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);

		if (check_pm_status) {
			if (pm_runtime_get_if_in_use(data->dev) <= 0)
				continue;
		}

		curbank = &data->bank[bank->id];
		base = curbank->base;

		spin_lock_irqsave(&curbank->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       base + data->plat_data->inv_sel_reg);

		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
			       base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);

		/* Clear the CPE status */
		writel_relaxed(0, base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&curbank->tlb_lock, flags);

		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(data);
		}

		if (check_pm_status)
			pm_runtime_put(data->dev);
	}
}

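/*
 * Bank interrupt handler: decode the fault info (iova/pa/larb/port), report
 * the fault, clear the interrupt and flush the TLB.
 */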
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_bank_data *bank = dev_id;
	struct mtk_iommu_data *data = bank->parent_data;
	struct mtk_iommu_domain *dom = bank->m4u_dom;
	unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
	u32 int_state, regval, va34_32, pa34_32;
	const struct mtk_iommu_plat_data *plat_data = data->plat_data;
	void __iomem *base = bank->base;
	u64 fault_iova, fault_pa;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
		va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
		fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
		fault_iova |= (u64)va34_32 << 32;
	}
	pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
	fault_pa |= (u64)pa34_32 << 32;

	if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
		fault_port = F_MMU_INT_ID_PORT_ID(regval);
		if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
			fault_larb = F_MMU_INT_ID_COMM_ID(regval);
			sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
		} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
			fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
			sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
		} else {
			fault_larb = F_MMU_INT_ID_LARB_ID(regval);
		}
		fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
	}

	if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			bank->parent_dev,
			"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
			int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

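/*
 * Return the bank for this master device: the first enabled bank whose port
 * mask contains one of the device's ports. Bank0 is the default.
 */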
static unsigned int mtk_iommu_get_bank_id(struct device *dev,
					  const struct mtk_iommu_plat_data *plat_data)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	unsigned int i, portmsk = 0, bankid = 0;

	if (plat_data->banks_num == 1)
		return bankid;

	for (i = 0; i < fwspec->num_ids; i++)
		portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));

	for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) {
		if (!plat_data->banks_enable[i])
			continue;

		if (portmsk & plat_data->banks_portmsk[i]) {
			bankid = i;
			break;
		}
	}
	return bankid; /* default is 0 */
}

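/*
 * Pick the iova region for this device from its dma-ranges: prefer an exact
 * match, otherwise take the region that fully contains the device's DMA range.
 */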
static int mtk_iommu_get_iova_region_id(struct device *dev,
					const struct mtk_iommu_plat_data *plat_data)
{
	const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
	const struct bus_dma_region *dma_rgn = dev->dma_range_map;
	int i, candidate = -1;
	dma_addr_t dma_end;

	if (!dma_rgn || plat_data->iova_region_nr == 1)
		return 0;

	dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
	for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
		/* Best fit. */
		if (dma_rgn->dma_start == rgn->iova_base &&
		    dma_end == rgn->iova_base + rgn->size - 1)
			return i;
		/* ok if it is inside this region. */
		if (dma_rgn->dma_start >= rgn->iova_base &&
		    dma_end < rgn->iova_base + rgn->size)
			candidate = i;
	}

	if (candidate >= 0)
		return candidate;
	dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
		&dma_rgn->dma_start, dma_rgn->size);
	return -EINVAL;
}

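/*
 * Enable/disable the IOMMU for all the ports of this master device, either
 * via the SMI larb registers (MM masters) or via PERICFG_IOMMU_1 (infra
 * masters).
 */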
static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
			    bool enable, unsigned int regionid)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	const struct mtk_iommu_iova_region *region;
	u32 peri_mmuen, peri_mmuen_msk;
	int i, ret = 0;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);

		if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
			larb_mmu = &data->larb_imu[larbid];

			region = data->plat_data->iova_region + regionid;
			larb_mmu->bank[portid] = upper_32_bits(region->iova_base);

			dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
				enable ? "enable" : "disable", dev_name(larb_mmu->dev),
				portid, regionid, larb_mmu->bank[portid]);

			if (enable)
				larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
			else
				larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
		} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
			peri_mmuen_msk = BIT(portid);
			/* PCI dev has only one output id, enable the next writing bit for PCIe */
			if (dev_is_pci(dev))
				peri_mmuen_msk |= BIT(portid + 1);

			peri_mmuen = enable ? peri_mmuen_msk : 0;
			ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
						 peri_mmuen_msk, peri_mmuen);
			if (ret)
				dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
					enable ? "enable" : "disable",
					dev_name(data->dev), peri_mmuen_msk, ret);
		}
	}
	return ret;
}

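/*
 * Prepare the io-pgtable for this domain. In the sharing pgtable case, reuse
 * the pgtable that bank0 already holds and only update the iova geometry.
 */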
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
				     struct mtk_iommu_data *data,
				     unsigned int region_id)
{
	const struct mtk_iommu_iova_region *region;
	struct mtk_iommu_domain	*m4u_dom;

	/* Always use bank0 in sharing pgtable case */
	m4u_dom = data->bank[0].m4u_dom;
	if (m4u_dom) {
		dom->iop = m4u_dom->iop;
		dom->cfg = m4u_dom->cfg;
		dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
		goto update_iova_region;
	}

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
		.iommu_dev = data->dev,
	};

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
		dom->cfg.oas = data->enable_4GB ? 33 : 32;
	else
		dom->cfg.oas = 35;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

update_iova_region:
	/* Update the iova region for this domain */
	region = data->plat_data->iova_region + region_id;
	dom->domain.geometry.aperture_start = region->iova_base;
	dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
	dom->domain.geometry.force_aperture = true;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	mutex_init(&dom->mutex);

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct list_head *hw_list = data->hw_list;
	struct device *m4udev = data->dev;
	struct mtk_iommu_bank_data *bank;
	unsigned int bankid;
	int ret, region_id;

	region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
	if (region_id < 0)
		return region_id;

	bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
	mutex_lock(&dom->mutex);
	if (!dom->bank) {
		/* Data is in the frstdata in sharing pgtable case. */
		frstdata = mtk_iommu_get_frst_data(hw_list);

		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
		if (ret) {
			mutex_unlock(&dom->mutex);
			return -ENODEV;
		}
		dom->bank = &data->bank[bankid];
	}
	mutex_unlock(&dom->mutex);

	mutex_lock(&data->mutex);
	bank = &data->bank[bankid];
	if (!bank->m4u_dom) { /* Initialize the M4U HW for each bank */
		ret = pm_runtime_resume_and_get(m4udev);
		if (ret < 0) {
			dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
			goto err_unlock;
		}

		ret = mtk_iommu_hw_init(data, bankid);
		if (ret) {
			pm_runtime_put(m4udev);
			goto err_unlock;
		}
		bank->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);

		pm_runtime_put(m4udev);
	}
	mutex_unlock(&data->mutex);

	return mtk_iommu_config(data, dev, true, region_id);

err_unlock:
	mutex_unlock(&data->mutex);
	return ret;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_config(data, dev, false, 0);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
	if (dom->bank->parent_data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	iommu_iotlb_gather_add_range(gather, iova, size);
	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_all(dom->bank->parent_data);
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	size_t length = gather->end - gather->start + 1;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
}

static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
	    dom->bank->parent_data->enable_4GB &&
	    pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct device_link *link;
	struct device *larbdev;
	unsigned int larbid, larbidx, i;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
		return &data->iommu;

	/*
	 * Link the consumer device with the smi-larb device (supplier).
	 * The device that connects with each larb is an independent HW.
	 * All the ports in one device should be in the same larb.
	 */
	larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
	if (larbid >= MTK_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (i = 1; i < fwspec->num_ids; i++) {
		larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}
	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct device *larbdev;
	unsigned int larbid;

	data = dev_iommu_priv_get(dev);
	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
		larbdev = data->larb_imu[larbid].dev;
		device_link_remove(dev, larbdev);
	}
}

static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
{
	unsigned int bankid;

	/*
	 * If the bank function is enabled, each bank is an iommu group/domain.
	 * Otherwise, each iova region is an iommu group/domain.
	 */
	bankid = mtk_iommu_get_bank_id(dev, plat_data);
	if (bankid)
		return bankid;

	return mtk_iommu_get_iova_region_id(dev, plat_data);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data;
	struct list_head *hw_list = c_data->hw_list;
	struct iommu_group *group;
	int groupid;

	data = mtk_iommu_get_frst_data(hw_list);
	if (!data)
		return ERR_PTR(-ENODEV);

	groupid = mtk_iommu_get_group_id(dev, data->plat_data);
	if (groupid < 0)
		return ERR_PTR(groupid);

	mutex_lock(&data->mutex);
	group = data->m4u_group[groupid];
	if (!group) {
		group = iommu_group_alloc();
		if (!IS_ERR(group))
			data->m4u_group[groupid] = group;
	} else {
		iommu_group_ref_get(group);
	}
	mutex_unlock(&data->mutex);
	return group;
}
| 876 | |
| 877 | static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) |
| 878 | { |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 879 | struct platform_device *m4updev; |
| 880 | |
| 881 | if (args->args_count != 1) { |
| 882 | dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n", |
| 883 | args->args_count); |
| 884 | return -EINVAL; |
| 885 | } |
| 886 | |
Joerg Roedel | 3524b55 | 2020-03-26 16:08:38 +0100 | [diff] [blame] | 887 | if (!dev_iommu_priv_get(dev)) { |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 888 | /* Get the m4u device */ |
| 889 | m4updev = of_find_device_by_node(args->np); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 890 | if (WARN_ON(!m4updev)) |
| 891 | return -EINVAL; |
| 892 | |
Joerg Roedel | 3524b55 | 2020-03-26 16:08:38 +0100 | [diff] [blame] | 893 | dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 894 | } |
| 895 | |
Robin Murphy | 58f0d1d | 2016-10-17 12:49:20 +0100 | [diff] [blame] | 896 | return iommu_fwspec_add_ids(dev, args->args, 1); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 897 | } |
| 898 | |
Yong Wu | ab1d528 | 2021-01-11 19:19:10 +0800 | [diff] [blame] | 899 | static void mtk_iommu_get_resv_regions(struct device *dev, |
| 900 | struct list_head *head) |
| 901 | { |
| 902 | struct mtk_iommu_data *data = dev_iommu_priv_get(dev); |
Yong Wu | d72e0ff | 2022-05-03 15:14:22 +0800 | [diff] [blame] | 903 | unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i; |
Yong Wu | ab1d528 | 2021-01-11 19:19:10 +0800 | [diff] [blame] | 904 | const struct mtk_iommu_iova_region *resv, *curdom; |
| 905 | struct iommu_resv_region *region; |
| 906 | int prot = IOMMU_WRITE | IOMMU_READ; |
| 907 | |
Yong Wu | d72e0ff | 2022-05-03 15:14:22 +0800 | [diff] [blame] | 908 | if ((int)regionid < 0) |
Yong Wu | ab1d528 | 2021-01-11 19:19:10 +0800 | [diff] [blame] | 909 | return; |
Yong Wu | d72e0ff | 2022-05-03 15:14:22 +0800 | [diff] [blame] | 910 | curdom = data->plat_data->iova_region + regionid; |
Yong Wu | ab1d528 | 2021-01-11 19:19:10 +0800 | [diff] [blame] | 911 | for (i = 0; i < data->plat_data->iova_region_nr; i++) { |
| 912 | resv = data->plat_data->iova_region + i; |
| 913 | |
| 914 | /* Only reserve when the region is inside the current domain */ |
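		/*
		 * Illustrative only (hypothetical addresses): if the current
		 * domain spans [0x4000_0000, 0x1_0000_0000) and another iova
		 * region is [0x6000_0000, 0x7000_0000), the latter is reserved
		 * here so this domain never hands out iova from it.
		 */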
| 915 | if (resv->iova_base <= curdom->iova_base || |
| 916 | resv->iova_base + resv->size >= curdom->iova_base + curdom->size) |
| 917 | continue; |
| 918 | |
| 919 | region = iommu_alloc_resv_region(resv->iova_base, resv->size, |
| 920 | prot, IOMMU_RESV_RESERVED); |
| 921 | if (!region) |
| 922 | return; |
| 923 | |
| 924 | list_add_tail(®ion->list, head); |
| 925 | } |
| 926 | } |
| 927 | |
Arvind Yadav | b65f501 | 2018-10-18 19:13:38 +0800 | [diff] [blame] | 928 | static const struct iommu_ops mtk_iommu_ops = { |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 929 | .domain_alloc = mtk_iommu_domain_alloc, |
Joerg Roedel | 80e4592 | 2020-04-29 15:37:00 +0200 | [diff] [blame] | 930 | .probe_device = mtk_iommu_probe_device, |
| 931 | .release_device = mtk_iommu_release_device, |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 932 | .device_group = mtk_iommu_device_group, |
| 933 | .of_xlate = mtk_iommu_of_xlate, |
Yong Wu | ab1d528 | 2021-01-11 19:19:10 +0800 | [diff] [blame] | 934 | .get_resv_regions = mtk_iommu_get_resv_regions, |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 935 | .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, |
Yong Wu | 18d8c74 | 2021-03-26 11:23:37 +0800 | [diff] [blame] | 936 | .owner = THIS_MODULE, |
Lu Baolu | 9a630a4 | 2022-02-16 10:52:49 +0800 | [diff] [blame] | 937 | .default_domain_ops = &(const struct iommu_domain_ops) { |
| 938 | .attach_dev = mtk_iommu_attach_device, |
| 939 | .detach_dev = mtk_iommu_detach_device, |
| 940 | .map = mtk_iommu_map, |
| 941 | .unmap = mtk_iommu_unmap, |
| 942 | .flush_iotlb_all = mtk_iommu_flush_iotlb_all, |
| 943 | .iotlb_sync = mtk_iommu_iotlb_sync, |
| 944 | .iotlb_sync_map = mtk_iommu_sync_map, |
| 945 | .iova_to_phys = mtk_iommu_iova_to_phys, |
| 946 | .free = mtk_iommu_domain_free, |
| 947 | } |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 948 | }; |
| 949 | |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 950 | static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid) |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 951 | { |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 952 | const struct mtk_iommu_bank_data *bankx = &data->bank[bankid]; |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 953 | const struct mtk_iommu_bank_data *bank0 = &data->bank[0]; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 954 | u32 regval; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 955 | |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 956 | /* |
| 957 | * Global control settings are in bank0. These global registers may be |
| 958 | * re-initialized since it is not certain whether bank0 has any consumers. |
| 959 | */ |
AngeloGioacchino Del Regno | 86580ec | 2022-09-13 17:11:47 +0200 | [diff] [blame] | 960 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) { |
Yong Wu | acb3c92 | 2019-08-24 11:01:58 +0800 | [diff] [blame] | 961 | regval = F_MMU_PREFETCH_RT_REPLACE_MOD | |
| 962 | F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; |
Chao Hao | 8644441 | 2020-07-03 12:41:26 +0800 | [diff] [blame] | 963 | } else { |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 964 | regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG); |
Chao Hao | 8644441 | 2020-07-03 12:41:26 +0800 | [diff] [blame] | 965 | regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR; |
| 966 | } |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 967 | writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 968 | |
Chao Hao | 6b71779 | 2020-07-03 12:41:20 +0800 | [diff] [blame] | 969 | if (data->enable_4GB && |
| 970 | MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) { |
Yong Wu | 30e2fcc | 2017-08-21 19:00:20 +0800 | [diff] [blame] | 971 | /* |
| 972 | * If 4GB mode is enabled, the valid PA range is from |
| 973 | * 0x1_0000_0000 to 0x1_ffff_ffff. Here record bit[32:30]. |
| 974 | */ |
| 975 | regval = F_MMU_VLD_PA_RNG(7, 4); |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 976 | writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG); |
Yong Wu | 30e2fcc | 2017-08-21 19:00:20 +0800 | [diff] [blame] | 977 | } |
Yong Wu | 9a87005 | 2022-05-03 15:14:02 +0800 | [diff] [blame] | 978 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE)) |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 979 | writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS); |
Yong Wu | 9a87005 | 2022-05-03 15:14:02 +0800 | [diff] [blame] | 980 | else |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 981 | writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS); |
Yong Wu | 9a87005 | 2022-05-03 15:14:02 +0800 | [diff] [blame] | 982 | |
Chao Hao | 35c1b48 | 2020-07-03 12:41:24 +0800 | [diff] [blame] | 983 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) { |
| 984 | /* write command throttling mode */ |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 985 | regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL); |
Chao Hao | 35c1b48 | 2020-07-03 12:41:24 +0800 | [diff] [blame] | 986 | regval &= ~F_MMU_WR_THROT_DIS_MASK; |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 987 | writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL); |
Chao Hao | 35c1b48 | 2020-07-03 12:41:24 +0800 | [diff] [blame] | 988 | } |
Yong Wu | e6dec92 | 2017-08-21 19:00:16 +0800 | [diff] [blame] | 989 | |
Chao Hao | 6b71779 | 2020-07-03 12:41:20 +0800 | [diff] [blame] | 990 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) { |
Chao Hao | 75eed35 | 2020-07-03 12:41:19 +0800 | [diff] [blame] | 991 | /* The register is called STANDARD_AXI_MODE in this case */ |
Chao Hao | 4bb2bf4 | 2020-07-03 12:41:21 +0800 | [diff] [blame] | 992 | regval = 0; |
| 993 | } else { |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 994 | regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL); |
Yong Wu | d265a4a | 2022-05-03 15:14:03 +0800 | [diff] [blame] | 995 | if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE)) |
| 996 | regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; |
Chao Hao | 4bb2bf4 | 2020-07-03 12:41:21 +0800 | [diff] [blame] | 997 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN)) |
| 998 | regval &= ~F_MMU_IN_ORDER_WR_EN_MASK; |
Chao Hao | 75eed35 | 2020-07-03 12:41:19 +0800 | [diff] [blame] | 999 | } |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1000 | writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1001 | |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 1002 | /* Independent settings for each bank */ |
Yong Wu | 634f57d | 2022-05-03 15:14:16 +0800 | [diff] [blame] | 1003 | regval = F_L2_MULIT_HIT_EN | |
| 1004 | F_TABLE_WALK_FAULT_INT_EN | |
| 1005 | F_PREETCH_FIFO_OVERFLOW_INT_EN | |
| 1006 | F_MISS_FIFO_OVERFLOW_INT_EN | |
| 1007 | F_PREFETCH_FIFO_ERR_INT_EN | |
| 1008 | F_MISS_FIFO_ERR_INT_EN; |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 1009 | writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0); |
Yong Wu | 634f57d | 2022-05-03 15:14:16 +0800 | [diff] [blame] | 1010 | |
| 1011 | regval = F_INT_TRANSLATION_FAULT | |
| 1012 | F_INT_MAIN_MULTI_HIT_FAULT | |
| 1013 | F_INT_INVALID_PA_FAULT | |
| 1014 | F_INT_ENTRY_REPLACEMENT_FAULT | |
| 1015 | F_INT_TLB_MISS_FAULT | |
| 1016 | F_INT_MISS_TRANSACTION_FIFO_FAULT | |
| 1017 | F_INT_PRETETCH_TRANSATION_FIFO_FAULT; |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 1018 | writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL); |
Yong Wu | 634f57d | 2022-05-03 15:14:16 +0800 | [diff] [blame] | 1019 | |
| 1020 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR)) |
| 1021 | regval = (data->protect_base >> 1) | (data->enable_4GB << 31); |
| 1022 | else |
| 1023 | regval = lower_32_bits(data->protect_base) | |
| 1024 | upper_32_bits(data->protect_base); |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 1025 | writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR); |
Yong Wu | 634f57d | 2022-05-03 15:14:16 +0800 | [diff] [blame] | 1026 | |
Yong Wu | e24453e | 2022-05-03 15:14:21 +0800 | [diff] [blame] | 1027 | if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0, |
| 1028 | dev_name(bankx->parent_dev), (void *)bankx)) { |
| 1029 | writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR); |
| 1030 | dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1031 | return -ENODEV; |
| 1032 | } |
| 1033 | |
| 1034 | return 0; |
| 1035 | } |
| 1036 | |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1037 | static const struct component_master_ops mtk_iommu_com_ops = { |
| 1038 | .bind = mtk_iommu_bind, |
| 1039 | .unbind = mtk_iommu_unbind, |
| 1040 | }; |
| 1041 | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1042 | static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match, |
| 1043 | struct mtk_iommu_data *data) |
| 1044 | { |
Yong Wu | f7b71d0 | 2022-05-03 15:14:09 +0800 | [diff] [blame] | 1045 | struct device_node *larbnode, *smicomm_node, *smi_subcomm_node; |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1046 | struct platform_device *plarbdev; |
| 1047 | struct device_link *link; |
| 1048 | int i, larb_nr, ret; |
| 1049 | |
| 1050 | larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL); |
| 1051 | if (larb_nr < 0) |
| 1052 | return larb_nr; |
| 1053 | |
| 1054 | for (i = 0; i < larb_nr; i++) { |
| 1055 | u32 id; |
| 1056 | |
| 1057 | larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); |
| 1058 | if (!larbnode) |
| 1059 | return -EINVAL; |
| 1060 | |
| 1061 | if (!of_device_is_available(larbnode)) { |
| 1062 | of_node_put(larbnode); |
| 1063 | continue; |
| 1064 | } |
| 1065 | |
| 1066 | ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); |
| 1067 | if (ret)/* The id is consecutive if this property is absent */ |
| 1068 | id = i; |
| 1069 | |
| 1070 | plarbdev = of_find_device_by_node(larbnode); |
| 1071 | if (!plarbdev) { |
| 1072 | of_node_put(larbnode); |
| 1073 | return -ENODEV; |
| 1074 | } |
| 1075 | if (!plarbdev->dev.driver) { |
| 1076 | of_node_put(larbnode); |
| 1077 | return -EPROBE_DEFER; |
| 1078 | } |
| 1079 | data->larb_imu[id].dev = &plarbdev->dev; |
| 1080 | |
| 1081 | component_match_add_release(dev, match, component_release_of, |
| 1082 | component_compare_of, larbnode); |
| 1083 | } |
| 1084 | |
Yong Wu | f7b71d0 | 2022-05-03 15:14:09 +0800 | [diff] [blame] | 1085 | /* Get smi-(sub)-common dev from the last larb. */ |
| 1086 | smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); |
| 1087 | if (!smi_subcomm_node) |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1088 | return -EINVAL; |
| 1089 | |
Yong Wu | f7b71d0 | 2022-05-03 15:14:09 +0800 | [diff] [blame] | 1090 | /* |
| 1091 | * There may be two levels of smi-common. The node is smi-sub-common if it |
| 1092 | * has a new mediatek,smi property; otherwise it is smi-common. |
| 1093 | */ |
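	/*
	 * A sketch of the phandle chain walked here (node names are purely
	 * illustrative, not taken from a real devicetree):
	 *   larb           : mediatek,smi = <&smi_sub_common>;
	 *   smi-sub-common : mediatek,smi = <&smi_common>;
	 *   smi-common     : no further mediatek,smi property.
	 */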
| 1094 | smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0); |
| 1095 | if (smicomm_node) |
| 1096 | of_node_put(smi_subcomm_node); |
| 1097 | else |
| 1098 | smicomm_node = smi_subcomm_node; |
| 1099 | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1100 | plarbdev = of_find_device_by_node(smicomm_node); |
| 1101 | of_node_put(smicomm_node); |
| 1102 | data->smicomm_dev = &plarbdev->dev; |
| 1103 | |
| 1104 | link = device_link_add(data->smicomm_dev, dev, |
| 1105 | DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); |
| 1106 | if (!link) { |
| 1107 | dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); |
| 1108 | return -EINVAL; |
| 1109 | } |
| 1110 | return 0; |
| 1111 | } |
| 1112 | |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1113 | static int mtk_iommu_probe(struct platform_device *pdev) |
| 1114 | { |
| 1115 | struct mtk_iommu_data *data; |
| 1116 | struct device *dev = &pdev->dev; |
| 1117 | struct resource *res; |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1118 | resource_size_t ioaddr; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1119 | struct component_match *match = NULL; |
Miles Chen | c2c5945 | 2020-09-04 18:40:38 +0800 | [diff] [blame] | 1120 | struct regmap *infracfg; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1121 | void *protect; |
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1122 | int ret, banks_num, i = 0; |
Miles Chen | c2c5945 | 2020-09-04 18:40:38 +0800 | [diff] [blame] | 1123 | u32 val; |
| 1124 | char *p; |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1125 | struct mtk_iommu_bank_data *bank; |
| 1126 | void __iomem *base; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1127 | |
| 1128 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
| 1129 | if (!data) |
| 1130 | return -ENOMEM; |
| 1131 | data->dev = dev; |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1132 | data->plat_data = of_device_get_match_data(dev); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1133 | |
| 1134 | /* Protect memory. HW will access here when a translation fault occurs. */ |
| 1135 | protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); |
| 1136 | if (!protect) |
| 1137 | return -ENOMEM; |
| 1138 | data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); |
| 1139 | |
Miles Chen | c2c5945 | 2020-09-04 18:40:38 +0800 | [diff] [blame] | 1140 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) { |
AngeloGioacchino Del Regno | 7d748ff | 2022-06-16 13:08:27 +0200 | [diff] [blame] | 1141 | infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg"); |
| 1142 | if (IS_ERR(infracfg)) { |
| 1143 | /* |
| 1144 | * Legacy devicetrees will not specify a phandle to |
| 1145 | * mediatek,infracfg: in that case, we use the older |
| 1146 | * way to retrieve a syscon to infra. |
| 1147 | * |
| 1148 | * This is for retrocompatibility purposes only, hence |
| 1149 | * no more compatibles shall be added to this. |
| 1150 | */ |
| 1151 | switch (data->plat_data->m4u_plat) { |
| 1152 | case M4U_MT2712: |
| 1153 | p = "mediatek,mt2712-infracfg"; |
| 1154 | break; |
| 1155 | case M4U_MT8173: |
| 1156 | p = "mediatek,mt8173-infracfg"; |
| 1157 | break; |
| 1158 | default: |
| 1159 | p = NULL; |
| 1160 | } |
| 1161 | |
| 1162 | infracfg = syscon_regmap_lookup_by_compatible(p); |
| 1163 | if (IS_ERR(infracfg)) |
| 1164 | return PTR_ERR(infracfg); |
Miles Chen | c2c5945 | 2020-09-04 18:40:38 +0800 | [diff] [blame] | 1165 | } |
| 1166 | |
Miles Chen | c2c5945 | 2020-09-04 18:40:38 +0800 | [diff] [blame] | 1167 | ret = regmap_read(infracfg, REG_INFRA_MISC, &val); |
| 1168 | if (ret) |
| 1169 | return ret; |
| 1170 | data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN); |
| 1171 | } |
Yong Wu | 01e23c9 | 2016-03-14 06:01:11 +0800 | [diff] [blame] | 1172 | |
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1173 | banks_num = data->plat_data->banks_num; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1174 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1175 | if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) { |
| 1176 | dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res); |
| 1177 | return -EINVAL; |
| 1178 | } |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1179 | base = devm_ioremap_resource(dev, res); |
| 1180 | if (IS_ERR(base)) |
| 1181 | return PTR_ERR(base); |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1182 | ioaddr = res->start; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1183 | |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1184 | data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL); |
| 1185 | if (!data->bank) |
| 1186 | return -ENOMEM; |
| 1187 | |
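	/* Describe each enabled bank: its slice of the shared MMIO resource and its own IRQ. */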
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1188 | do { |
| 1189 | if (!data->plat_data->banks_enable[i]) |
| 1190 | continue; |
| 1191 | bank = &data->bank[i]; |
| 1192 | bank->id = i; |
| 1193 | bank->base = base + i * MTK_IOMMU_BANK_SZ; |
| 1194 | bank->m4u_dom = NULL; |
| 1195 | |
| 1196 | bank->irq = platform_get_irq(pdev, i); |
| 1197 | if (bank->irq < 0) |
| 1198 | return bank->irq; |
| 1199 | bank->parent_dev = dev; |
| 1200 | bank->parent_data = data; |
| 1201 | spin_lock_init(&bank->tlb_lock); |
| 1202 | } while (++i < banks_num); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1203 | |
Chao Hao | 6b71779 | 2020-07-03 12:41:20 +0800 | [diff] [blame] | 1204 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { |
Yong Wu | 2aa4c25 | 2019-08-24 11:01:56 +0800 | [diff] [blame] | 1205 | data->bclk = devm_clk_get(dev, "bclk"); |
| 1206 | if (IS_ERR(data->bclk)) |
| 1207 | return PTR_ERR(data->bclk); |
| 1208 | } |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1209 | |
Yong Wu | c0b5758 | 2021-01-11 19:19:01 +0800 | [diff] [blame] | 1210 | pm_runtime_enable(dev); |
| 1211 | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1212 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
| 1213 | ret = mtk_iommu_mm_dts_parse(dev, &match, data); |
| 1214 | if (ret) { |
Nícolas F. R. A. Prado | 3168010 | 2022-07-12 17:44:27 -0400 | [diff] [blame] | 1215 | dev_err_probe(dev, ret, "mm dts parse fail\n"); |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1216 | goto out_runtime_disable; |
| 1217 | } |
AngeloGioacchino Del Regno | 21fd9be | 2022-06-16 13:08:30 +0200 | [diff] [blame] | 1218 | } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { |
| 1219 | p = data->plat_data->pericfg_comp_str; |
| 1220 | data->pericfg = syscon_regmap_lookup_by_compatible(p); |
| 1221 | if (IS_ERR(data->pericfg)) { |
| 1222 | ret = PTR_ERR(data->pericfg); |
Yong Wu | f9b8c9b | 2022-05-03 15:14:12 +0800 | [diff] [blame] | 1223 | goto out_runtime_disable; |
| 1224 | } |
Yong Wu | baf94e6 | 2021-01-11 19:18:59 +0800 | [diff] [blame] | 1225 | } |
| 1226 | |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1227 | platform_set_drvdata(pdev, data); |
Yong Wu | 0e5a3f2 | 2022-05-03 15:13:58 +0800 | [diff] [blame] | 1228 | mutex_init(&data->mutex); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1229 | |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1230 | ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, |
| 1231 | "mtk-iommu.%pa", &ioaddr); |
| 1232 | if (ret) |
Yong Wu | baf94e6 | 2021-01-11 19:18:59 +0800 | [diff] [blame] | 1233 | goto out_link_remove; |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1234 | |
Robin Murphy | 2d471b2 | 2021-04-01 14:56:26 +0100 | [diff] [blame] | 1235 | ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1236 | if (ret) |
Yong Wu | 986d9ec | 2021-01-11 19:18:58 +0800 | [diff] [blame] | 1237 | goto out_sysfs_remove; |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1238 | |
Yong Wu | 9e3a2a6 | 2022-05-03 15:14:00 +0800 | [diff] [blame] | 1239 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { |
| 1240 | list_add_tail(&data->list, data->plat_data->hw_list); |
| 1241 | data->hw_list = data->plat_data->hw_list; |
| 1242 | } else { |
| 1243 | INIT_LIST_HEAD(&data->hw_list_head); |
| 1244 | list_add_tail(&data->list, &data->hw_list_head); |
| 1245 | data->hw_list = &data->hw_list_head; |
| 1246 | } |
Yong Wu | 7c3a2ec | 2017-08-21 19:00:17 +0800 | [diff] [blame] | 1247 | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1248 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
| 1249 | ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); |
| 1250 | if (ret) |
Robin Murphy | 7341c36 | 2022-08-15 17:20:13 +0100 | [diff] [blame] | 1251 | goto out_list_del; |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1252 | } |
Yong Wu | 986d9ec | 2021-01-11 19:18:58 +0800 | [diff] [blame] | 1253 | return ret; |
| 1254 | |
Yong Wu | 986d9ec | 2021-01-11 19:18:58 +0800 | [diff] [blame] | 1255 | out_list_del: |
| 1256 | list_del(&data->list); |
| 1257 | iommu_device_unregister(&data->iommu); |
| 1258 | out_sysfs_remove: |
| 1259 | iommu_device_sysfs_remove(&data->iommu); |
Yong Wu | baf94e6 | 2021-01-11 19:18:59 +0800 | [diff] [blame] | 1260 | out_link_remove: |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1261 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) |
| 1262 | device_link_remove(data->smicomm_dev, dev); |
Yong Wu | c0b5758 | 2021-01-11 19:19:01 +0800 | [diff] [blame] | 1263 | out_runtime_disable: |
| 1264 | pm_runtime_disable(dev); |
Yong Wu | 986d9ec | 2021-01-11 19:18:58 +0800 | [diff] [blame] | 1265 | return ret; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1266 | } |
| 1267 | |
| 1268 | static int mtk_iommu_remove(struct platform_device *pdev) |
| 1269 | { |
| 1270 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); |
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1271 | struct mtk_iommu_bank_data *bank; |
| 1272 | int i; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1273 | |
Joerg Roedel | b16c017 | 2017-02-03 12:57:32 +0100 | [diff] [blame] | 1274 | iommu_device_sysfs_remove(&data->iommu); |
| 1275 | iommu_device_unregister(&data->iommu); |
| 1276 | |
Yong Wu | ee55f75 | 2022-05-03 15:13:56 +0800 | [diff] [blame] | 1277 | list_del(&data->list); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1278 | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1279 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
| 1280 | device_link_remove(data->smicomm_dev, &pdev->dev); |
| 1281 | component_master_del(&pdev->dev, &mtk_iommu_com_ops); |
| 1282 | } |
Yong Wu | c0b5758 | 2021-01-11 19:19:01 +0800 | [diff] [blame] | 1283 | pm_runtime_disable(&pdev->dev); |
Yong Wu | 42d57fc | 2022-05-03 15:14:24 +0800 | [diff] [blame] | 1284 | for (i = 0; i < data->plat_data->banks_num; i++) { |
| 1285 | bank = &data->bank[i]; |
| 1286 | if (!bank->m4u_dom) |
| 1287 | continue; |
| 1288 | devm_free_irq(&pdev->dev, bank->irq, bank); |
| 1289 | } |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1290 | return 0; |
| 1291 | } |
| 1292 | |
Yong Wu | 34665c7 | 2021-01-11 19:19:00 +0800 | [diff] [blame] | 1293 | static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev) |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1294 | { |
| 1295 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
| 1296 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1297 | void __iomem *base; |
| 1298 | int i = 0; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1299 | |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1300 | base = data->bank[i].base; |
Chao Hao | 35c1b48 | 2020-07-03 12:41:24 +0800 | [diff] [blame] | 1301 | reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); |
Chao Hao | 75eed35 | 2020-07-03 12:41:19 +0800 | [diff] [blame] | 1302 | reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1303 | reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); |
| 1304 | reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); |
Yong Wu | b9475b3 | 2019-08-24 11:02:06 +0800 | [diff] [blame] | 1305 | reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1306 | do { |
| 1307 | if (!data->plat_data->banks_enable[i]) |
| 1308 | continue; |
| 1309 | base = data->bank[i].base; |
| 1310 | reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0); |
| 1311 | reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); |
| 1312 | reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR); |
| 1313 | } while (++i < data->plat_data->banks_num); |
Yong Wu | 6254b64 | 2017-08-21 19:00:19 +0800 | [diff] [blame] | 1314 | clk_disable_unprepare(data->bclk); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1315 | return 0; |
| 1316 | } |
| 1317 | |
Yong Wu | 34665c7 | 2021-01-11 19:19:00 +0800 | [diff] [blame] | 1318 | static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1319 | { |
| 1320 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
| 1321 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1322 | struct mtk_iommu_domain *m4u_dom; |
| 1323 | void __iomem *base; |
| 1324 | int ret, i = 0; |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1325 | |
Yong Wu | 6254b64 | 2017-08-21 19:00:19 +0800 | [diff] [blame] | 1326 | ret = clk_prepare_enable(data->bclk); |
| 1327 | if (ret) { |
| 1328 | dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); |
| 1329 | return ret; |
| 1330 | } |
Dafna Hirschfeld | b34ea31 | 2021-04-16 12:54:49 +0200 | [diff] [blame] | 1331 | |
| 1332 | /* |
| 1333 | * Upon first resume, only enable the clk and return, since the values of the |
| 1334 | * registers are not yet set. |
| 1335 | */ |
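	/* reg->wr_len_ctrl (saved in suspend) doubles as the "registers were saved" marker here. */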
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1336 | if (!reg->wr_len_ctrl) |
Dafna Hirschfeld | b34ea31 | 2021-04-16 12:54:49 +0200 | [diff] [blame] | 1337 | return 0; |
| 1338 | |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1339 | base = data->bank[i].base; |
Chao Hao | 35c1b48 | 2020-07-03 12:41:24 +0800 | [diff] [blame] | 1340 | writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); |
Chao Hao | 75eed35 | 2020-07-03 12:41:19 +0800 | [diff] [blame] | 1341 | writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1342 | writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); |
| 1343 | writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); |
Yong Wu | b9475b3 | 2019-08-24 11:02:06 +0800 | [diff] [blame] | 1344 | writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1345 | do { |
| 1346 | m4u_dom = data->bank[i].m4u_dom; |
| 1347 | if (!data->plat_data->banks_enable[i] || !m4u_dom) |
| 1348 | continue; |
| 1349 | base = data->bank[i].base; |
| 1350 | writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0); |
| 1351 | writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL); |
| 1352 | writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR); |
Yunfei Wang | 301c3ca | 2022-06-30 17:29:26 +0800 | [diff] [blame] | 1353 | writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR); |
Yong Wu | d7127de | 2022-05-03 15:14:25 +0800 | [diff] [blame] | 1354 | } while (++i < data->plat_data->banks_num); |
Yong Wu | 4f23f6d | 2021-12-08 14:07:44 +0200 | [diff] [blame] | 1355 | |
| 1356 | /* |
| 1357 | * Users may allocate a dma buffer before they call pm_runtime_get, |
| 1358 | * in which case the necessary tlb flush would be missing. |
| 1359 | * Thus, make sure to update the tlb after each PM resume. |
| 1360 | */ |
| 1361 | mtk_iommu_tlb_flush_all(data); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1362 | return 0; |
| 1363 | } |
| 1364 | |
Yong Wu | e6dec92 | 2017-08-21 19:00:16 +0800 | [diff] [blame] | 1365 | static const struct dev_pm_ops mtk_iommu_pm_ops = { |
Yong Wu | 34665c7 | 2021-01-11 19:19:00 +0800 | [diff] [blame] | 1366 | SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL) |
| 1367 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
| 1368 | pm_runtime_force_resume) |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1369 | }; |
| 1370 | |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1371 | static const struct mtk_iommu_plat_data mt2712_data = { |
| 1372 | .m4u_plat = M4U_MT2712, |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1373 | .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE | |
| 1374 | MTK_IOMMU_TYPE_MM, |
Yong Wu | 9e3a2a6 | 2022-05-03 15:14:00 +0800 | [diff] [blame] | 1375 | .hw_list = &m4ulist, |
Chao Hao | b053bc7 | 2020-07-03 12:41:22 +0800 | [diff] [blame] | 1376 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1377 | .iova_region = single_domain, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1378 | .banks_num = 1, |
| 1379 | .banks_enable = {true}, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1380 | .iova_region_nr = ARRAY_SIZE(single_domain), |
Chao Hao | 37276e0 | 2020-07-03 12:41:23 +0800 | [diff] [blame] | 1381 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1382 | }; |
| 1383 | |
Chao Hao | 068c86e | 2020-07-03 12:41:27 +0800 | [diff] [blame] | 1384 | static const struct mtk_iommu_plat_data mt6779_data = { |
| 1385 | .m4u_plat = M4U_MT6779, |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1386 | .flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN | |
Yunfei Wang | 301c3ca | 2022-06-30 17:29:26 +0800 | [diff] [blame] | 1387 | MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN, |
Chao Hao | 068c86e | 2020-07-03 12:41:27 +0800 | [diff] [blame] | 1388 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1389 | .banks_num = 1, |
| 1390 | .banks_enable = {true}, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1391 | .iova_region = single_domain, |
| 1392 | .iova_region_nr = ARRAY_SIZE(single_domain), |
Chao Hao | 068c86e | 2020-07-03 12:41:27 +0800 | [diff] [blame] | 1393 | .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1394 | }; |
| 1395 | |
AngeloGioacchino Del Regno | 717ec15e | 2022-09-13 17:11:48 +0200 | [diff] [blame] | 1396 | static const struct mtk_iommu_plat_data mt6795_data = { |
| 1397 | .m4u_plat = M4U_MT6795, |
| 1398 | .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | |
| 1399 | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | |
| 1400 | TF_PORT_TO_ADDR_MT8173, |
| 1401 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
| 1402 | .banks_num = 1, |
| 1403 | .banks_enable = {true}, |
| 1404 | .iova_region = single_domain, |
| 1405 | .iova_region_nr = ARRAY_SIZE(single_domain), |
| 1406 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */ |
| 1407 | }; |
| 1408 | |
Fabien Parent | 3c21356 | 2020-09-07 12:16:49 +0200 | [diff] [blame] | 1409 | static const struct mtk_iommu_plat_data mt8167_data = { |
| 1410 | .m4u_plat = M4U_MT8167, |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1411 | .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, |
Fabien Parent | 3c21356 | 2020-09-07 12:16:49 +0200 | [diff] [blame] | 1412 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1413 | .banks_num = 1, |
| 1414 | .banks_enable = {true}, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1415 | .iova_region = single_domain, |
| 1416 | .iova_region_nr = ARRAY_SIZE(single_domain), |
Fabien Parent | 3c21356 | 2020-09-07 12:16:49 +0200 | [diff] [blame] | 1417 | .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */ |
| 1418 | }; |
| 1419 | |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1420 | static const struct mtk_iommu_plat_data mt8173_data = { |
| 1421 | .m4u_plat = M4U_MT8173, |
Fabien Parent | d1b5ef0 | 2020-09-07 12:16:48 +0200 | [diff] [blame] | 1422 | .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | |
AngeloGioacchino Del Regno | 86580ec | 2022-09-13 17:11:47 +0200 | [diff] [blame] | 1423 | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | |
| 1424 | TF_PORT_TO_ADDR_MT8173, |
Chao Hao | b053bc7 | 2020-07-03 12:41:22 +0800 | [diff] [blame] | 1425 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1426 | .banks_num = 1, |
| 1427 | .banks_enable = {true}, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1428 | .iova_region = single_domain, |
| 1429 | .iova_region_nr = ARRAY_SIZE(single_domain), |
Chao Hao | 37276e0 | 2020-07-03 12:41:23 +0800 | [diff] [blame] | 1430 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1431 | }; |
| 1432 | |
Yong Wu | 907ba6a | 2019-08-24 11:02:02 +0800 | [diff] [blame] | 1433 | static const struct mtk_iommu_plat_data mt8183_data = { |
| 1434 | .m4u_plat = M4U_MT8183, |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1435 | .flags = RESET_AXI | MTK_IOMMU_TYPE_MM, |
Chao Hao | b053bc7 | 2020-07-03 12:41:22 +0800 | [diff] [blame] | 1436 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1437 | .banks_num = 1, |
| 1438 | .banks_enable = {true}, |
Yong Wu | 585e58f | 2021-01-11 19:19:07 +0800 | [diff] [blame] | 1439 | .iova_region = single_domain, |
| 1440 | .iova_region_nr = ARRAY_SIZE(single_domain), |
Chao Hao | 37276e0 | 2020-07-03 12:41:23 +0800 | [diff] [blame] | 1441 | .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, |
Yong Wu | 907ba6a | 2019-08-24 11:02:02 +0800 | [diff] [blame] | 1442 | }; |
| 1443 | |
Yong Wu | e8d7cca | 2022-05-03 15:14:27 +0800 | [diff] [blame] | 1444 | static const struct mtk_iommu_plat_data mt8186_data_mm = { |
| 1445 | .m4u_plat = M4U_MT8186, |
| 1446 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
| 1447 | WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, |
| 1448 | .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20}, |
| 1449 | {MTK_INVALID_LARBID, 14, 16}, |
| 1450 | {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}}, |
| 1451 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
| 1452 | .banks_num = 1, |
| 1453 | .banks_enable = {true}, |
| 1454 | .iova_region = mt8192_multi_dom, |
| 1455 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
| 1456 | }; |
| 1457 | |
Yong Wu | 9e3489e | 2021-01-11 19:19:13 +0800 | [diff] [blame] | 1458 | static const struct mtk_iommu_plat_data mt8192_data = { |
| 1459 | .m4u_plat = M4U_MT8192, |
Yong Wu | 9ec30c0 | 2022-05-03 15:14:06 +0800 | [diff] [blame] | 1460 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
Yong Wu | d2e9a11 | 2022-05-03 15:14:08 +0800 | [diff] [blame] | 1461 | WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, |
Yong Wu | 9e3489e | 2021-01-11 19:19:13 +0800 | [diff] [blame] | 1462 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1463 | .banks_num = 1, |
| 1464 | .banks_enable = {true}, |
Yong Wu | 9e3489e | 2021-01-11 19:19:13 +0800 | [diff] [blame] | 1465 | .iova_region = mt8192_multi_dom, |
| 1466 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
| 1467 | .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, |
| 1468 | {0, 14, 16}, {0, 13, 18, 17}}, |
| 1469 | }; |
| 1470 | |
Yong Wu | ef68a19 | 2022-05-03 15:14:14 +0800 | [diff] [blame] | 1471 | static const struct mtk_iommu_plat_data mt8195_data_infra = { |
| 1472 | .m4u_plat = M4U_MT8195, |
| 1473 | .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | |
| 1474 | MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT, |
| 1475 | .pericfg_comp_str = "mediatek,mt8195-pericfg_ao", |
| 1476 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
Yong Wu | 7597e3c | 2022-05-03 15:14:26 +0800 | [diff] [blame] | 1477 | .banks_num = 5, |
| 1478 | .banks_enable = {true, false, false, false, true}, |
| 1479 | .banks_portmsk = {[0] = GENMASK(19, 16), /* PCIe */ |
| 1480 | [4] = GENMASK(31, 20), /* USB */ |
| 1481 | }, |
Yong Wu | ef68a19 | 2022-05-03 15:14:14 +0800 | [diff] [blame] | 1482 | .iova_region = single_domain, |
| 1483 | .iova_region_nr = ARRAY_SIZE(single_domain), |
| 1484 | }; |
| 1485 | |
| 1486 | static const struct mtk_iommu_plat_data mt8195_data_vdo = { |
| 1487 | .m4u_plat = M4U_MT8195, |
| 1488 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
| 1489 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, |
| 1490 | .hw_list = &m4ulist, |
| 1491 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1492 | .banks_num = 1, |
| 1493 | .banks_enable = {true}, |
Yong Wu | ef68a19 | 2022-05-03 15:14:14 +0800 | [diff] [blame] | 1494 | .iova_region = mt8192_multi_dom, |
| 1495 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
| 1496 | .larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11}, |
| 1497 | {13, 17, 15/* 17b */, 25}, {5}}, |
| 1498 | }; |
| 1499 | |
| 1500 | static const struct mtk_iommu_plat_data mt8195_data_vpp = { |
| 1501 | .m4u_plat = M4U_MT8195, |
| 1502 | .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | |
| 1503 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, |
| 1504 | .hw_list = &m4ulist, |
| 1505 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
Yong Wu | 99ca022 | 2022-05-03 15:14:20 +0800 | [diff] [blame] | 1506 | .banks_num = 1, |
| 1507 | .banks_enable = {true}, |
Yong Wu | ef68a19 | 2022-05-03 15:14:14 +0800 | [diff] [blame] | 1508 | .iova_region = mt8192_multi_dom, |
| 1509 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
| 1510 | .larbid_remap = {{1}, {3}, |
| 1511 | {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23}, |
| 1512 | {8}, {20}, {12}, |
| 1513 | /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */ |
| 1514 | {14, 16, 29, 26, 30, 31, 18}, |
| 1515 | {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}}, |
| 1516 | }; |
| 1517 | |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1518 | static const struct of_device_id mtk_iommu_of_ids[] = { |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1519 | { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, |
Chao Hao | 068c86e | 2020-07-03 12:41:27 +0800 | [diff] [blame] | 1520 | { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, |
AngeloGioacchino Del Regno | 717ec15e | 2022-09-13 17:11:48 +0200 | [diff] [blame] | 1521 | { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data}, |
Fabien Parent | 3c21356 | 2020-09-07 12:16:49 +0200 | [diff] [blame] | 1522 | { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data}, |
Yong Wu | cecdce9 | 2019-08-24 11:01:47 +0800 | [diff] [blame] | 1523 | { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, |
Yong Wu | 907ba6a | 2019-08-24 11:02:02 +0800 | [diff] [blame] | 1524 | { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, |
Yong Wu | e8d7cca | 2022-05-03 15:14:27 +0800 | [diff] [blame] | 1525 | { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */ |
Yong Wu | 9e3489e | 2021-01-11 19:19:13 +0800 | [diff] [blame] | 1526 | { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, |
Yong Wu | ef68a19 | 2022-05-03 15:14:14 +0800 | [diff] [blame] | 1527 | { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, |
| 1528 | { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo}, |
| 1529 | { .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp}, |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1530 | {} |
| 1531 | }; |
| 1532 | |
| 1533 | static struct platform_driver mtk_iommu_driver = { |
| 1534 | .probe = mtk_iommu_probe, |
| 1535 | .remove = mtk_iommu_remove, |
| 1536 | .driver = { |
| 1537 | .name = "mtk-iommu", |
Krzysztof Kozlowski | f53dd97 | 2020-07-27 20:18:42 +0200 | [diff] [blame] | 1538 | .of_match_table = mtk_iommu_of_ids, |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1539 | .pm = &mtk_iommu_pm_ops, |
| 1540 | } |
| 1541 | }; |
Yong Wu | 18d8c74 | 2021-03-26 11:23:37 +0800 | [diff] [blame] | 1542 | module_platform_driver(mtk_iommu_driver); |
Yong Wu | 0df4fab | 2016-02-23 01:20:50 +0800 | [diff] [blame] | 1543 | |
Yong Wu | 18d8c74 | 2021-03-26 11:23:37 +0800 | [diff] [blame] | 1544 | MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations"); |
| 1545 | MODULE_LICENSE("GPL v2"); |