// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 */

#include <asm/div64.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#define to_qcom_provider(_provider) \
	container_of(_provider, struct qcom_icc_provider, provider)

struct qcom_icc_provider {
	struct icc_provider provider;
	struct device *dev;
	struct qcom_icc_bcm **bcms;
	size_t num_bcms;
};

/**
 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
 * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
 * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
 * @vcd: virtual clock domain that this bcm belongs to
 * @reserved: reserved field
 */
struct bcm_db {
	__le32 unit;
	__le16 width;
	u8 vcd;
	u8 reserved;
};

#define SDM845_MAX_LINKS	43
#define SDM845_MAX_BCMS		30
#define SDM845_MAX_BCM_PER_NODE	2
#define SDM845_MAX_VCD		10

/*
 * The AMC bucket denotes constraints that are applied to hardware when
 * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
 * when the execution environment transitions between active and low power mode.
 */
#define QCOM_ICC_BUCKET_AMC		0
#define QCOM_ICC_BUCKET_WAKE		1
#define QCOM_ICC_BUCKET_SLEEP		2
#define QCOM_ICC_NUM_BUCKETS		3
#define QCOM_ICC_TAG_AMC		BIT(QCOM_ICC_BUCKET_AMC)
#define QCOM_ICC_TAG_WAKE		BIT(QCOM_ICC_BUCKET_WAKE)
#define QCOM_ICC_TAG_SLEEP		BIT(QCOM_ICC_BUCKET_SLEEP)
#define QCOM_ICC_TAG_ACTIVE_ONLY	(QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
#define QCOM_ICC_TAG_ALWAYS		(QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
					 QCOM_ICC_TAG_SLEEP)

/**
 * struct qcom_icc_node - Qualcomm specific interconnect nodes
 * @name: the node name used in debugfs
 * @links: an array of nodes where we can go next while traversing
 * @id: a unique node identifier
 * @num_links: the total number of @links
 * @channels: number of channels at this node
 * @buswidth: width of the interconnect between a node and the bus
 * @sum_avg: current sum aggregate value of all avg bw requests
 * @max_peak: current max aggregate value of all peak bw requests
 * @bcms: list of bcms associated with this logical node
 * @num_bcms: number of @bcms
 */
struct qcom_icc_node {
	const char *name;
	u16 links[SDM845_MAX_LINKS];
	u16 id;
	u16 num_links;
	u16 channels;
	u16 buswidth;
	u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
	u64 max_peak[QCOM_ICC_NUM_BUCKETS];
	struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
	size_t num_bcms;
};

/**
 * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
 * known as Bus Clock Manager (BCM)
 * @name: the bcm node name used to fetch BCM data from command db
 * @type: latency or bandwidth bcm
 * @addr: address offsets used when voting to RPMh
 * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
 * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
 * @dirty: flag used to indicate whether the bcm needs to be committed
 * @keepalive: flag used to indicate whether a keepalive is required
 * @aux_data: auxiliary data used when calculating threshold values and
 * communicating with RPMh
 * @list: used to link to other bcms when compiling lists for commit
 * @num_nodes: total number of @nodes
 * @nodes: list of qcom_icc_nodes that this BCM encapsulates
 */
struct qcom_icc_bcm {
	const char *name;
	u32 type;
	u32 addr;
	u64 vote_x[QCOM_ICC_NUM_BUCKETS];
	u64 vote_y[QCOM_ICC_NUM_BUCKETS];
	bool dirty;
	bool keepalive;
	struct bcm_db aux_data;
	struct list_head list;
	size_t num_nodes;
	struct qcom_icc_node *nodes[];
};

struct qcom_icc_fabric {
	struct qcom_icc_node **nodes;
	size_t num_nodes;
};

struct qcom_icc_desc {
	struct qcom_icc_node **nodes;
	size_t num_nodes;
	struct qcom_icc_bcm **bcms;
	size_t num_bcms;
};

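/*
 * DEFINE_QNODE() instantiates one static struct qcom_icc_node and stringifies
 * its C identifier for the debugfs name. For example, the qhm_qup1 entry
 * below expands to roughly:
 *
 *	static struct qcom_icc_node qhm_qup1 = {
 *		.id = MASTER_BLSP_1,
 *		.name = "qhm_qup1",
 *		.channels = 1,
 *		.buswidth = 4,
 *		.num_links = 1,
 *		.links = { SLAVE_A1NOC_SNOC },
 *	};
 */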
#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
		     _numlinks, ...) \
	static struct qcom_icc_node _name = { \
		.id = _id, \
		.name = #_name, \
		.channels = _channels, \
		.buswidth = _buswidth, \
		.num_links = _numlinks, \
		.links = { __VA_ARGS__ }, \
	}

DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);

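/*
 * DEFINE_QBCM() instantiates one static struct qcom_icc_bcm with the name it
 * is known by in command db, its keepalive policy and the nodes it covers.
 * The remaining fields (addr, aux_data and the vote arrays) are left zeroed
 * here and are filled in at probe time by qcom_icc_bcm_init() and
 * bcm_aggregate().
 */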
#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
	static struct qcom_icc_bcm _name = { \
		.name = _bcmname, \
		.keepalive = _keepalive, \
		.num_nodes = _numnodes, \
		.nodes = { __VA_ARGS__ }, \
	}

DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);

static struct qcom_icc_node *rsc_hlos_nodes[] = {
	[MASTER_APPSS_PROC] = &acm_l3,
	[MASTER_TCU_0] = &acm_tcu,
	[MASTER_LLCC] = &llcc_mc,
	[MASTER_GNOC_CFG] = &pm_gnoc_cfg,
	[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
	[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
	[MASTER_CNOC_DC_NOC] = &qhm_cnoc,
	[MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
	[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
	[MASTER_QDSS_BAM] = &qhm_qdss_bam,
	[MASTER_BLSP_1] = &qhm_qup1,
	[MASTER_BLSP_2] = &qhm_qup2,
	[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
	[MASTER_SPDM] = &qhm_spdm,
	[MASTER_TIC] = &qhm_tic,
	[MASTER_TSIF] = &qhm_tsif,
	[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
	[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
	[MASTER_GNOC_MEM_NOC] = &qnm_apps,
	[MASTER_CNOC_A2NOC] = &qnm_cnoc,
	[MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
	[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
	[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
	[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
	[MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
	[MASTER_SNOC_CNOC] = &qnm_snoc,
	[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
	[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
	[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
	[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
	[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
	[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
	[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
	[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
	[MASTER_CRYPTO] = &qxm_crypto,
	[MASTER_GFX3D] = &qxm_gpu,
	[MASTER_IPA] = &qxm_ipa,
	[MASTER_MDP0] = &qxm_mdp0,
	[MASTER_MDP1] = &qxm_mdp1,
	[MASTER_PIMEM] = &qxm_pimem,
	[MASTER_ROTATOR] = &qxm_rot,
	[MASTER_VIDEO_P0] = &qxm_venus0,
	[MASTER_VIDEO_P1] = &qxm_venus1,
	[MASTER_VIDEO_PROC] = &qxm_venus_arm9,
	[MASTER_GIC] = &xm_gic,
	[MASTER_PCIE_1] = &xm_pcie3_1,
	[MASTER_PCIE_0] = &xm_pcie_0,
	[MASTER_QDSS_DAP] = &xm_qdss_dap,
	[MASTER_QDSS_ETR] = &xm_qdss_etr,
	[MASTER_SDCC_2] = &xm_sdc2,
	[MASTER_SDCC_4] = &xm_sdc4,
	[MASTER_UFS_CARD] = &xm_ufs_card,
	[MASTER_UFS_MEM] = &xm_ufs_mem,
	[MASTER_USB3_0] = &xm_usb3_0,
	[MASTER_USB3_1] = &xm_usb3_1,
	[SLAVE_EBI1] = &ebi,
	[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
	[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
	[SLAVE_AOP] = &qhs_aop,
	[SLAVE_AOSS] = &qhs_aoss,
	[SLAVE_APPSS] = &qhs_apss,
	[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
	[SLAVE_CLK_CTL] = &qhs_clk_ctl,
	[SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
	[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
	[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
	[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
	[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
	[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
	[SLAVE_GLM] = &qhs_glm,
	[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
	[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
	[SLAVE_IPA_CFG] = &qhs_ipa,
	[SLAVE_LLCC_CFG] = &qhs_llcc,
	[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
	[SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
	[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
	[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
	[SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
	[SLAVE_PDM] = &qhs_pdm,
	[SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
	[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
	[SLAVE_PRNG] = &qhs_prng,
	[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
	[SLAVE_BLSP_2] = &qhs_qupv3_north,
	[SLAVE_BLSP_1] = &qhs_qupv3_south,
	[SLAVE_SDCC_2] = &qhs_sdc2,
	[SLAVE_SDCC_4] = &qhs_sdc4,
	[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
	[SLAVE_SPDM_WRAPPER] = &qhs_spdm,
	[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
	[SLAVE_TCSR] = &qhs_tcsr,
	[SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
	[SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
	[SLAVE_TSIF] = &qhs_tsif,
	[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
	[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
	[SLAVE_USB3_0] = &qhs_usb3_0,
	[SLAVE_USB3_1] = &qhs_usb3_1,
	[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
	[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
	[SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
	[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
	[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
	[SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
	[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
	[SLAVE_SNOC_CNOC] = &qns_cnoc,
	[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
	[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
	[SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
	[SLAVE_LLCC] = &qns_llcc,
	[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
	[SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
	[SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
	[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
	[SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
	[SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
	[SLAVE_IMEM] = &qxs_imem,
	[SLAVE_PCIE_0] = &qxs_pcie,
	[SLAVE_PCIE_1] = &qxs_pcie_gen3,
	[SLAVE_PIMEM] = &qxs_pimem,
	[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
	[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
	[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
	[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
	[SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
	[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
	[SLAVE_SERVICE_SNOC] = &srvc_snoc,
	[SLAVE_QDSS_STM] = &xs_qdss_stm,
	[SLAVE_TCU] = &xs_sys_tcu_cfg,
};

static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
	&bcm_acv,
	&bcm_mc0,
	&bcm_sh0,
	&bcm_mm0,
	&bcm_sh1,
	&bcm_mm1,
	&bcm_sh2,
	&bcm_mm2,
	&bcm_sh3,
	&bcm_mm3,
	&bcm_sh5,
	&bcm_sn0,
	&bcm_ce0,
	&bcm_cn0,
	&bcm_qup0,
	&bcm_sn1,
	&bcm_sn2,
	&bcm_sn3,
	&bcm_sn4,
	&bcm_sn5,
	&bcm_sn6,
	&bcm_sn7,
	&bcm_sn8,
	&bcm_sn9,
	&bcm_sn11,
	&bcm_sn12,
	&bcm_sn14,
	&bcm_sn15,
};

static struct qcom_icc_desc sdm845_rsc_hlos = {
	.nodes = rsc_hlos_nodes,
	.num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
	.bcms = rsc_hlos_bcms,
	.num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
};

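/*
 * Resolve a BCM against command db: look up its RPMh address and its bcm_db
 * aux data (unit, width, vcd), then link every node it covers back to the
 * BCM so that per-node bandwidth votes can later be rolled up by
 * bcm_aggregate().
 */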
static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
{
	struct qcom_icc_node *qn;
	const struct bcm_db *data;
	size_t data_count;
	int i;

	bcm->addr = cmd_db_read_addr(bcm->name);
	if (!bcm->addr) {
		dev_err(dev, "%s could not find RPMh address\n",
			bcm->name);
		return -EINVAL;
	}

	data = cmd_db_read_aux_data(bcm->name, &data_count);
	if (IS_ERR(data)) {
		dev_err(dev, "%s command db read error (%ld)\n",
			bcm->name, PTR_ERR(data));
		return PTR_ERR(data);
	}
	if (!data_count) {
		dev_err(dev, "%s command db missing or partial aux data\n",
			bcm->name);
		return -EINVAL;
	}

	bcm->aux_data.unit = le32_to_cpu(data->unit);
	bcm->aux_data.width = le16_to_cpu(data->width);
	bcm->aux_data.vcd = data->vcd;
	bcm->aux_data.reserved = data->reserved;

	/*
	 * Link Qnodes to their respective BCMs
	 */
	for (i = 0; i < bcm->num_nodes; i++) {
		qn = bcm->nodes[i];
		qn->bcms[qn->num_bcms] = bcm;
		qn->num_bcms++;
	}

	return 0;
}

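/*
 * Pack a single TCS command for one BCM: @addr is the RPMh address read from
 * command db and the data word carries the commit/valid flags plus the x/y
 * votes (clamped to BCM_TCS_CMD_VOTE_MASK), encoded with the BCM_TCS_CMD()
 * helper.
 */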
static void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			u32 addr, bool commit)
{
	bool valid = true;

	if (!cmd)
		return;

	if (vote_x == 0 && vote_y == 0)
		valid = false;

	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
		vote_x = BCM_TCS_CMD_VOTE_MASK;

	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
		vote_y = BCM_TCS_CMD_VOTE_MASK;

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait for completion flag on commands that need to be
	 * completed before the next command.
	 */
	if (commit)
		cmd->wait = true;
}

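/*
 * Walk a VCD-sorted list of dirty BCMs and turn it into a flat array of TCS
 * commands for the requested bucket: the commit flag is set only on the last
 * BCM of each VCD, and n[] records how many commands go into each
 * rpmh_write_batch() payload.
 */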
static void tcs_list_gen(struct list_head *bcm_list, int bucket,
			 struct tcs_cmd tcs_list[SDM845_MAX_VCD],
			 int n[SDM845_MAX_VCD])
{
	struct qcom_icc_bcm *bcm;
	bool commit;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	memset(n, 0, sizeof(int) * SDM845_MAX_VCD);

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}
		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them
		 * across multiple payloads when they are under the same VCD.
		 * This is to ensure that every BCM is committed, since we
		 * only set the commit bit on the last BCM request of every
		 * VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}

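/*
 * Fold the per-node, per-bucket bandwidth sums into the BCM's vote_x/vote_y
 * values. For each bucket this computes, per the arithmetic below:
 *
 *	vote_x = 1000 * max_over_nodes(sum_avg * width / (buswidth * channels)) / unit
 *	vote_y = 1000 * max_over_nodes(max_peak * width / buswidth) / unit
 *
 * Keepalive BCMs are never allowed to drop to a zero vote in the AMC and
 * WAKE buckets.
 */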
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
	size_t i, bucket;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 temp;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		for (i = 0; i < bcm->num_nodes; i++) {
			temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
			do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
			agg_avg[bucket] = max(agg_avg[bucket], temp);

			temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
			do_div(temp, bcm->nodes[i]->buswidth);
			agg_peak[bucket] = max(agg_peak[bucket], temp);
		}

		temp = agg_avg[bucket] * 1000ULL;
		do_div(temp, bcm->aux_data.unit);
		bcm->vote_x[bucket] = temp;

		temp = agg_peak[bucket] * 1000ULL;
		do_div(temp, bcm->aux_data.unit);
		bcm->vote_y[bucket] = temp;
	}

	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
	}

	bcm->dirty = false;
}

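/*
 * Zero the per-bucket sums on a node; the interconnect core invokes this
 * callback before re-walking all requests, so each aggregation pass starts
 * from a clean slate.
 */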
static void qcom_icc_pre_aggregate(struct icc_node *node)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

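/*
 * Accumulate one request into the buckets selected by its path tag (untagged
 * requests count towards all buckets) and mark every BCM attached to this
 * node as dirty so that qcom_icc_set() re-aggregates and re-votes it.
 */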
static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
			      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = QCOM_ICC_TAG_ALWAYS;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);

	for (i = 0; i < qn->num_bcms; i++)
		qn->bcms[i]->dirty = true;

	return 0;
}

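/*
 * Commit the aggregated votes to RPMh: dirty BCMs are aggregated and sent as
 * an AMC batch in the active-only state, then WAKE and SLEEP batches are
 * sent only for the BCMs whose wake and sleep votes actually differ.
 */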
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	struct tcs_cmd cmds[SDM845_MAX_BCMS];
	struct list_head commit_list;
	int commit_idx[SDM845_MAX_VCD];
	int ret = 0, i;

	if (!src)
		node = dst;
	else
		node = src;

	qp = to_qcom_provider(node->provider);

	INIT_LIST_HEAD(&commit_list);

	for (i = 0; i < qp->num_bcms; i++) {
		if (qp->bcms[i]->dirty) {
			bcm_aggregate(qp->bcms[i]);
			list_add_tail(&qp->bcms[i]->list, &commit_list);
		}
	}

	/*
	 * Construct the command list based on a pre-ordered list of BCMs
	 * sorted by VCD.
	 */
	tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);

	if (!commit_idx[0])
		return ret;

	ret = rpmh_invalidate(qp->dev);
	if (ret) {
		pr_err("Error invalidating RPMH client (%d)\n", ret);
		return ret;
	}

	ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&commit_list);

	for (i = 0; i < qp->num_bcms; i++) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
			list_add_tail(&qp->bcms[i]->list, &commit_list);
		}
	}

	if (list_empty(&commit_list))
		return ret;

	tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		return ret;
	}

	tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		return ret;
	}

	return ret;
}

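/*
 * sort() comparator: order the BCM table by ascending VCD so that
 * tcs_list_gen() can batch commands per VCD.
 */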
static int cmp_vcd(const void *_l, const void *_r)
{
	const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
	const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;

	if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
		return -1;
	else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
		return 0;
	else
		return 1;
}

static int qnoc_probe(struct platform_device *pdev)
{
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node **qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	provider = &qp->provider;
	provider->dev = &pdev->dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_aggregate;
	provider->aggregate = qcom_icc_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	INIT_LIST_HEAD(&provider->nodes);
	provider->data = data;

	qp->dev = &pdev->dev;
	qp->bcms = desc->bcms;
	qp->num_bcms = desc->num_bcms;

	ret = icc_provider_add(provider);
	if (ret) {
		dev_err(&pdev->dev, "error adding interconnect provider\n");
		return ret;
	}

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
			qnodes[i]->name, node->id);

		/* populate links */
		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	for (i = 0; i < qp->num_bcms; i++)
		qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);

	/*
	 * Pre-sort the BCMs by VCD to make it easy to generate a command list
	 * that groups BCMs with the same VCD together. VCDs are numbered with
	 * the lowest being the most expensive time-wise, ensuring that those
	 * commands are sent earliest in the queue.
	 */
	sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);

	platform_set_drvdata(pdev, qp);

	dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");

	return ret;
err:
	list_for_each_entry(node, &provider->nodes, node_list) {
		icc_node_del(node);
		icc_node_destroy(node->id);
	}

	icc_provider_del(provider);
	return ret;
}

static int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
	struct icc_provider *provider = &qp->provider;
	struct icc_node *n, *tmp;

	list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return icc_provider_del(provider);
}

static const struct of_device_id qnoc_of_match[] = {
	{ .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
	{ },
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);

static struct platform_driver qnoc_driver = {
	.probe = qnoc_probe,
	.remove = qnoc_remove,
	.driver = {
		.name = "qnoc-sdm845",
		.of_match_table = qnoc_of_match,
	},
};
module_platform_driver(qnoc_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
MODULE_LICENSE("GPL v2");