// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI AM33XX SRAM EMIF Driver
 *
 * Copyright (C) 2016-2017 Texas Instruments Inc.
 *	Dave Gerlach
 */

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sram.h>
#include <linux/ti-emif-sram.h>

#include "emif.h"

#define TI_EMIF_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
					 (unsigned long)&ti_emif_sram)

#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES	0x00a0

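/*
 * struct ti_emif_data - driver state for the SRAM copy of the EMIF PM code
 *
 * Field notes added for clarity, derived from how ti_emif_alloc_sram()
 * populates this structure:
 * @ti_emif_sram_phys:      physical base of the code copy in ocmcram
 * @ti_emif_sram_data_phys: physical base of the register save area
 * @ti_emif_sram_virt:      virtual base of the code copy in ocmcram
 * @ti_emif_sram_data_virt: virtual base of the register save area
 * @sram_pool_code:         gen_pool backing the executable code copy
 * @sram_pool_data:         gen_pool backing the register save area
 * @pm_data:                parameters pushed alongside the SRAM code
 * @pm_functions:           suspend/resume entry points into the SRAM copy
 */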
struct ti_emif_data {
	phys_addr_t ti_emif_sram_phys;
	phys_addr_t ti_emif_sram_data_phys;
	unsigned long ti_emif_sram_virt;
	unsigned long ti_emif_sram_data_virt;
	struct gen_pool *sram_pool_code;
	struct gen_pool *sram_pool_data;
	struct ti_emif_pm_data pm_data;
	struct ti_emif_pm_functions pm_functions;
};

static struct ti_emif_data *emif_instance;

static u32 sram_suspend_address(struct ti_emif_data *emif_data,
				unsigned long addr)
{
	return (emif_data->ti_emif_sram_virt +
		TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
}

static phys_addr_t sram_resume_address(struct ti_emif_data *emif_data,
				       unsigned long addr)
{
	return ((unsigned long)emif_data->ti_emif_sram_phys +
		TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
}
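
/*
 * Worked example (illustrative only; the addresses are made up): if the
 * kernel image places ti_emif_sram at 0xc0801000 and ti_emif_enter_sr at
 * 0xc0801080, then TI_EMIF_SRAM_SYMBOL_OFFSET(ti_emif_enter_sr) is 0x80.
 * sram_suspend_address() and sram_resume_address() rebase that offset onto
 * the virtual and physical base of the ocmcram copy respectively, so the
 * relocated routine can be reached both with the MMU on (suspend path) and
 * with the MMU off (resume path).
 */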

static void ti_emif_free_sram(struct ti_emif_data *emif_data)
{
	gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
		      ti_emif_sram_sz);
	gen_pool_free(emif_data->sram_pool_data,
		      emif_data->ti_emif_sram_data_virt,
		      sizeof(struct emif_regs_amx3));
}

static int ti_emif_alloc_sram(struct device *dev,
			      struct ti_emif_data *emif_data)
{
	struct device_node *np = dev->of_node;
	int ret;

	emif_data->sram_pool_code = of_gen_pool_get(np, "sram", 0);
	if (!emif_data->sram_pool_code) {
		dev_err(dev, "Unable to get sram pool for ocmcram code\n");
		return -ENODEV;
	}

	emif_data->ti_emif_sram_virt =
			gen_pool_alloc(emif_data->sram_pool_code,
				       ti_emif_sram_sz);
	if (!emif_data->ti_emif_sram_virt) {
		dev_err(dev, "Unable to allocate code memory from ocmcram\n");
		return -ENOMEM;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_phys =
			gen_pool_virt_to_phys(emif_data->sram_pool_code,
					      emif_data->ti_emif_sram_virt);

	/* Get sram pool for data section and allocate space */
	emif_data->sram_pool_data = of_gen_pool_get(np, "sram", 1);
	if (!emif_data->sram_pool_data) {
		dev_err(dev, "Unable to get sram pool for ocmcram data\n");
		ret = -ENODEV;
		goto err_free_sram_code;
	}

	emif_data->ti_emif_sram_data_virt =
			gen_pool_alloc(emif_data->sram_pool_data,
				       sizeof(struct emif_regs_amx3));
	if (!emif_data->ti_emif_sram_data_virt) {
		dev_err(dev, "Unable to allocate data memory from ocmcram\n");
		ret = -ENOMEM;
		goto err_free_sram_code;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_data_phys =
		gen_pool_virt_to_phys(emif_data->sram_pool_data,
				      emif_data->ti_emif_sram_data_virt);
	/*
	 * These functions are called during the suspend path while the MMU
	 * is still on, so add the virtual base to the offset to form an
	 * absolute address.
	 */
	emif_data->pm_functions.save_context =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_save_context);
	emif_data->pm_functions.enter_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_enter_sr);
	emif_data->pm_functions.abort_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_abort_sr);

	/*
	 * These are called during the resume path when the MMU is not yet
	 * enabled, so the physical address is used instead.
	 */
	emif_data->pm_functions.restore_context =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_restore_context);
	emif_data->pm_functions.exit_sr =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_exit_sr);
	emif_data->pm_functions.run_hw_leveling =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_run_hw_leveling);

	emif_data->pm_data.regs_virt =
		(struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
	emif_data->pm_data.regs_phys = emif_data->ti_emif_sram_data_phys;

	return 0;

err_free_sram_code:
	gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
		      ti_emif_sram_sz);
	return ret;
}

static int ti_emif_push_sram(struct device *dev, struct ti_emif_data *emif_data)
{
	void *copy_addr;
	u32 data_addr;

	copy_addr = sram_exec_copy(emif_data->sram_pool_code,
				   (void *)emif_data->ti_emif_sram_virt,
				   &ti_emif_sram, ti_emif_sram_sz);
	if (!copy_addr) {
		dev_err(dev, "Cannot copy emif code to sram\n");
		return -ENODEV;
	}

	data_addr = sram_suspend_address(emif_data,
					 (unsigned long)&ti_emif_pm_sram_data);
	copy_addr = sram_exec_copy(emif_data->sram_pool_code,
				   (void *)data_addr,
				   &emif_data->pm_data,
				   sizeof(emif_data->pm_data));
	if (!copy_addr) {
		dev_err(dev, "Cannot copy emif data to code sram\n");
		return -ENODEV;
	}

	return 0;
}
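
/*
 * Note for readers (an observation about the code above, not new behaviour):
 * ti_emif_pm_sram_data is a symbol inside the relocatable ti_emif_sram blob,
 * so copying pm_data to sram_suspend_address(&ti_emif_pm_sram_data) places
 * the EMIF register addresses right next to the relocated code, where the
 * position-independent SRAM routines can find them relative to their own
 * location.
 */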

/*
 * Due to Usage Note 3.1.2 "DDR3: JEDEC Compliance for Maximum
 * Self-Refresh Command Limit" found in AM335x Silicon Errata
 * (Document SPRZ360F Revised November 2013) we must configure
 * the self refresh delay timer to 0xA (8192 cycles) to avoid
 * generating too many refresh commands from the EMIF.
 */
static void ti_emif_configure_sr_delay(struct ti_emif_data *emif_data)
{
	writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
	       (emif_data->pm_data.ti_emif_base_addr_virt +
		EMIF_POWER_MANAGEMENT_CONTROL));

	writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
	       (emif_data->pm_data.ti_emif_base_addr_virt +
		EMIF_POWER_MANAGEMENT_CTRL_SHDW));
}
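
/*
 * Note on the value written above (an inference from the usual TI EMIF
 * PWR_MGMT_CTRL layout, with SR_TIM in bits 7:4 -- double-check against the
 * AM335x TRM): 0x00a0 places 0xA in the SR_TIM field, and the EMIF enters
 * self-refresh after roughly 2^(SR_TIM + 3) idle cycles, i.e.
 * 2^(10 + 3) = 8192, which is where the 8192_CYCLES macro name comes from.
 */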

/**
 * ti_emif_copy_pm_function_table - copy the PM function table into sram
 * @sram_pool: pointer to struct gen_pool where dst resides
 * @dst: void * to the address to which the table should be copied
 *
 * Returns 0 on success, or an error code if the table is not available
 */
int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst)
{
	void *copy_addr;

	if (!emif_instance)
		return -ENODEV;

	copy_addr = sram_exec_copy(sram_pool, dst,
				   &emif_instance->pm_functions,
				   sizeof(emif_instance->pm_functions));
	if (!copy_addr)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(ti_emif_copy_pm_function_table);
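
/*
 * Usage sketch (illustrative only; my_sram_pool is a hypothetical pool owned
 * by the caller, e.g. obtained via of_gen_pool_get()): a platform PM driver
 * can pull its own copy of the function table into executable SRAM:
 *
 *	void *dst;
 *	int ret;
 *
 *	dst = (void *)gen_pool_alloc(my_sram_pool,
 *				     sizeof(struct ti_emif_pm_functions));
 *	if (!dst)
 *		return -ENOMEM;
 *	ret = ti_emif_copy_pm_function_table(my_sram_pool, dst);
 *	if (ret)
 *		return ret;	// -ENODEV if this driver has not probed yet
 */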

/**
 * ti_emif_get_mem_type - return the memory type in use
 *
 * Returns the memory type value read from the EMIF, or an error code on
 * failure
 */
int ti_emif_get_mem_type(void)
{
	unsigned long temp;

	if (!emif_instance)
		return -ENODEV;

	temp = readl(emif_instance->pm_data.ti_emif_base_addr_virt +
		     EMIF_SDRAM_CONFIG);

	temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
	return temp;
}
EXPORT_SYMBOL_GPL(ti_emif_get_mem_type);
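
/*
 * Usage sketch (illustrative only): the value returned is the raw SDRAM_TYPE
 * field of SDRAM_CONFIG. The DDR3 encoding shown here is an assumption about
 * that field -- consult the AM335x/AM437x TRM for the exact mapping:
 *
 *	int mem_type = ti_emif_get_mem_type();
 *
 *	if (mem_type < 0)
 *		return mem_type;	// EMIF driver not probed yet
 *	if (mem_type != 3)		// e.g. require DDR3 (assumed encoding)
 *		return -EINVAL;
 */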

static const struct of_device_id ti_emif_of_match[] = {
	{ .compatible = "ti,emif-am3352", .data =
					(void *)EMIF_SRAM_AM33_REG_LAYOUT, },
	{ .compatible = "ti,emif-am4372", .data =
					(void *)EMIF_SRAM_AM43_REG_LAYOUT, },
	{},
};
MODULE_DEVICE_TABLE(of, ti_emif_of_match);

#ifdef CONFIG_PM_SLEEP
static int ti_emif_resume(struct device *dev)
{
	unsigned long tmp =
			__raw_readl((void *)emif_instance->ti_emif_sram_virt);

	/*
	 * Check whether what we are copying is already present in the
	 * first word at the destination; only copy if it is not, which
	 * indicates we have lost context and the SRAM no longer contains
	 * the PM code.
	 */
	if (tmp != ti_emif_sram)
		ti_emif_push_sram(dev, emif_instance);

	return 0;
}

static int ti_emif_suspend(struct device *dev)
{
	/*
	 * The contents will still be present in DDR, so there is no need
	 * to explicitly save them.
	 */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int ti_emif_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *res;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct ti_emif_data *emif_data;

	emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL);
	if (!emif_data)
		return -ENOMEM;

	match = of_match_device(ti_emif_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev,
									   res);
	if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) {
		ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
		return ret;
	}

	emif_data->pm_data.ti_emif_base_addr_phys = res->start;

	ti_emif_configure_sr_delay(emif_data);

	ret = ti_emif_alloc_sram(dev, emif_data);
	if (ret)
		return ret;

	ret = ti_emif_push_sram(dev, emif_data);
	if (ret)
		goto fail_free_sram;

	emif_instance = emif_data;

	return 0;

fail_free_sram:
	ti_emif_free_sram(emif_data);

	return ret;
}

static int ti_emif_remove(struct platform_device *pdev)
{
	struct ti_emif_data *emif_data = emif_instance;

	emif_instance = NULL;

	ti_emif_free_sram(emif_data);

	return 0;
}

static const struct dev_pm_ops ti_emif_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ti_emif_suspend, ti_emif_resume)
};

static struct platform_driver ti_emif_driver = {
	.probe = ti_emif_probe,
	.remove = ti_emif_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(ti_emif_of_match),
		.pm = &ti_emif_pm_ops,
	},
};
module_platform_driver(ti_emif_driver);

MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
MODULE_DESCRIPTION("Texas Instruments SRAM EMIF driver");
MODULE_LICENSE("GPL v2");