/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

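/*
 * IS_LOW512() tests whether a physical address lies in the low 512MB that
 * can be reached through the unmapped, uncached KSEG1 segment; IS_KSEG1()
 * tests whether a virtual address already lies within KSEG1.
 */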
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

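/*
 * walk_system_ram_range() callback: returns 1 if any page in the range is
 * valid, non-reserved RAM, i.e. memory the page allocator may own and which
 * must therefore never be remapped.
 */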
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 * @prot_val:	page protection flags; only the cache coherency attribute
 *		(CCA) bits covered by _CACHE_MASK are used
 *
 * ioremap_prot gives the caller control over the cache coherency
 * attributes (CCA) of the mapping.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
		unsigned long prot_val)
{
	unsigned long flags = prot_val & _CACHE_MASK;
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	unsigned long vaddr;
	void __iomem *cpu_addr;

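	/*
	 * Give the platform a chance to provide the mapping itself; fall
	 * through to the generic code if it returns NULL.
	 */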
	cpu_addr = plat_ioremap(phys_addr, size, flags);
	if (cpu_addr)
		return cpu_addr;

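	/* Let the platform fix up the physical address, e.g. for boards with
	 * physical memory beyond 32 bits. */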
	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of the address space using
	 * KSEG1, otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Reserve a virtual address range from the vmalloc area for the
	 * mapping.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

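	/*
	 * Add the generic page attributes (global, present, readable,
	 * writable) on top of the caller's cache coherency attribute.
	 */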
	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			__pgprot(flags))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

/*
 * iounmap - tear down a mapping created by ioremap_prot().  Addresses in
 * KSEG1 were never mapped through the page tables, so only mappings in the
 * vmalloc area need to be unmapped.
 */
void iounmap(const volatile void __iomem *addr)
{
	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
		vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
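/*
 * Usage sketch (illustrative only, not part of this file): a driver mapping
 * a device register window with an explicit CCA and tearing it down again.
 * The base address 0x1f000000, the size 0x1000 and the register offsets are
 * made-up values.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_prot(0x1f000000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(0x1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *
 *	iounmap(regs);
 */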