// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ioremap.h>

/*
 * Map a physical I/O range into the kernel's ioremap area with the given
 * page protection. Returns a cookie covering the requested range, including
 * any sub-page offset of @phys_addr, or NULL on failure.
 */
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* An early platform driver might end up here */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Page-align mappings */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);
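	/*
	 * Worked example (illustrative): with 4 KiB pages, phys_addr =
	 * 0x10000234 and size = 0x300 give offset = 0x234, an aligned
	 * phys_addr of 0x10000000 and size = PAGE_ALIGN(0x534) = 0x1000,
	 * so one page is mapped and the cookie returned below points
	 * 0x234 bytes into it.
	 */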

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START,
				    IOREMAP_END, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
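
/*
 * Usage sketch (illustrative, not part of the kernel sources): the usual
 * driver pattern built on top of the primitives in this file. The base
 * address and register offset below are made-up values.
 */
#if 0
#define EXAMPLE_DEV_BASE	0xfe000000UL	/* hypothetical MMIO base */
#define EXAMPLE_DEV_STATUS	0x04		/* hypothetical register offset */

static int example_dev_read_status(void)
{
	void __iomem *regs;
	u32 status;

	/* Map one page of device registers; NULL means the mapping failed. */
	regs = ioremap(EXAMPLE_DEV_BASE, PAGE_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Access the mapping through the MMIO accessors, never directly. */
	status = readl(regs + EXAMPLE_DEV_STATUS);
	pr_info("example device status: %#x\n", status);

	/* Hand back the exact cookie ioremap() returned. */
	iounmap(regs);
	return 0;
}
#endif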

#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
#endif
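
/*
 * Note (illustrative): an architecture that provides its own ioremap_prot()
 * suppresses the generic definition above by defining the macro in its
 * <asm/io.h>, along the lines of:
 *
 *	#define ioremap_prot ioremap_prot
 *	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 *				   unsigned long prot);
 *
 * The #ifndef iounmap guard below works the same way, and such an override
 * can still call generic_ioremap_prot()/generic_iounmap() for the common
 * case.
 */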

void generic_iounmap(volatile void __iomem *addr)
{
	/*
	 * The cookie handed out by ioremap() may carry a sub-page offset;
	 * mask it off to recover the page-aligned address vunmap() expects.
	 */
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	if (is_ioremap_addr(vaddr))
		vunmap(vaddr);
}

#ifndef iounmap
void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
#endif