// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ioremap.h>

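/**
 * generic_ioremap_prot - map physical memory into the kernel's ioremap space
 * @phys_addr: physical/bus address of the region to map
 * @size: size of the region in bytes
 * @prot: page protection to apply to the mapping
 *
 * Maps the page-aligned range containing @phys_addr .. @phys_addr + @size - 1
 * into a VM_IOREMAP area between IOREMAP_START and IOREMAP_END.  Returns the
 * virtual address corresponding to @phys_addr, or NULL on failure.
 */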
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* An early platform driver might end up here */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Page-align mappings */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START,
				    IOREMAP_END, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

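/*
 * Default ioremap_prot(): an architecture can override it by defining an
 * ioremap_prot macro in its <asm/io.h> and may still reuse
 * generic_ioremap_prot() as a helper.
 */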
#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
#endif

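/**
 * generic_iounmap - unmap a region mapped with generic_ioremap_prot()
 * @addr: virtual address returned by the matching ioremap call
 *
 * Tears down the mapping and frees the underlying vmalloc area.  Addresses
 * outside the ioremap range are silently ignored.
 */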
void generic_iounmap(volatile void __iomem *addr)
{
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	if (is_ioremap_addr(vaddr))
		vunmap(vaddr);
}

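/*
 * Default iounmap(): like ioremap_prot() above, an architecture can override
 * it by defining an iounmap macro and reuse generic_iounmap() internally.
 */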
#ifndef iounmap
void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
#endif