/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/export.h>
#include <linux/highmem.h>

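/*
 * On configurations where phys_addr_t is 64 bit but PAE is not enabled,
 * physical addresses at or above 4 GiB cannot be mapped at all, so reject
 * any range that reaches past that boundary.
 */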
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map an address above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

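/*
 * Reserve a write-combining memtype for the physical range
 * [base, base + size) and report back, via @prot, the protection bits to
 * use when mapping pages from that range (the cache mode actually reserved
 * may differ from WC). Returns 0 on success or a negative error code.
 */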
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

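/*
 * Undo iomap_create_wc(): release the memtype reservation held for
 * [base, base + size).
 */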
void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

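/*
 * Map @pfn into one of this CPU's kmap_atomic fixmap slots with the given
 * protection bits. Preemption and pagefaults are disabled here and stay
 * disabled until the matching atomic unmap re-enables them.
 */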
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- conveys the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

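/*
 * Tear down a mapping set up by iomap_atomic_prot_pfn() and re-enable
 * pagefaults and preemption.
 */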
void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/* Only addresses inside the kmap_atomic fixmap window have a pte to clear. */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);