/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (system_state == SYSTEM_BOOTING ?
				x > MAXMEM : !phys_addr_valid(x)) {
			return false;
		}
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
			is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

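/*
 * Illustrative sketch (not part of the original file) of how a caller
 * might exercise the translation helpers above.  "buf" is a hypothetical
 * kmalloc()ed buffer, which lives in the kernel's direct mapping; real
 * callers normally go through the virt_to_phys()/__pa() wrappers rather
 * than calling __phys_addr() directly:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *
 *	if (buf && __virt_addr_valid((unsigned long)buf))
 *		printk(KERN_DEBUG "va %p -> pa %lx\n", buf,
 *		       __phys_addr((unsigned long)buf));
 *	kfree(buf);
 */
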
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory: this is a
	 * BIOS-owned area, not kernel RAM, but it is generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

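/*
 * Illustrative sketch (not in the original file) of how the tri-state
 * result above is meant to be consumed: 1 means the whole range is RAM,
 * 0 means none of it is, and -1 means the range mixes RAM and non-RAM
 * pages:
 *
 *	switch (pagerange_is_ram(start, end)) {
 *	case 1:		// all RAM: must not be ioremap()ed
 *	case -1:	// mixed range: reject or handle conservatively
 *		break;
 *	case 0:		// no RAM at all: safe to remap
 *		break;
 *	}
 */
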
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

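/*
 * Typical driver-side usage (an illustrative sketch, not part of this
 * file; "pdev" and the register offset are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);	// always go through the mmio helpers,
 *				// never dereference the pointer directly
 *	iounmap(regs);
 */
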
/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

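/*
 * Write-combining suits framebuffer-style apertures where the ordering
 * of individual stores doesn't matter.  Illustrative sketch (the BAR
 * index is hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 1),
 *				      pci_resource_len(pdev, 1));
 *	if (fb) {
 *		memset_io(fb, 0, pci_resource_len(pdev, 1));
 *		iounmap(fb);
 *	}
 */
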
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
		unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

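/*
 * ioremap_prot() serves callers that carry pre-computed page-protection
 * bits around; only the cache-attribute bits are honoured here.  Minimal
 * sketch (illustrative only, assuming a vma whose protections should be
 * mirrored):
 *
 *	void __iomem *p = ioremap_prot(phys, PAGE_SIZE,
 *				       pgprot_val(vma->vm_page_prot));
 */
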
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

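/*
 * The /dev/mem read path uses the pair above roughly like this
 * (illustrative sketch, simplified from the drivers/char/mem.c logic,
 * with error handling elided):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
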
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot\n",
		       phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

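/*
 * Early-boot usage sketch (illustrative, not from this file): before the
 * normal ioremap() machinery is up, e.g. while parsing firmware tables,
 * a physical range is mapped through the fixmap slots and must be torn
 * down again with a matching size ("table_phys" is a hypothetical
 * physical address discovered earlier):
 *
 *	struct acpi_table_header *hdr;
 *
 *	hdr = (void *)early_ioremap(table_phys, sizeof(*hdr));
 *	if (hdr) {
 *		pr_info("table length %u\n", hdr->length);
 *		early_iounmap((void __iomem *)hdr, sizeof(*hdr));
 *	}
 */
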
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}