/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
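/*
 * Worked example (illustrative only, assuming the common ARM values of
 * SHMLBA = 4 * PAGE_SIZE = 16KiB and PAGE_SIZE = 4KiB): COLOUR_ALIGN
 * rounds addr = 0x40001000 up to the next 16KiB boundary (0x40004000)
 * and adds the colour of pgoff = 5 within SHMLBA ((5 << 12) & 0x3fff =
 * 0x1000), giving 0x40005000, so the page lands at the same cache
 * colour as file offset 5 * PAGE_SIZE.
 */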

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);
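        /*
         * Note that file-backed mappings are colour-aligned as well as
         * explicit MAP_SHARED ones: the underlying page cache pages may
         * also be mapped by other tasks and would otherwise alias.
         */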

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
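        /*
         * For MAP_FIXED the address cannot be moved, so a shared mapping
         * whose address and file offset disagree on cache colour is
         * rejected above rather than relocated.
         */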

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
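        /*
         * free_area_cache remembers where the previous search finished;
         * cached_hole_size is the largest gap known to lie below it.  A
         * request larger than that gap can safely resume from the cache,
         * while a smaller one restarts from TASK_UNMAPPED_BASE in case
         * an earlier hole would fit.
         */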
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }
        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
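        /*
         * That is 0-255 page-sized steps: assuming 4KiB pages (an
         * illustrative, not universal, figure) the search start above is
         * shifted by just under 1MiB at most.
         */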

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

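        /*
         * First-fit linear walk of the VMA list: advance addr past each
         * VMA until a gap of at least len bytes appears, or restart once
         * from TASK_UNMAPPED_BASE if the first pass began higher up.
         */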
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}


/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
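        /*
         * Anything below PHYS_OFFSET (the start of physical RAM) or past
         * the end of lowmem (high_memory) is rejected.
         */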
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
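        /*
         * 0x00100000 PFNs: with 4KiB pages (assumed here only for the
         * arithmetic) that is 2^20 * 4KiB = 4GiB, the limit imposed by
         * not using supersection mappings.
         */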
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
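        /*
         * Exclusive MMIO regions are never exposed; anything that is not
         * system RAM is allowed; RAM itself is refused.
         */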
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif