/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

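	/* Free the single-page protection maps for addresses below 4GB */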
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
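	/* Free the two-level map covering the rest of the address space */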
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}

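/*
 * Flush any existing hash page table entries (HPTEs) for a range of
 * PTEs.  The no-op pte_update() forces each HPTE covering the range
 * to be invalidated, so the next access refaults and picks up the
 * new subpage protection bits.
 */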
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	up_write(&mm->mmap_sem);
}

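/*
 * Subpage protection only works on 4k hash PTEs, so a range under
 * subpage protection cannot be mapped by transparent huge pages.
 * Mark the affected VMAs VM_NOHUGEPAGE and split any huge PMDs that
 * already cover them.
 */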
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the VMAs in that
	 * range VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range lies entirely in an unmapped hole, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
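/*
 * For illustration, a sketch of how userspace might call this (a
 * hypothetical snippet, not part of this file): to forbid writes to
 * every 4k subpage of a single 64k page at 'addr', set each 2-bit
 * field in the map word to 1:
 *
 *	u32 map = 0x55555555;
 *	syscall(__NR_subpage_prot, addr, 0x10000, &map);
 *
 * Passing map == NULL instead clears any existing subpage
 * protections on the range.
 */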
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

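	/* Subpage protection is implemented only for the hash MMU */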
	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_sem held.
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

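		/*
		 * Demote the segment to 4k hash pages, with interrupts
		 * off, so that the per-4k-subpage permissions can be
		 * enforced in the hash page table.
		 */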
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

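		/*
		 * Drop mmap_sem across the user copy: the copy may
		 * fault, and the fault handler takes mmap_sem itself.
		 */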
		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
	return err;
}