/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

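/*
 * Allocate a context ID using the older IDA API: ida_pre_get()
 * preallocates internal nodes, ida_get_new_above() then hands out the
 * lowest free ID >= min_id under mmu_context_lock, and -EAGAIN means a
 * racing allocation consumed the preallocated memory, so we retry.
 * ida_get_new_above() takes no upper bound, so an over-limit ID is
 * only caught after the fact and released again.
 */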
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

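/*
 * Reserve a fixed context ID: ask for the lowest free ID >= id and
 * warn if we were handed anything other than id itself, i.e. if the
 * ID had already been claimed.
 */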
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

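/*
 * CPUs supporting 68-bit virtual addresses get the full range of
 * context IDs; 65-bit-VA configurations get a smaller one.
 */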
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * In the case of exec, use the default limit;
	 * otherwise inherit it from the mm we are duplicating.
	 */
	if (!mm->context.slb_addr_limit)
		mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter the context slice details inherited
	 * via fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}

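/*
 * On radix the context ID is also the hardware PID that gets written
 * to SPRN_PID (see radix__switch_mmu_context() below), so the usable
 * range is bounded by the implemented PID width, mmu_pid_bits.
 */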
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry: the RTS field (radix tree size),
	 * the physical address of the PGD and the size of the root page
	 * table level are packed into one doubleword.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}

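/*
 * Arch hook called whenever a new mm is created (fork/exec): pick the
 * hash or radix flavour and initialise the rest of the context.
 */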
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);

	return 0;
}

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

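/*
 * With 64K pages a page-table page is carved into PTE_FRAG_NR
 * fragments; on teardown, release whatever is left of the mm's
 * current fragment page.
 */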
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/*
	 * pte_frag points at the next free fragment, so its offset within
	 * the page is the number of the PTE_FRAG_NR fragments already
	 * handed out; those carry their own page references. Drop the
	 * references for the fragments that were never handed out, and
	 * free the page once the last reference is gone.
	 */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

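/*
 * On radix the process table entry is expected to have been zeroed
 * already, normally by arch_exit_mmap() below, hence the WARN_ON().
 */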
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

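/*
 * Switching the PID register is all it takes to change radix address
 * spaces. The extra isync/PPC_INVALIDATE_ERAT sequence works around a
 * POWER9 DD1 erratum; later revisions only need the isync to order
 * the mtspr against subsequent accesses.
 */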
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : : "memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif