// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/rcupdate_wait.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

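/* Look up a guest TCE table by its logical I/O bus number (LIOBN). */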
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}

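/* Number of host pages needed to back @iommu_pages 64-bit TCE entries. */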
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

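/*
 * Pages charged against the locked memory limit for a table with
 * @tce_pages backing pages: the backing pages themselves plus the pages
 * occupied by struct kvmppc_spapr_tce_table and its page pointer array.
 */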
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

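/*
 * Drop the references that this VM's TCE tables hold on the hardware
 * IOMMU tables of @grp, e.g. when the group is detached from the VM.
 */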
void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

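/*
 * Associate the guest TCE table identified by @tablefd with a compatible
 * hardware IOMMU table from @grp, so that H_PUT_TCE and friends update
 * the hardware table as well as the guest-visible one.
 */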
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

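/* Lazily allocate the backing page for one chunk of the guest TCE table. */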
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

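/*
 * Create an in-kernel TCE table for the guest and return an anonymous fd
 * which userspace can mmap() to read the table entries.
 */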
int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				  struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(args->size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = args->size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
 fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

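/* Convert the guest physical address encoded in a TCE to a userspace address. */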
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

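/*
 * Validate a guest TCE before it is committed: check the guest physical
 * address and, for tables backed by hardware IOMMU tables, that the page
 * belongs to a preregistered memory region.
 */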
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

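/* Reset the hardware TCE(s) backing one guest table entry (used on error paths). */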
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

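/* Handle the H_PUT_TCE hypercall: set a single TCE entry. */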
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

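/* Handle the H_PUT_TCE_INDIRECT hypercall: set up to 512 TCEs from a guest-provided list. */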
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

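/* Handle the H_STUFF_TCE hypercall: set @npages consecutive TCEs to @tce_value. */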
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

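/* Handle the H_GET_TCE hypercall: return the current TCE value in GPR4. */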
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		kvmppc_set_gpr(vcpu, 4, 0);
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);