// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

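/*
 * Per-open state: privcmd_open() initialises domid to DOMID_INVALID,
 * meaning the file handle may target any domain.  Once restricted via
 * IOCTL_PRIVCMD_RESTRICT, every subsequent ioctl must match this domid.
 */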
struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

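/*
 * State threaded through the traverse_pages() callback for
 * IOCTL_PRIVCMD_MMAP: va tracks the next expected virtual address, so
 * successive privcmd_mmap_entry chunks are forced to be contiguous
 * within the VMA.
 */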
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;		/* next virtual address to map */
	struct vm_area_struct *vma;
	int index;			/* index into the vma's pages array */
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

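/*
 * Forward declaration: the batch/resource ioctls below use this to
 * check that a VMA was set up by privcmd_mmap() before touching it.
 */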
static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

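/*
 * Pin the user pages backing each buffer in kbufs[] into pages[].  A
 * single pin_user_pages_fast() call may pin a buffer only partially,
 * in which case "off" remembers how many of its pages are already done
 * and the same buffer is retried on the next loop iteration.  *pinned
 * is updated as we go, so the caller can unpin on any failure.
 */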
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

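/*
 * IOCTL_PRIVCMD_DM_OP: bounce the buffer descriptor array in, validate
 * each buffer against the dm_op_max_nr_bufs/dm_op_buf_max_size module
 * limits, pin the user buffers so they stay put while the hypervisor
 * accesses them, then issue the dm_op hypercall.
 */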
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

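/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE doubles as a query: calling it with both
 * addr and num set to zero returns the size of the resource (in
 * frames) via udata->num, so userspace can size its mmap() before
 * calling again to establish the actual mapping.
 */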
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

#ifdef CONFIG_XEN_PRIVCMD_IRQFD
/* Irqfd support */
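/*
 * An irqfd ties an eventfd to a device-model operation: userspace
 * registers an eventfd together with a pre-built dm_op buffer, and
 * whenever the eventfd is signalled the kernel issues that dm_op
 * hypercall on its behalf (typically to raise an interrupt in the
 * guest domain).
 */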
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
static LIST_HEAD(irqfds_list);

struct privcmd_kernel_irqfd {
	struct xen_dm_op_buf xbufs;
	domid_t dom;
	bool error;
	struct eventfd_ctx *eventfd;
	struct work_struct shutdown;
	wait_queue_entry_t wait;
	struct list_head list;
	poll_table pt;
};

static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
	lockdep_assert_held(&irqfds_lock);

	list_del_init(&kirqfd->list);
	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}

static void irqfd_shutdown(struct work_struct *work)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(work, struct privcmd_kernel_irqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
	eventfd_ctx_put(kirqfd->eventfd);
	kfree(kirqfd);
}

static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
	u64 cnt;
	long rc;

	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
	xen_preemptible_hcall_end();

	/* Don't repeat the error message for consecutive failures */
	if (rc && !kirqfd->error) {
		pr_err("Failed to configure irq for guest domain: %d\n",
		       kirqfd->dom);
	}

	kirqfd->error = rc;
}

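/*
 * Wait-queue callback, invoked when the registered eventfd is
 * signalled (EPOLLIN) or its file is being released (EPOLLHUP).
 * Teardown is deferred to irqfd_cleanup_wq via irqfd_deactivate() so
 * that eventfd_ctx_remove_wait_queue() runs outside this callback.
 */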
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(wait, struct privcmd_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN)
		irqfd_inject(kirqfd);

	if (flags & EPOLLHUP) {
		mutex_lock(&irqfds_lock);
		irqfd_deactivate(kirqfd);
		mutex_unlock(&irqfds_lock);
	}

	return 0;
}

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(pt, struct privcmd_kernel_irqfd, pt);

	add_wait_queue_priority(wqh, &kirqfd->wait);
}

static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	__poll_t events;
	struct fd f;
	void *dm_op;
	int ret;

	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
	if (!kirqfd)
		return -ENOMEM;
	dm_op = kirqfd + 1;

	if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
		ret = -EFAULT;
		goto error_kfree;
	}

	kirqfd->xbufs.size = irqfd->size;
	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
	kirqfd->dom = irqfd->dom;
	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

	f = fdget(irqfd->fd);
	if (!f.file) {
		ret = -EBADF;
		goto error_kfree;
	}

	kirqfd->eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(kirqfd->eventfd)) {
		ret = PTR_ERR(kirqfd->eventfd);
		goto error_fd_put;
	}

	/*
	 * Install our own custom wake-up handling so we are notified via a
	 * callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

	mutex_lock(&irqfds_lock);

	list_for_each_entry(tmp, &irqfds_list, list) {
		if (kirqfd->eventfd == tmp->eventfd) {
			ret = -EBUSY;
			mutex_unlock(&irqfds_lock);
			goto error_eventfd;
		}
	}

	list_add_tail(&kirqfd->list, &irqfds_list);
	mutex_unlock(&irqfds_lock);

	/*
	 * Check if there was an event already pending on the eventfd before we
	 * registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &kirqfd->pt);
	if (events & EPOLLIN)
		irqfd_inject(kirqfd);

	/*
	 * Do not drop the file until the kirqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

error_eventfd:
	eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
	fdput(f);

error_kfree:
	kfree(kirqfd);
	return ret;
}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(irqfd->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&irqfds_lock);

	list_for_each_entry(kirqfd, &irqfds_list, list) {
		if (kirqfd->eventfd == eventfd) {
			irqfd_deactivate(kirqfd);
			break;
		}
	}

	mutex_unlock(&irqfds_lock);

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed so
	 * that we guarantee there will not be any more interrupts once this
	 * deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

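/*
 * In outline, a userspace device model would drive this roughly as
 * follows (hypothetical sketch; exact field types are defined in the
 * privcmd uapi header, and error handling is omitted):
 *
 *	struct privcmd_irqfd irqfd = {
 *		.dm_op = dm_op_buf,		// pre-built dm_op structure
 *		.size  = dm_op_buf_size,
 *		.fd    = eventfd(0, EFD_CLOEXEC),
 *		.dom   = guest_domid,
 *	};
 *	ioctl(privcmd_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
 *	// ... writing to irqfd.fd now triggers the dm_op ...
 *	irqfd.flags = PRIVCMD_IRQFD_FLAG_DEASSIGN;
 *	ioctl(privcmd_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
 */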
static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_irqfd irqfd;

	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
		return -EPERM;

	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return privcmd_irqfd_deassign(&irqfd);

	return privcmd_irqfd_assign(&irqfd);
}

static int privcmd_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void privcmd_irqfd_exit(void)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;

	mutex_lock(&irqfds_lock);

	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
		irqfd_deactivate(kirqfd);

	mutex_unlock(&irqfds_lock);

	destroy_workqueue(irqfd_cleanup_wq);
}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
	return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_IRQFD */

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	case IOCTL_PRIVCMD_IRQFD:
		ret = privcmd_ioctl_irqfd(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		goto err_privcmdbuf;
	}

	err = privcmd_irqfd_init();
	if (err != 0) {
		pr_err("irqfd init failed\n");
		goto err_irqfd;
	}

	return 0;

err_irqfd:
	misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
	misc_deregister(&privcmd_dev);
	return err;
}

static void __exit privcmd_exit(void)
{
	privcmd_irqfd_exit();
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);