// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO-KVM bridge pseudo device
*
* Copyright (C) 2013 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
#include <linux/anon_inodes.h>
#include <linux/errno.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"
#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif
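/*
 * State for one pvIOMMU instance, exposed to userspace as an anonymous fd.
 * The fd number doubles as the pvIOMMU identifier passed to the hypervisor;
 * fault_trigger is the eventfd signalled when an unrecoverable DMA fault is
 * recorded.
 */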
struct kvm_pviommu {
struct kvm_device *dev;
int fd;
struct eventfd_ctx *fault_trigger;
};
struct kvm_vfio_file {
struct list_head node;
struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct iommu_group *iommu_group;
#endif
struct kvm_pviommu *pviommu;
};
struct kvm_vfio {
struct list_head file_list;
struct mutex lock;
bool noncoherent;
};
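/*
 * VFIO may be built as a module, so resolve its exported helpers at run time
 * with symbol_get() and drop the module reference immediately after each
 * call.
 */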
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
void (*fn)(struct file *file, struct kvm *kvm);
fn = symbol_get(vfio_file_set_kvm);
if (!fn)
return;
fn(file, kvm);
symbol_put(vfio_file_set_kvm);
}
static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
fn = symbol_get(vfio_file_enforced_coherent);
if (!fn)
return false;
ret = fn(file);
symbol_put(vfio_file_enforced_coherent);
return ret;
}
static bool kvm_vfio_file_is_valid(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
fn = symbol_get(vfio_file_is_valid);
if (!fn)
return false;
ret = fn(file);
symbol_put(vfio_file_is_valid);
return ret;
}
static struct device *kvm_vfio_file_get_device(struct file *file)
{
struct device *(*fn)(struct file *file);
struct device *dev;
fn = symbol_get(vfio_file_get_device);
if (!fn)
return NULL;
dev = fn(file);
symbol_put(vfio_file_get_device);
return dev;
}
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
struct iommu_group *(*fn)(struct file *file);
struct iommu_group *ret;
fn = symbol_get(vfio_file_iommu_group);
if (!fn)
return NULL;
ret = fn(file);
symbol_put(vfio_file_iommu_group);
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
struct kvm_vfio_file *kvf)
{
if (WARN_ON_ONCE(!kvf->iommu_group))
return;
kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
iommu_group_put(kvf->iommu_group);
kvf->iommu_group = NULL;
}
#endif
/*
* Groups/devices can use the same or different IOMMU domains. If the same
* then adding a new group/device may change the coherency of groups/devices
* we've previously been told about. We don't want to care about any of
* that so we retest each group/device and bail as soon as we find one that's
* noncoherent. This means we only ever [un]register_noncoherent_dma once
* for the whole device.
*/
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
bool noncoherent = false;
struct kvm_vfio_file *kvf;
list_for_each_entry(kvf, &kv->file_list, node) {
if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
noncoherent = true;
break;
}
}
if (noncoherent != kv->noncoherent) {
kv->noncoherent = noncoherent;
if (kv->noncoherent)
kvm_arch_register_noncoherent_dma(dev->kvm);
else
kvm_arch_unregister_noncoherent_dma(dev->kvm);
}
}
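/*
 * Prefer per-device assignment when the file is a VFIO device fd, and fall
 * back to IOMMU-group assignment when it is a VFIO group fd.
 */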
static int kvm_vfio_assign_file(struct file *file)
{
struct device *dev;
struct iommu_group *group;
dev = kvm_vfio_file_get_device(file);
if (dev)
return kvm_arch_assign_device(dev);
group = kvm_vfio_file_iommu_group(file);
if (group)
return kvm_arch_assign_group(group);
return -ENODEV;
}
static void kvm_vfio_reclaim_file(struct file *file)
{
struct device *dev;
struct iommu_group *group;
dev = kvm_vfio_file_get_device(file);
if (dev) {
kvm_arch_reclaim_device(dev);
return;
}
group = kvm_vfio_file_iommu_group(file);
if (group)
kvm_arch_reclaim_group(group);
}
static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct file *filp;
int ret = 0;
filp = fget(fd);
if (!filp)
return -EBADF;
/* Ensure the FD is a vfio FD. */
if (!kvm_vfio_file_is_valid(filp)) {
ret = -EINVAL;
goto out_fput;
}
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file == filp) {
ret = -EEXIST;
goto out_unlock;
}
}
	kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
	if (!kvf) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Assign before publishing so a failure leaves no stale list entry. */
	ret = kvm_vfio_assign_file(filp);
	if (ret) {
		kfree(kvf);
		goto out_unlock;
	}

	kvf->file = get_file(filp);
	list_add_tail(&kvf->node, &kv->file_list);

	kvm_arch_start_assignment(dev->kvm);
	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
	kvm_vfio_update_coherency(dev);
out_unlock:
mutex_unlock(&kv->lock);
out_fput:
fput(filp);
return ret;
}
static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct fd f;
int ret;
f = fdget(fd);
if (!f.file)
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file != f.file)
continue;
kvm_vfio_reclaim_file(kvf->file);
list_del(&kvf->node);
kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
kvm_vfio_file_set_kvm(kvf->file, NULL);
fput(kvf->file);
kfree(kvf);
ret = 0;
break;
}
kvm_vfio_update_coherency(dev);
mutex_unlock(&kv->lock);
fdput(f);
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
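/*
 * Bind a previously added VFIO group to an in-kernel TCE table. The
 * iommu_group reference taken here is dropped again when the file is
 * deleted or the device is released.
 */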
static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
void __user *arg)
{
struct kvm_vfio_spapr_tce param;
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct fd f;
int ret;
if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
return -EFAULT;
f = fdget(param.groupfd);
if (!f.file)
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file != f.file)
continue;
if (!kvf->iommu_group) {
kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
if (WARN_ON_ONCE(!kvf->iommu_group)) {
ret = -EIO;
goto err_fdput;
}
}
ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
kvf->iommu_group);
break;
}
err_fdput:
mutex_unlock(&kv->lock);
fdput(f);
return ret;
}
#endif
static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
void __user *arg)
{
int32_t __user *argp = arg;
int32_t fd;
switch (attr) {
case KVM_DEV_VFIO_FILE_ADD:
if (get_user(fd, argp))
return -EFAULT;
return kvm_vfio_file_add(dev, fd);
case KVM_DEV_VFIO_FILE_DEL:
if (get_user(fd, argp))
return -EFAULT;
return kvm_vfio_file_del(dev, fd);
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
return kvm_vfio_file_set_spapr_tce(dev, arg);
#endif
}
return -ENXIO;
}
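/*
 * Illustrative userspace sequence (a sketch, not part of this file): create
 * the KVM-VFIO pseudo device and register a VFIO fd with it. Error handling
 * elided; vm_fd and vfio_fd are assumed to already exist.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_FILE,
 *		.attr  = KVM_DEV_VFIO_FILE_ADD,
 *		.addr  = (__u64)(unsigned long)&vfio_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */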
static int kvm_vfio_pviommu_set_config(struct file *fiommu,
				       struct kvm_vfio_iommu_config *config)
{
int vfio_dev_fd = config->device_fd;
struct file *filp;
int ret;
u32 phys_sid;
pkvm_handle_t iommu;
struct kvm_pviommu *pviommu = fiommu->private_data;
struct device *dev;
filp = fget(vfio_dev_fd);
if (!filp)
return -EBADF;
dev = kvm_vfio_file_get_device(filp);
if (!dev) {
ret = -ENODEV;
goto err_fput;
}
ret = kvm_iommu_device_id(dev, config->sid_idx, &iommu, &phys_sid);
if (ret)
goto err_fput;
ret = kvm_call_hyp_nvhe(__pkvm_pviommu_add_vsid, pviommu->dev->kvm, pviommu->fd,
iommu, phys_sid, config->vsid);
err_fput:
fput(filp);
return ret;
}
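/*
 * Forward unrecoverable DMA faults to the hypervisor's per-pvIOMMU fault
 * log, then signal userspace through the fault eventfd.
 */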
static int pviommu_dev_fault_handler(struct iommu_fault *fault, void *data)
{
u32 flags;
struct kvm_vfio_file *kvf = data;
int ret;
if (fault->type != IOMMU_FAULT_DMA_UNRECOV)
return 0;
	/*
	 * The guest knows nothing about the host's IOMMU descriptors, so
	 * never forward the fetch address.
	 */
	flags = fault->event.flags & ~IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID;
	/* TBD: also report one of the faulting device's SIDs? */
ret = kvm_call_hyp_nvhe(__pkvm_pviommu_record_fault,
kvf->pviommu->dev->kvm,
kvf->pviommu->fd,
fault->event.reason,
flags,
fault->event.pasid,
fault->event.perm,
fault->event.addr);
if (ret)
return ret;
eventfd_signal(kvf->pviommu->fault_trigger, 1);
return 0;
}
static int kvm_vfio_pviommu_set_fault_irq(struct file *fiommu,
					  struct kvm_vfio_iommu_faultfd *faultfd)
{
	struct kvm_pviommu *pviommu = fiommu->private_data;
	struct kvm_vfio *kv = pviommu->dev->private;
	struct kvm_vfio_file *kvf;
	struct device *dev;
	int ret = 0;

	if (pviommu->fault_trigger)
		return -EBUSY;

	pviommu->fault_trigger = eventfd_ctx_fdget(faultfd->eventfd);
	if (IS_ERR(pviommu->fault_trigger)) {
		ret = PTR_ERR(pviommu->fault_trigger);
		pviommu->fault_trigger = NULL;
		return ret;
	}

	mutex_lock(&kv->lock);
	list_for_each_entry(kvf, &kv->file_list, node) {
		dev = kvm_vfio_file_get_device(kvf->file);
		if (!dev) {
			ret = -EINVAL;
			break;
		}

		ret = iommu_register_device_fault_handler(dev,
							  pviommu_dev_fault_handler,
							  kvf);
		if (ret) {
			/* TODO: iommu_unregister_device_fault_handler */
			dev_err(dev, "failed to register IOMMU fault handler %d\n", ret);
			break;
		}
		kvf->pviommu = pviommu;
	}
	mutex_unlock(&kv->lock);

	if (ret) {
		eventfd_ctx_put(pviommu->fault_trigger);
		pviommu->fault_trigger = NULL;
	}
	return ret;
}
static long pviommufd_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vfio_iommu_config config;
struct kvm_vfio_iommu_faultfd faultfd;
	switch (ioctl) {
	case KVM_PVIOMMU_SET_CONFIG:
		if (copy_from_user(&config, (void __user *)arg, sizeof(config)))
			return -EFAULT;
		return kvm_vfio_pviommu_set_config(filp, &config);
	case KVM_PVIOMMU_SET_FAULT_IRQ:
		if (copy_from_user(&faultfd, (void __user *)arg, sizeof(faultfd)))
			return -EFAULT;
		return kvm_vfio_pviommu_set_fault_irq(filp, &faultfd);
	default:
		return -ENXIO;
	}
}
static int pviommufd_release(struct inode *i, struct file *filp)
{
struct kvm_pviommu *pviommu = filp->private_data;
struct kvm_vfio *kv = pviommu->dev->private;
struct kvm_vfio_file *kvf;
	if (pviommu->fault_trigger) {
		eventfd_ctx_put(pviommu->fault_trigger);

		/* The fault handlers hold kvf pointers; walk the list locked. */
		mutex_lock(&kv->lock);
		list_for_each_entry(kvf, &kv->file_list, node) {
			struct device *dev = kvm_vfio_file_get_device(kvf->file);

			if (!dev)
				continue;
			iommu_unregister_device_fault_handler(dev);
		}
		mutex_unlock(&kv->lock);
	}
kfree(pviommu);
return 0;
}
static const struct file_operations pviommu_fops = {
.unlocked_ioctl = pviommufd_ioctl,
.release = pviommufd_release,
};
static int kvm_vfio_pviommu_attach(struct kvm_device *dev)
{
	struct kvm_pviommu *pviommu;
	struct file *filp;
	int fd, ret;

	pviommu = kzalloc(sizeof(*pviommu), GFP_KERNEL);
	if (!pviommu)
		return -ENOMEM;

	pviommu->dev = dev;

	/* Don't publish the fd to userspace until the hyp attach succeeds. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free;
	}

	filp = anon_inode_getfile("kvm-pviommu", &pviommu_fops, pviommu,
				  O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto out_put_fd;
	}

	pviommu->fd = fd;

	/* Create the pvIOMMU at EL2, identified by this fd. */
	ret = kvm_call_hyp_nvhe(__pkvm_pviommu_attach, dev->kvm, pviommu->fd);
	if (ret) {
		fput(filp);	/* frees pviommu via pviommufd_release() */
		put_unused_fd(fd);
		return ret;
	}

	fd_install(fd, filp);
	return fd;

out_put_fd:
	put_unused_fd(fd);
out_free:
	kfree(pviommu);
	return ret;
}
static int kvm_vfio_pviommu_get_info(struct kvm_device *dev,
struct kvm_vfio_iommu_info *info)
{
int vfio_dev_fd = info->device_fd;
int ret = 0;
struct file *filp;
struct device *device;
filp = fget(vfio_dev_fd);
if (!filp)
return -EBADF;
device = kvm_vfio_file_get_device(filp);
if (!device) {
ret = -ENODEV;
goto err_fput;
}
info->nr_sids = kvm_iommu_device_num_ids(device);
err_fput:
fput(filp);
return ret;
}
static int kvm_vfio_pviommu(struct kvm_device *dev, long attr,
void __user *arg)
{
	struct kvm_vfio_iommu_info info;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_PVIOMMU_ATTACH:
		return kvm_vfio_pviommu_attach(dev);
	case KVM_DEV_VFIO_PVIOMMU_GET_INFO:
		if (copy_from_user(&info, arg, sizeof(info)))
			return -EFAULT;

		ret = kvm_vfio_pviommu_get_info(dev, &info);
		if (ret)
			return ret;

		/* copy_to_user() returns bytes not copied, not an errno. */
		if (copy_to_user(arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
return -ENXIO;
}
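/*
 * Illustrative userspace sequence (a sketch; assumes the UAPI structs match
 * the fields used above, error handling elided). The ATTACH attr returns a
 * new pvIOMMU fd, which then accepts the KVM_PVIOMMU_* ioctls:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_PVIOMMU,
 *		.attr  = KVM_DEV_VFIO_PVIOMMU_ATTACH,
 *	};
 *	int pviommu_fd = ioctl(kvm_vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	struct kvm_vfio_iommu_config config = {
 *		.device_fd = vfio_dev_fd,	// VFIO device fd
 *		.sid_idx   = 0,			// index into the device's SIDs
 *		.vsid      = vsid,		// virtual SID used by the guest
 *	};
 *	ioctl(pviommu_fd, KVM_PVIOMMU_SET_CONFIG, &config);
 */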
static int kvm_vfio_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_VFIO_FILE:
return kvm_vfio_set_file(dev, attr->attr,
u64_to_user_ptr(attr->addr));
case KVM_DEV_VFIO_PVIOMMU:
return kvm_vfio_pviommu(dev, attr->attr,
u64_to_user_ptr(attr->addr));
}
return -ENXIO;
}
static int kvm_vfio_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_VFIO_FILE:
switch (attr->attr) {
case KVM_DEV_VFIO_FILE_ADD:
case KVM_DEV_VFIO_FILE_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
return 0;
}
break;
case KVM_DEV_VFIO_PVIOMMU:
switch (attr->attr) {
case KVM_DEV_VFIO_PVIOMMU_ATTACH:
case KVM_DEV_VFIO_PVIOMMU_GET_INFO:
return 0;
}
break;
}
return -ENXIO;
}
static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf, *tmp;
list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
kvm_vfio_reclaim_file(kvf->file);
kvm_vfio_file_set_kvm(kvf->file, NULL);
fput(kvf->file);
list_del(&kvf->node);
kfree(kvf);
kvm_arch_end_assignment(dev->kvm);
}
kvm_vfio_update_coherency(dev);
kfree(kv);
kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
}
static int kvm_vfio_create(struct kvm_device *dev, u32 type);
static struct kvm_device_ops kvm_vfio_ops = {
.name = "kvm-vfio",
.create = kvm_vfio_create,
.release = kvm_vfio_release,
.set_attr = kvm_vfio_set_attr,
.has_attr = kvm_vfio_has_attr,
};
static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
struct kvm_device *tmp;
struct kvm_vfio *kv;
/* Only one VFIO "device" per VM */
list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
if (tmp->ops == &kvm_vfio_ops)
return -EBUSY;
kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
if (!kv)
return -ENOMEM;
INIT_LIST_HEAD(&kv->file_list);
mutex_init(&kv->lock);
dev->private = kv;
return 0;
}
int kvm_vfio_ops_init(void)
{
return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}
void kvm_vfio_ops_exit(void)
{
kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}