// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Arm Ltd.
 */
#include "arm_smmu_v3.h"
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/types.h>
#include <linux/gfp_types.h>
#include <linux/io-pgtable-arm.h>
#include <nvhe/alloc.h>
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include "arm-smmu-v3-module.h"
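
/*
 * The cfg handed to these helpers is always embedded in a struct
 * io_pgtable, so container_of() can walk back from the cfg to the
 * io_pgtable and from there to the LPAE-specific data.
 */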
#define io_pgtable_cfg_to_pgtable(x) container_of((x), struct io_pgtable, cfg)
#define io_pgtable_cfg_to_data(x) \
	io_pgtable_to_data(io_pgtable_cfg_to_pgtable(x))
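
/*
 * Allocate backing pages for a page table from the hyp IOMMU page
 * pools: idmapped domains use the atomic donation pool, other domains
 * request pages from the host. If the SMMU's table walker is not
 * cache-coherent, clean the pages to the PoC before handing them over.
 */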
void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, struct io_pgtable_cfg *cfg)
{
	void *addr;
	struct arm_lpae_io_pgtable *data = io_pgtable_cfg_to_data(cfg);

	if (!PAGE_ALIGNED(size))
		return NULL;

	if (data->idmapped)
		addr = kvm_iommu_donate_pages_atomic(get_order(size));
	else
		addr = kvm_iommu_donate_pages_request(get_order(size));

	if (addr && !cfg->coherent_walk)
		kvm_flush_dcache_to_poc(addr, size);

	return addr;
}
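
/*
 * Return page-table pages to the pool they were donated from, cleaning
 * them to the PoC first if the walker is not cache-coherent.
 */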
void __arm_lpae_free_pages(void *addr, size_t size, struct io_pgtable_cfg *cfg)
{
	u8 order = get_order(size);
	struct arm_lpae_io_pgtable *data = io_pgtable_cfg_to_data(cfg);

	BUG_ON(size != (1 << order) * PAGE_SIZE);

	if (!cfg->coherent_walk)
		kvm_flush_dcache_to_poc(addr, size);

	if (data->idmapped)
		kvm_iommu_reclaim_pages_atomic(addr, order);
	else
		kvm_iommu_reclaim_pages(addr, order);
}
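
/* Clean updated PTEs to the PoC if the walker is not cache-coherent. */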
void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
			 struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		kvm_flush_dcache_to_poc(ptep, sizeof(*ptep) * num_entries);
}
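
/*
 * Initialise the LPAE page-table data from the requested config. Only
 * the AArch64 stage-1 and stage-2 LPAE formats are supported here.
 */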
int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
			    struct arm_lpae_io_pgtable *data)
{
	int ret = -EINVAL;

	if (cfg->fmt == ARM_64_LPAE_S2)
		ret = arm_lpae_init_pgtable_s2(cfg, data);
	else if (cfg->fmt == ARM_64_LPAE_S1)
		ret = arm_lpae_init_pgtable_s1(cfg, data);
	if (ret)
		return ret;

	data->iop.cfg = *cfg;
	data->iop.fmt = cfg->fmt;
	return 0;
}
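
/*
 * Allocate a hyp-owned io-pgtable: the descriptor comes from the hyp
 * allocator, the pgd from the IOMMU page pools. On failure, NULL is
 * returned and the error reported through @out_ret.
 */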
struct io_pgtable *kvm_arm_io_pgtable_alloc(struct io_pgtable_cfg *cfg,
					    void *cookie,
					    int *out_ret)
{
	size_t pgd_size, alignment;
	struct arm_lpae_io_pgtable *data;
	int ret;

	data = hyp_alloc(sizeof(*data));
	if (!data) {
		*out_ret = hyp_alloc_errno();
		return NULL;
	}

	ret = kvm_arm_io_pgtable_init(cfg, data);
	if (ret)
		goto out_free;

	pgd_size = ARM_LPAE_PGD_SIZE(data);
	data->pgd = __arm_lpae_alloc_pages(pgd_size, 0, &data->iop.cfg);
	if (!data->pgd) {
		ret = -ENOMEM;
		goto out_free;
	}

	/*
	 * If the table has eight or more entries, it must be aligned to
	 * its size; otherwise, to 64 bytes.
	 */
	alignment = max(pgd_size, 8 * sizeof(arm_lpae_iopte));
	BUG_ON(!IS_ALIGNED(hyp_virt_to_phys(data->pgd), alignment));

	data->iop.cookie = cookie;
	/* ttbr and vttbr alias within the io_pgtable_cfg union. */
	if (cfg->fmt == ARM_64_LPAE_S2)
		data->iop.cfg.arm_lpae_s2_cfg.vttbr =
			__arm_lpae_virt_to_phys(data->pgd);
	else
		data->iop.cfg.arm_lpae_s1_cfg.ttbr =
			__arm_lpae_virt_to_phys(data->pgd);

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	*out_ret = 0;
	return &data->iop;
out_free:
	hyp_free(data);
	*out_ret = ret;
	return NULL;
}
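
/* Tear down an io-pgtable: free the table pages, then the descriptor. */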
int kvm_arm_io_pgtable_free(struct io_pgtable *iopt)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iopt);
	size_t pgd_size = ARM_LPAE_PGD_SIZE(data);

	if (!data->iop.cfg.coherent_walk)
		kvm_flush_dcache_to_poc(data->pgd, pgd_size);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	hyp_free(data);
	return 0;
}
int arm_lpae_mapping_exists(struct arm_lpae_io_pgtable *data)
{
	/*
	 * Sometimes the hypervisor forces mappings in the host page table;
	 * for example, on teardown pages are returned to the host even if
	 * they were shared. If this is not an idmapped domain, it is a
	 * host bug.
	 */
	WARN_ON(!data->idmapped);
	return -EEXIST;
}
void arm_lpae_mapping_missing(struct arm_lpae_io_pgtable *data)
{
	/* Similar to arm_lpae_mapping_exists(). */
	WARN_ON(!data->idmapped);
}
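
/*
 * Check the memory attributes of a valid leaf PTE: the MAIR AttrIndx
 * for stage-1, the MemAttr field for stage-2. MMIO (Device) mappings
 * are never collapsed into block mappings.
 */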
static bool arm_lpae_iopte_is_mmio(struct arm_lpae_io_pgtable *data,
				   arm_lpae_iopte pte)
{
	if (data->iop.fmt == ARM_64_LPAE_S1)
		return ((pte >> 2) & 0x7) == ARM_LPAE_MAIR_ATTR_IDX_DEV;

	return (pte & (0xf << 2)) == ARM_LPAE_PTE_MEMATTR_DEV;
}
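
/*
 * Progress of the last-level scan below is cached in bits [7:2] of the
 * table descriptor, which the architecture ignores for table entries.
 * Only six bits are available, so the index is stored with its low
 * (bits_per_level - 6) bits truncated.
 */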
#define ARM_LPAE_TABLE_LAST_IDX GENMASK(7, 2)

static u32 arm_lpae_table_get_last_idx(struct arm_lpae_io_pgtable *data,
				       arm_lpae_iopte table)
{
	u16 val = FIELD_GET(ARM_LPAE_TABLE_LAST_IDX, table);

	return val << (data->bits_per_level - 6);
}

static void arm_lpae_table_set_last_idx(struct arm_lpae_io_pgtable *data,
					arm_lpae_iopte *tablep, u32 idx)
{
	u16 val = idx >> (data->bits_per_level - 6);

	u64p_replace_bits(tablep, val, ARM_LPAE_TABLE_LAST_IDX);
}
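
/*
 * Walk one last-level table and return true if every entry is (or is
 * about to become) a valid non-MMIO mapping, in which case the caller
 * can replace the whole table with a block. Entries covered by the
 * pending map of [iova, iova + size) are counted as mapped. The scan
 * resumes from the index cached in the table descriptor and, if it
 * stops early, caches the new position so later calls don't rescan
 * everything from scratch.
 */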
static bool arm_lpae_scan_last_level(struct arm_lpae_io_pgtable *data,
				     unsigned long iova, size_t size,
				     arm_lpae_iopte *tablep)
{
	u32 n, idx, start, nentries, map_idx_start, map_idx_end;
	arm_lpae_iopte table = *tablep, *cptep = iopte_deref(table, data);

	nentries = ARM_LPAE_PTES_PER_TABLE(data);
	idx = start = arm_lpae_table_get_last_idx(data, table);
	map_idx_start = ARM_LPAE_LVL_IDX(iova, ARM_LPAE_MAX_LEVELS - 1, data);
	map_idx_end = min_t(u32,
			    map_idx_start + (size / ARM_LPAE_GRANULE(data)),
			    nentries) - 1;

	for (n = 0; n < nentries; ++n) {
		arm_lpae_iopte pte = cptep[idx];

		/* Entries in the range being mapped will become valid. */
		if (idx >= map_idx_start && idx <= map_idx_end) {
			n += map_idx_end - map_idx_start;
			idx = (map_idx_end + 1) % nentries;
			continue;
		}

		if (!pte || arm_lpae_iopte_is_mmio(data, pte))
			break;

		idx = (idx + 1) % nentries;
	}

	/* Remember where we stopped so the next scan can resume there. */
	if (n != nentries && idx != start)
		arm_lpae_table_set_last_idx(data, tablep, idx);

	return n == nentries;
}
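
/*
 * Decide whether a map request at this level can be satisfied with a
 * block mapping. For idmapped domains, a request at the second-to-last
 * level may also collapse an existing fully-mapped last-level table
 * (or reuse an existing block), in which case *pgcount is set to 1 so
 * that a single block entry is written.
 */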
bool arm_lpae_use_block_mapping(struct arm_lpae_io_pgtable *data,
				unsigned long iova, size_t pgsize,
				size_t *pgcount, arm_lpae_iopte prot,
				int level, arm_lpae_iopte *ptep)
{
	size_t block_size = ARM_LPAE_BLOCK_SIZE(level, data);
	size_t size = pgsize * *pgcount;
	arm_lpae_iopte pte = *ptep;

	if (pgsize == block_size)
		return true;

	if (level != ARM_LPAE_MAX_LEVELS - 2 || !data->idmapped)
		return false;

	if (!pte || arm_lpae_iopte_is_mmio(data, prot))
		return false;

	switch (iopte_type(pte)) {
	case ARM_LPAE_PTE_TYPE_TABLE:
		if (!arm_lpae_scan_last_level(data, iova, size, ptep))
			break;
		fallthrough;
	case ARM_LPAE_PTE_TYPE_BLOCK:
		*pgcount = 1;
		return true;
	}

	return false;
}
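
/*
 * Called after mapping at @level: if the next-level table now contains
 * only valid non-MMIO leaf entries, replace it with a block mapping at
 * this level. The last two levels are already collapsed at map time by
 * arm_lpae_use_block_mapping().
 */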
void arm_lpae_post_table_walk(struct arm_lpae_io_pgtable *data,
			      unsigned long iova, size_t pgsize,
			      size_t pgcount, arm_lpae_iopte prot,
			      int level, arm_lpae_iopte *ptep)
{
	size_t block_size = ARM_LPAE_BLOCK_SIZE(level, data);
	arm_lpae_iopte pte = *ptep;
	arm_lpae_iopte *cptep = iopte_deref(pte, data);
	int i;

	if (!data->idmapped)
		return;

	/* The last two levels are already handled at map time. */
	if (level >= ARM_LPAE_MAX_LEVELS - 2)
		return;

	for (i = 0; i < ARM_LPAE_PTES_PER_TABLE(data); ++i) {
		arm_lpae_iopte pte = cptep[i];

		if (!iopte_leaf(pte, level + 1, data->iop.fmt) ||
		    arm_lpae_iopte_is_mmio(data, pte))
			return;
	}

	iova &= ~(block_size - 1);
	WARN_ON(arm_lpae_init_pte(data, iova, iova, prot, level, 1, ptep));
}