// SPDX-License-Identifier: GPL-2.0
/*
* pKVM hyp driver for the Arm SMMUv3
*
* Copyright (C) 2022 Linaro Ltd.
*/
#include <asm/arm-smmu-v3-regs.h>
#include <asm/kvm_hyp.h>
#include <kvm/arm_smmu_v3.h>
#include <kvm/pl011.h>
#include <nvhe/iommu.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
size_t __ro_after_init kvm_hyp_arm_smmu_v3_count;
struct hyp_arm_smmu_v3_device __ro_after_init *kvm_hyp_arm_smmu_v3_smmus;
#define for_each_smmu(smmu) \
for ((smmu) = kvm_hyp_arm_smmu_v3_smmus; \
(smmu) != &kvm_hyp_arm_smmu_v3_smmus[kvm_hyp_arm_smmu_v3_count]; \
(smmu)++)
/*
* Wait until @cond is true.
* Return 0 on success, or -ETIMEDOUT
*/
#define smmu_wait(_cond) \
({ \
int __i = 0; \
int __ret = 0; \
\
while (!(_cond)) { \
if (++__i > ARM_SMMU_POLL_TIMEOUT_US) { \
__ret = -ETIMEDOUT; \
break; \
} \
pkvm_udelay(1); \
} \
__ret; \
})
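/*
 * Like smmu_wait(), but if the SMMU supports SEV, completion of a CMD_SYNC
 * issued with CS=SEV generates a WFE wake-up event, so wait in WFE rather
 * than busy-polling.
 */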
#define smmu_wait_event(_smmu, _cond) \
({ \
if ((_smmu)->features & ARM_SMMU_FEAT_SEV) { \
while (!(_cond)) \
wfe(); \
} \
smmu_wait(_cond); \
})
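/* Update SMMU_CR0 and wait for the SMMU to acknowledge it in SMMU_CR0ACK. */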
static int smmu_write_cr0(struct hyp_arm_smmu_v3_device *smmu, u32 val)
{
writel_relaxed(val, smmu->base + ARM_SMMU_CR0);
return smmu_wait(readl_relaxed(smmu->base + ARM_SMMU_CR0ACK) == val);
}
#define Q_WRAP(smmu, reg) ((reg) & (1 << (smmu)->cmdq_log2size))
#define Q_IDX(smmu, reg) ((reg) & ((1 << (smmu)->cmdq_log2size) - 1))
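/*
 * CMDQ_PROD and CMDQ_CONS hold a queue index with a wrap bit just above it:
 * with a 2^8-entry queue, for example, prod == 0x13f is index 0x3f with the
 * wrap bit set. The queue is full when the two indices are equal but the wrap
 * bits differ, and empty when both index and wrap bit match.
 */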
static bool smmu_cmdq_full(struct hyp_arm_smmu_v3_device *smmu)
{
u64 cons = readl_relaxed(smmu->base + ARM_SMMU_CMDQ_CONS);
return Q_IDX(smmu, smmu->cmdq_prod) == Q_IDX(smmu, cons) &&
Q_WRAP(smmu, smmu->cmdq_prod) != Q_WRAP(smmu, cons);
}
static bool smmu_cmdq_empty(struct hyp_arm_smmu_v3_device *smmu)
{
u64 cons = readl_relaxed(smmu->base + ARM_SMMU_CMDQ_CONS);
return Q_IDX(smmu, smmu->cmdq_prod) == Q_IDX(smmu, cons) &&
Q_WRAP(smmu, smmu->cmdq_prod) == Q_WRAP(smmu, cons);
}
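/*
 * Build the 16-byte command descriptor for @ent, copy it into the next free
 * command queue slot and publish it by updating CMDQ_PROD. Returns -EPIPE if
 * the SMMU is powered off and -ETIMEDOUT if the queue stays full.
 */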
static int smmu_add_cmd(struct hyp_arm_smmu_v3_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
int i;
int ret;
u64 cmd[CMDQ_ENT_DWORDS] = {};
int idx = Q_IDX(smmu, smmu->cmdq_prod);
u64 *slot = smmu->cmdq_base + idx * CMDQ_ENT_DWORDS;
if (smmu->iommu.power_is_off)
return -EPIPE;
ret = smmu_wait_event(smmu, !smmu_cmdq_full(smmu));
if (ret)
return ret;
cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
switch (ent->opcode) {
case CMDQ_OP_CFGI_ALL:
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
fallthrough;
case CMDQ_OP_CFGI_STE:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
break;
case CMDQ_OP_TLBI_NH_VA:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
break;
case CMDQ_OP_TLBI_NSNH_ALL:
break;
case CMDQ_OP_TLBI_NH_ASID:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
fallthrough;
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
case CMDQ_OP_TLBI_S2_IPA:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
break;
case CMDQ_OP_CMD_SYNC:
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
break;
default:
return -EINVAL;
}
for (i = 0; i < CMDQ_ENT_DWORDS; i++)
slot[i] = cpu_to_le64(cmd[i]);
smmu->cmdq_prod++;
writel(Q_IDX(smmu, smmu->cmdq_prod) | Q_WRAP(smmu, smmu->cmdq_prod),
smmu->base + ARM_SMMU_CMDQ_PROD);
return 0;
}
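/*
 * Post a CMD_SYNC and wait for the command queue to drain, guaranteeing that
 * all previously submitted commands have completed.
 */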
static int smmu_sync_cmd(struct hyp_arm_smmu_v3_device *smmu)
{
int ret;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CMD_SYNC,
};
ret = smmu_add_cmd(smmu, &cmd);
if (ret)
return ret;
return smmu_wait_event(smmu, smmu_cmdq_empty(smmu));
}
static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
struct arm_smmu_cmdq_ent *cmd)
{
int ret = smmu_add_cmd(smmu, cmd);
if (ret)
return ret;
return smmu_sync_cmd(smmu);
}
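/*
 * Invalidate any copy of the STE (or, below, the CD) that the SMMU may have
 * cached. This can be skipped while the SMMU is powered off, provided its
 * caches come back clean when it is powered on again.
 */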
static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
{
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_STE,
.cfgi.sid = sid,
.cfgi.leaf = true,
};
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
return 0;
return smmu_send_cmd(smmu, &cmd);
}
static int smmu_sync_cd(struct hyp_arm_smmu_v3_device *smmu, u32 sid, u32 ssid)
{
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
		.cfgi.sid = sid,
.cfgi.ssid = ssid,
.cfgi.leaf = true,
};
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
return 0;
return smmu_send_cmd(smmu, &cmd);
}
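/*
 * Install a level-2 stream table behind level-1 descriptor @idx, using a page
 * from kvm_iommu_donate_page(). If another CPU won the race and installed a
 * table first, the page is handed back to the allocator.
 */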
static int smmu_alloc_l2_strtab(struct hyp_arm_smmu_v3_device *smmu, u32 idx)
{
void *table;
u64 l2ptr, span;
/* Leaf tables must be page-sized */
if (smmu->strtab_split + ilog2(STRTAB_STE_DWORDS) + 3 != PAGE_SHIFT)
return -EINVAL;
span = smmu->strtab_split + 1;
if (WARN_ON(span < 1 || span > 11))
return -EINVAL;
table = kvm_iommu_donate_page();
if (!table)
return -ENOMEM;
l2ptr = hyp_virt_to_phys(table);
	if (l2ptr & (~STRTAB_L1_DESC_L2PTR_MASK | ~PAGE_MASK)) {
		kvm_iommu_reclaim_page(table);
		return -EINVAL;
	}
/* Ensure the empty stream table is visible before the descriptor write */
wmb();
if ((cmpxchg64_relaxed(&smmu->strtab_base[idx], 0, l2ptr | span) != 0))
kvm_iommu_reclaim_page(table);
return 0;
}
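/*
 * Return a pointer to the STE for @sid, or NULL if @sid is out of range.
 * With a two-level stream table, the level-2 table is allocated on demand.
 */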
static u64 *smmu_get_ste_ptr(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
{
u32 idx;
int ret;
u64 l1std, span, *base;
if (sid >= smmu->strtab_num_entries)
return NULL;
sid = array_index_nospec(sid, smmu->strtab_num_entries);
if (!smmu->strtab_split)
return smmu->strtab_base + sid * STRTAB_STE_DWORDS;
idx = sid >> smmu->strtab_split;
l1std = smmu->strtab_base[idx];
if (!l1std) {
ret = smmu_alloc_l2_strtab(smmu, idx);
if (ret)
return NULL;
l1std = smmu->strtab_base[idx];
if (WARN_ON(!l1std))
return NULL;
}
span = l1std & STRTAB_L1_DESC_SPAN;
idx = sid & ((1 << smmu->strtab_split) - 1);
if (!span || idx >= (1 << (span - 1)))
return NULL;
base = hyp_phys_to_virt(l1std & STRTAB_L1_DESC_L2PTR_MASK);
return base + idx * STRTAB_STE_DWORDS;
}
static u64 *smmu_get_cd_ptr(u64 *cdtab, u32 ssid)
{
/* Assume linear for now. */
return cdtab + ssid * CTXDESC_CD_DWORDS;
}
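/*
 * Take over the global registers. The host must have left the SMMU in
 * global-bypass-abort mode. Program the table and queue attributes, disable
 * interrupt generation, opt out of broadcast TLB maintenance (CR2.PTM), and
 * check that no fatal Service Failure Mode error is pending before
 * acknowledging the remaining global errors.
 */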
static int smmu_init_registers(struct hyp_arm_smmu_v3_device *smmu)
{
u64 val, old;
if (!(readl_relaxed(smmu->base + ARM_SMMU_GBPA) & GBPA_ABORT))
return -EINVAL;
/* Initialize all RW registers that will be read by the SMMU */
smmu_write_cr0(smmu, 0);
val = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
writel_relaxed(val, smmu->base + ARM_SMMU_CR1);
writel_relaxed(CR2_PTM, smmu->base + ARM_SMMU_CR2);
writel_relaxed(0, smmu->base + ARM_SMMU_IRQ_CTRL);
val = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
old = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
/* Service Failure Mode is fatal */
if ((val ^ old) & GERROR_SFM_ERR)
return -EIO;
/* Clear pending errors */
writel_relaxed(val, smmu->base + ARM_SMMU_GERRORN);
return 0;
}
/* Transfer ownership of structures from host to hyp */
static void *smmu_take_pages(u64 base, size_t size)
{
void *hyp_ptr;
hyp_ptr = hyp_phys_to_virt(base);
if (pkvm_create_mappings(hyp_ptr, hyp_ptr + size, PAGE_HYP))
return NULL;
return hyp_ptr;
}
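/*
 * Take ownership of the command queue. The host has already programmed
 * CMDQ_BASE, so validate it, map the queue memory into the hypervisor and
 * reset the queue to an empty state.
 */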
static int smmu_init_cmdq(struct hyp_arm_smmu_v3_device *smmu)
{
u64 cmdq_base;
size_t cmdq_nr_entries, cmdq_size;
cmdq_base = readq_relaxed(smmu->base + ARM_SMMU_CMDQ_BASE);
if (cmdq_base & ~(Q_BASE_RWA | Q_BASE_ADDR_MASK | Q_BASE_LOG2SIZE))
return -EINVAL;
smmu->cmdq_log2size = cmdq_base & Q_BASE_LOG2SIZE;
cmdq_nr_entries = 1 << smmu->cmdq_log2size;
cmdq_size = cmdq_nr_entries * CMDQ_ENT_DWORDS * 8;
cmdq_base &= Q_BASE_ADDR_MASK;
smmu->cmdq_base = smmu_take_pages(cmdq_base, cmdq_size);
if (!smmu->cmdq_base)
return -EINVAL;
memset(smmu->cmdq_base, 0, cmdq_size);
writel_relaxed(0, smmu->base + ARM_SMMU_CMDQ_PROD);
writel_relaxed(0, smmu->base + ARM_SMMU_CMDQ_CONS);
pkvm_debug("CMDQ @0x%llx 0x%llx sz=0x%lx\n", cmdq_base,
(u64)smmu->cmdq_base, cmdq_size);
return 0;
}
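/*
 * Take ownership of the stream table set up by the host. The linear format
 * maps the whole table; the two-level format maps only the level-1
 * descriptors, with level-2 tables allocated on demand. Everything starts out
 * invalid, so once translation is enabled DMA is blocked until a device is
 * attached.
 */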
static int smmu_init_strtab(struct hyp_arm_smmu_v3_device *smmu)
{
u64 strtab_base;
size_t strtab_size;
u32 strtab_cfg, fmt;
int split, log2size;
strtab_base = readq_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE);
if (strtab_base & ~(STRTAB_BASE_ADDR_MASK | STRTAB_BASE_RA))
return -EINVAL;
strtab_cfg = readl_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
if (strtab_cfg & ~(STRTAB_BASE_CFG_FMT | STRTAB_BASE_CFG_SPLIT |
STRTAB_BASE_CFG_LOG2SIZE))
return -EINVAL;
fmt = FIELD_GET(STRTAB_BASE_CFG_FMT, strtab_cfg);
split = FIELD_GET(STRTAB_BASE_CFG_SPLIT, strtab_cfg);
log2size = FIELD_GET(STRTAB_BASE_CFG_LOG2SIZE, strtab_cfg);
smmu->strtab_split = split;
smmu->strtab_num_entries = 1 << log2size;
switch (fmt) {
case STRTAB_BASE_CFG_FMT_LINEAR:
if (split)
return -EINVAL;
smmu->strtab_num_l1_entries = smmu->strtab_num_entries;
strtab_size = smmu->strtab_num_l1_entries *
STRTAB_STE_DWORDS * 8;
break;
case STRTAB_BASE_CFG_FMT_2LVL:
if (split != 6 && split != 8 && split != 10)
return -EINVAL;
smmu->strtab_num_l1_entries = 1 << max(0, log2size - split);
strtab_size = smmu->strtab_num_l1_entries *
STRTAB_L1_DESC_DWORDS * 8;
break;
default:
return -EINVAL;
}
strtab_base &= STRTAB_BASE_ADDR_MASK;
smmu->strtab_base = smmu_take_pages(strtab_base, strtab_size);
if (!smmu->strtab_base)
return -EINVAL;
/* Disable all STEs */
memset(smmu->strtab_base, 0, strtab_size);
switch (fmt) {
case STRTAB_BASE_CFG_FMT_LINEAR:
pkvm_debug("STRTAB @0x%llx 0x%llx sz=0x%lx\n",
strtab_base, (u64)smmu->strtab_base, strtab_size);
break;
case STRTAB_BASE_CFG_FMT_2LVL:
pkvm_debug("STRTAB @0x%llx 0x%llx sz=0x%lx split=%x\n",
strtab_base, (u64)smmu->strtab_base, strtab_size,
smmu->strtab_split);
break;
}
return 0;
}
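/*
 * Enable the command queue, invalidate any configuration and TLB entries the
 * SMMU may have cached while the host owned it, then enable translation.
 */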
static int smmu_reset_device(struct hyp_arm_smmu_v3_device *smmu)
{
int ret;
struct arm_smmu_cmdq_ent cfgi_cmd = {
.opcode = CMDQ_OP_CFGI_ALL,
};
struct arm_smmu_cmdq_ent tlbi_cmd = {
.opcode = CMDQ_OP_TLBI_NSNH_ALL,
};
/* Invalidate all cached configs and TLBs */
ret = smmu_write_cr0(smmu, CR0_CMDQEN);
if (ret)
return ret;
ret = smmu_add_cmd(smmu, &cfgi_cmd);
if (ret)
goto err_disable_cmdq;
ret = smmu_add_cmd(smmu, &tlbi_cmd);
if (ret)
goto err_disable_cmdq;
ret = smmu_sync_cmd(smmu);
if (ret)
goto err_disable_cmdq;
/* Enable translation */
return smmu_write_cr0(smmu, CR0_SMMUEN | CR0_CMDQEN | CR0_ATSCHK);
err_disable_cmdq:
return smmu_write_cr0(smmu, 0);
}
static struct hyp_arm_smmu_v3_device *to_smmu(struct kvm_hyp_iommu *iommu)
{
return container_of(iommu, struct hyp_arm_smmu_v3_device, iommu);
}
static void smmu_tlb_flush_all(void *cookie)
{
struct kvm_iommu_tlb_cookie *data = cookie;
struct hyp_arm_smmu_v3_device *smmu = to_smmu(data->iommu);
struct arm_smmu_cmdq_ent cmd;
struct kvm_hyp_iommu_domain *domain = data->domain;
struct arm_lpae_io_pgtable *pgtable = container_of(domain->pgtable,
struct arm_lpae_io_pgtable, iop);
if (pgtable->iop.cfg.fmt == ARM_64_LPAE_S2) {
cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
cmd.tlbi.vmid = data->domain_id;
} else {
cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
cmd.tlbi.asid = data->domain_id;
/* Domain ID is unique across all VMs. */
cmd.tlbi.vmid = 0;
}
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
return;
WARN_ON(smmu_send_cmd(smmu, &cmd));
}
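/*
 * Invalidate the IOTLB for [iova, iova + size), one granule at a time, using
 * TLBI by IPA for stage-2 domains and TLBI by VA for stage-1 domains.
 */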
static void smmu_tlb_inv_range(struct kvm_iommu_tlb_cookie *data,
unsigned long iova, size_t size, size_t granule,
bool leaf)
{
struct hyp_arm_smmu_v3_device *smmu = to_smmu(data->iommu);
unsigned long end = iova + size;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_TLBI_S2_IPA,
.tlbi.vmid = data->domain_id,
.tlbi.leaf = leaf,
};
struct kvm_hyp_iommu_domain *domain = data->domain;
struct arm_lpae_io_pgtable *pgtable = container_of(domain->pgtable,
struct arm_lpae_io_pgtable, iop);
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
return;
if (pgtable->iop.cfg.fmt == ARM_64_LPAE_S1) {
cmd.opcode = CMDQ_OP_TLBI_NH_VA;
cmd.tlbi.asid = data->domain_id;
cmd.tlbi.vmid = 0;
}
/*
* There are no mappings at high addresses since we don't use TTB1, so
* no overflow possible.
*/
BUG_ON(end < iova);
while (iova < end) {
cmd.tlbi.addr = iova;
WARN_ON(smmu_send_cmd(smmu, &cmd));
BUG_ON(iova + granule < iova);
iova += granule;
}
}
static void smmu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
smmu_tlb_inv_range(cookie, iova, size, granule, false);
}
static void smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
smmu_tlb_inv_range(cookie, iova, granule, granule, true);
}
static const struct iommu_flush_ops smmu_tlb_ops = {
.tlb_flush_all = smmu_tlb_flush_all,
.tlb_flush_walk = smmu_tlb_flush_walk,
.tlb_add_page = smmu_tlb_add_page,
};
static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
{
int ret;
if (!PAGE_ALIGNED(smmu->mmio_addr | smmu->mmio_size))
return -EINVAL;
	ret = pkvm_create_hyp_device_mapping(smmu->mmio_addr, smmu->mmio_size,
					     &smmu->base);
	if (ret)
		return ret;
pkvm_debug("MMIO @0x%llx-0x%llx\n", smmu->mmio_addr,
smmu->mmio_addr + smmu->mmio_size - 1);
ret = smmu_init_registers(smmu);
if (ret)
return ret;
ret = smmu_init_cmdq(smmu);
if (ret)
return ret;
ret = smmu_init_strtab(smmu);
if (ret)
return ret;
ret = smmu_reset_device(smmu);
if (ret)
return ret;
return kvm_iommu_init_device(&smmu->iommu);
}
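/*
 * Driver entry point, called once at pKVM initialization: map the array of
 * SMMU descriptors into the hypervisor and take over each device it
 * describes.
 */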
static int smmu_init(void)
{
int ret;
struct hyp_arm_smmu_v3_device *smmu;
ret = kvm_iommu_init();
if (ret)
return ret;
ret = pkvm_create_mappings(kvm_hyp_arm_smmu_v3_smmus,
kvm_hyp_arm_smmu_v3_smmus +
kvm_hyp_arm_smmu_v3_count,
PAGE_HYP);
if (ret)
return ret;
for_each_smmu(smmu) {
ret = smmu_init_device(smmu);
if (ret)
return ret;
}
return 0;
}
static struct kvm_hyp_iommu *smmu_id_to_iommu(pkvm_handle_t smmu_id)
{
if (smmu_id >= kvm_hyp_arm_smmu_v3_count)
return NULL;
smmu_id = array_index_nospec(smmu_id, kvm_hyp_arm_smmu_v3_count);
return &kvm_hyp_arm_smmu_v3_smmus[smmu_id].iommu;
}
static u64 *smmu_alloc_cd(u32 pasid_bits, u32 *table_size)
{
u64 *cd_table = kvm_iommu_donate_page();
u32 requested_size;
/* No mem. */
if (!cd_table)
return NULL;
	requested_size = (1 << pasid_bits) * (CTXDESC_CD_DWORDS << 3);
	/* Clamp to a single page, which is all the allocator hands out. */
	*table_size = min_t(u32, PAGE_SIZE, requested_size);
	/* The caller stores this pointer in the STE, so return the physical address. */
	return (u64 *)__hyp_pa(cd_table);
}
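/*
 * Set up stage-1 translation for (@sid, @ssid): allocate a linear CD table on
 * first use, fill in the stage-1 fields of the STE in @ent, then write and
 * sync the context descriptor pointing to the domain's page table.
 * *@update_ste tells the caller whether @ent needs to be installed.
 */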
static int smmu_finalise_s1(u64 *ent, struct hyp_arm_smmu_v3_device *smmu,
struct kvm_hyp_iommu_domain *domain,
pkvm_handle_t domain_id, u32 sid,
u32 ssid, u32 pasid_bits, bool *update_ste)
{
u64 *cd_table;
u64 *ste;
u32 table_size;
u32 nr_entries;
u64 val;
u64 *cd_entry;
struct io_pgtable_cfg *cfg;
cfg = &smmu->pgtable_s1.iop.cfg;
/* Check if we already have CD for this SID. */
ste = smmu_get_ste_ptr(smmu, sid);
val = le64_to_cpu(ste[0]);
	/* S1ContextPtr holds address bits [51:6] in place, so mask instead of shifting. */
	cd_table = (u64 *)(val & STRTAB_STE_0_S1CTXPTR_MASK);
nr_entries = 1 << FIELD_GET(STRTAB_STE_0_S1CDMAX, val);
*update_ste = false;
if (!cd_table) {
		/*
		 * Workaround: for now we only allow one page of CDs, as guests
		 * can't allocate contiguous physical memory.
		 * Only linear tables are supported now.
		 */
		cd_table = smmu_alloc_cd(pasid_bits, &table_size);
		if (!cd_table)
			return -ENOMEM;
		nr_entries = table_size / (CTXDESC_CD_DWORDS << 3);
ent[1] = FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH);
ent[0] = ((u64)cd_table & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
FIELD_PREP(STRTAB_STE_0_S1CDMAX, ilog2(nr_entries)) |
FIELD_PREP(STRTAB_STE_0_S1FMT, STRTAB_STE_0_S1FMT_LINEAR) |
STRTAB_STE_0_V;
*update_ste = true;
}
if (ssid >= nr_entries)
return -E2BIG;
/* Write CD. */
cd_entry = smmu_get_cd_ptr(__hyp_va(cd_table), ssid);
cd_entry[1] = cpu_to_le64(hyp_virt_to_phys(domain->pgd) & CTXDESC_CD_1_TTB0_MASK);
cd_entry[2] = 0;
cd_entry[3] = cpu_to_le64(cfg->arm_lpae_s1_cfg.mair);
smmu_sync_cd(smmu, sid, ssid);
val = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz) |
FIELD_PREP(CTXDESC_CD_0_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
FIELD_PREP(CTXDESC_CD_0_TCR_IPS, cfg->arm_lpae_s1_cfg.tcr.ips) |
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64 |
CTXDESC_CD_0_R | CTXDESC_CD_0_A |
CTXDESC_CD_0_ASET |
FIELD_PREP(CTXDESC_CD_0_ASID, domain_id) |
CTXDESC_CD_0_V;
WRITE_ONCE(cd_entry[0], cpu_to_le64(val));
/* Sync CD. */
	return smmu_sync_cd(smmu, sid, ssid);
}
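/*
 * Fill in the stage-2 fields of the STE in @ent: VTCR attributes taken from
 * the io-pgtable config, the domain ID as VMID, and the domain's page table
 * as S2TTB.
 */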
static int smmu_finalise_s2(u64 *ent, struct hyp_arm_smmu_v3_device *smmu,
struct kvm_hyp_iommu_domain *domain,
pkvm_handle_t domain_id)
{
struct io_pgtable_cfg *cfg;
u64 ts, sl, ic, oc, sh, tg, ps;
cfg = &smmu->pgtable_s2.iop.cfg;
ps = cfg->arm_lpae_s2_cfg.vtcr.ps;
tg = cfg->arm_lpae_s2_cfg.vtcr.tg;
sh = cfg->arm_lpae_s2_cfg.vtcr.sh;
oc = cfg->arm_lpae_s2_cfg.vtcr.orgn;
ic = cfg->arm_lpae_s2_cfg.vtcr.irgn;
sl = cfg->arm_lpae_s2_cfg.vtcr.sl;
ts = cfg->arm_lpae_s2_cfg.vtcr.tsz;
ent[0] = STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
ent[2] = FIELD_PREP(STRTAB_STE_2_VTCR,
FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, ps) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, tg) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, sh) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, oc) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, ic) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, sl) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, ts)) |
FIELD_PREP(STRTAB_STE_2_S2VMID, domain_id) |
STRTAB_STE_2_S2AA64;
ent[3] = hyp_virt_to_phys(domain->pgd) & STRTAB_STE_3_S2TTB_MASK;
return 0;
}
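/*
 * Install an STE for @sid. Since the SMMU may have cached the currently
 * invalid STE, write dwords 1..n first and sync, then set dword 0 (which
 * holds the valid bit) and sync again.
 */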
static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, pkvm_handle_t domain_id,
struct kvm_hyp_iommu_domain *domain, u32 sid, u32 pasid,
u32 pasid_bits)
{
int i;
int ret;
u64 *dst;
u64 ent[STRTAB_STE_DWORDS] = {};
struct hyp_arm_smmu_v3_device *smmu = to_smmu(iommu);
bool update_ste = true;
dst = smmu_get_ste_ptr(smmu, sid);
if (!dst || dst[0])
return -EINVAL;
pasid_bits = min(pasid_bits, smmu->ssid_bits);
	/* For stage-1 domains, finalise the CD setup. */
if (domain->pgtable->cfg.fmt == ARM_64_LPAE_S1)
ret = smmu_finalise_s1(ent, smmu, domain, domain_id, sid,
pasid, pasid_bits, &update_ste);
else
ret = smmu_finalise_s2(ent, smmu, domain, domain_id);
if (ret)
return ret;
if (!update_ste)
return 0;
/*
* The SMMU may cache a disabled STE.
* Initialize all fields, sync, then enable it.
*/
for (i = 1; i < STRTAB_STE_DWORDS; i++)
dst[i] = cpu_to_le64(ent[i]);
ret = smmu_sync_ste(smmu, sid);
if (ret)
return ret;
WRITE_ONCE(dst[0], cpu_to_le64(ent[0]));
ret = smmu_sync_ste(smmu, sid);
if (ret)
dst[0] = 0;
return ret;
}
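/*
 * Tear the STE down in the reverse order: clear dword 0 first so the STE
 * becomes invalid, sync, then clear the remaining dwords.
 */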
static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, pkvm_handle_t domain_id,
struct kvm_hyp_iommu_domain *domain, u32 sid, u32 pasid)
{
u64 *dst;
int i, ret;
struct hyp_arm_smmu_v3_device *smmu = to_smmu(iommu);
dst = smmu_get_ste_ptr(smmu, sid);
if (!dst)
return -ENODEV;
dst[0] = 0;
ret = smmu_sync_ste(smmu, sid);
if (ret)
return ret;
for (i = 1; i < STRTAB_STE_DWORDS; i++)
dst[i] = 0;
return smmu_sync_ste(smmu, sid);
}
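/*
 * Initialise the io-pgtable for a new domain, stage-1 or stage-2 depending on
 * @type, with the configuration derived from this SMMU's features.
 */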
static int smmu_alloc_domain(pkvm_handle_t iommu_id,
			     struct io_pgtable_params **pgtable, int type)
{
struct kvm_hyp_iommu *iommu = smmu_id_to_iommu(iommu_id);
struct hyp_arm_smmu_v3_device *smmu = to_smmu(iommu);
int ret;
unsigned long ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
struct io_pgtable_cfg pgtable_cfg;
if (type == ARM_64_LPAE_S1) {
pgtable_cfg = (struct io_pgtable_cfg) {
.fmt = ARM_64_LPAE_S1,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = min_t(unsigned long, ias, VA_BITS),
.oas = smmu->ias,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &smmu_tlb_ops
};
ret = kvm_arm_io_pgtable_init_s1(&pgtable_cfg, &smmu->pgtable_s1);
*pgtable = &smmu->pgtable_s1.iop;
} else if (type == ARM_64_LPAE_S2) {
pgtable_cfg = (struct io_pgtable_cfg) {
			.fmt = ARM_64_LPAE_S2,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = smmu->ias,
.oas = smmu->oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &smmu_tlb_ops
};
ret = kvm_arm_io_pgtable_init_s2(&pgtable_cfg, &smmu->pgtable_s2);
*pgtable = &smmu->pgtable_s2.iop;
} else {
BUG();
}
if (ret)
return ret;
return 0;
}
struct kvm_iommu_ops smmu_ops = {
.init = smmu_init,
.get_iommu_by_id = smmu_id_to_iommu,
.alloc_iopt = kvm_arm_io_pgtable_alloc,
.free_iopt = kvm_arm_io_pgtable_free,
.iopt_size = kvm_arm_io_pgtable_size,
.attach_dev = smmu_attach_dev,
.detach_dev = smmu_detach_dev,
.alloc_domain = smmu_alloc_domain,
};