drivers/arm-smmu-v3-nesting: WIP: Don't eagerly disable SMMUs

When the host clears SMMUEN or CMDQEN, keep the hypervisor's shadow
stream table and command queue allocated and leave the host's tables
shared, instead of freeing and unsharing them, so they can be reused on
the next enable. Changing the command queue size at run time is not
supported and now triggers a WARN_ON.

Change-Id: Ie1b45d532b9d000afb44f45fa2ac5d2426268842
Signed-off-by: Mostafa Saleh <smostafa@google.com>
diff --git a/drivers/misc/pkvm/pkvm/smmuv3_nesting.c b/drivers/misc/pkvm/pkvm/smmuv3_nesting.c
index 3d202d9..fdce238 100644
--- a/drivers/misc/pkvm/pkvm/smmuv3_nesting.c
+++ b/drivers/misc/pkvm/pkvm/smmuv3_nesting.c
@@ -595,17 +595,6 @@ static int smmu_share_pages(phys_addr_t addr, size_t size)
return 0;
}
-static int smmu_unshare_pages(phys_addr_t addr, size_t size)
-{
- int i;
- size_t nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- for(i = 0 ; i < nr_pages ; ++i)
- WARN_ON(__pkvm_host_unshare_hyp((addr + i * PAGE_SIZE) >> PAGE_SHIFT));
-
- return 0;
-}
-
static bool is_smmu_enabled(struct smmu_v3_nested *smmu)
{
return FIELD_GET(CR0_SMMUEN, smmu->cr0);
@@ -790,30 +779,13 @@ static void alloc_2lvl_strtab(struct smmu_v3_nested *smmu)
smmu_share_pages(host_ste_base, l1_size);
}
-static void free_linear_ste(struct smmu_v3_nested *smmu)
-{
- size_t ste_size = strtab_size(smmu);
-
- kvm_iommu_reclaim_pages_atomic(smmu->hyp_ste_base, get_order(ste_size));
- smmu->hyp_ste_base = NULL;
- smmu_unshare_pages(strtab_host_base(smmu), ste_size);
-}
-
-static void free_2lvl_ste(struct smmu_v3_nested *smmu)
-{
- size_t ste_l1_size = strtab_l1_size(smmu);
- phys_addr_t host_ste_base = strtab_host_base(smmu);
-
- /* TBD: Iterate over l2 descriptors and unshare and free them if valid. */
- kvm_iommu_reclaim_pages_atomic(smmu->hyp_ste_base, get_order(ste_l1_size));
- smmu->hyp_ste_base = NULL;
- smmu_unshare_pages(host_ste_base, ste_l1_size);
-}
-
static void emulate_strtab_smmu_enable(struct smmu_v3_nested *smmu)
{
u32 fmt = FIELD_GET(STRTAB_BASE_CFG_FMT, smmu->host_ste_cfg);
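+
+	/*
+	 * The shadow stream table is kept across SMMU disable; if it was
+	 * already allocated by a previous enable, just reprogram it.
+	 */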
+ if (smmu->hyp_ste_base)
+ goto out_enable;
+
if (fmt == STRTAB_BASE_CFG_FMT_2LVL) {
alloc_2lvl_strtab(smmu);
} else if (fmt == STRTAB_BASE_CFG_FMT_LINEAR) {
@@ -823,29 +795,13 @@ static void emulate_strtab_smmu_enable(struct smmu_v3_nested *smmu)
BUG();
}
+out_enable:
writeq_relaxed(hyp_virt_to_phys(smmu->hyp_ste_base),
smmu->va + ARM_SMMU_STRTAB_BASE);
writel_relaxed(smmu->host_ste_cfg,
smmu->va + ARM_SMMU_STRTAB_BASE_CFG);
}
-static void emulate_strtab_smmu_disable(struct smmu_v3_nested *smmu)
-{
- u32 fmt = FIELD_GET(STRTAB_BASE_CFG_FMT, smmu->host_ste_cfg);
-
- if (fmt == STRTAB_BASE_CFG_FMT_2LVL) {
- free_2lvl_ste(smmu);
- } else if (fmt == STRTAB_BASE_CFG_FMT_LINEAR) {
- free_linear_ste(smmu);
- } else {
- /* Wrong format. */
- BUG();
- }
-
- writeq_relaxed(0, smmu->va + ARM_SMMU_STRTAB_BASE);
- writel_relaxed(0, smmu->va + ARM_SMMU_STRTAB_BASE_CFG);
-}
-
static bool hyp_handle_command(struct smmu_v3_nested *smmu, u64 *command)
{
u64 type = FIELD_GET(CMDQ_0_OP, command[0]);
@@ -948,6 +904,11 @@ static void emulate_cmdq_smmu_enable(struct smmu_v3_nested *smmu)
/* SMMU enabled without CMDQ? */
WARN_ON(!is_cmdq_enabled(smmu));
+ if (smmu->cmdq_log2size) {
+		/* Command queue changes at run time are not supported. */
+		WARN_ON(smmu->cmdq_log2size !=
+			min_t(u32, smmu->host_cmdq_base & Q_BASE_LOG2SIZE,
+			      PKVM_SMMU_MAX_CMDQ_SHIFT));
+ goto out_enable;
+ }
/* TODO: coherency. */
smmu->cmdq_log2size = smmu->host_cmdq_base & Q_BASE_LOG2SIZE;
smmu->cmdq_log2size = min(smmu->cmdq_log2size, PKVM_SMMU_MAX_CMDQ_SHIFT);
@@ -955,6 +916,8 @@ static void emulate_cmdq_smmu_enable(struct smmu_v3_nested *smmu)
hyp_cmdq_size = (1 << smmu->cmdq_log2size) * CMDQ_ENT_DWORDS * 8;
smmu_share_pages(smmu->host_cmdq_base & Q_BASE_ADDR_MASK, hyp_cmdq_size);
smmu->cmdq_prod = 0;
+
+out_enable:
writeq_relaxed(hyp_virt_to_phys(smmu->hyp_cmdq_base) |
FIELD_PREP(Q_BASE_LOG2SIZE, smmu->cmdq_log2size),
smmu->va + ARM_SMMU_CMDQ_BASE);
@@ -962,27 +925,11 @@ static void emulate_cmdq_smmu_enable(struct smmu_v3_nested *smmu)
writel_relaxed(0, smmu->va + ARM_SMMU_CMDQ_CONS);
}
-static void emulate_cmdq_smmu_disable(struct smmu_v3_nested *smmu)
-{
- size_t hyp_cmdq_size;
-
- /* Why CMDQ still enabled? */
- WARN_ON(is_cmdq_enabled(smmu));
-
- hyp_cmdq_size = (1 << smmu->cmdq_log2size) * CMDQ_ENT_DWORDS * 8;
- smmu_unshare_pages(smmu->host_cmdq_base & Q_BASE_ADDR_MASK, hyp_cmdq_size);
-}
-
static void emulate_smmu_enable(struct smmu_v3_nested *smmu)
{
emulate_strtab_smmu_enable(smmu);
}
-static void emulate_smmu_disable(struct smmu_v3_nested *smmu)
-{
- emulate_strtab_smmu_disable(smmu);
-}
-
static bool smmuv3_nesting_dabt_device(struct smmu_v3_nested *smmu,
struct kvm_cpu_context *host_ctxt,
u64 esr, u32 off)
@@ -1066,12 +1013,8 @@ static bool smmuv3_nesting_dabt_device(struct smmu_v3_nested *smmu,
smmu->cr0 = val;
if (!last_cmdq && is_cmdq_enabled(smmu))
emulate_cmdq_smmu_enable(smmu);
- else if (last_cmdq && !is_cmdq_enabled(smmu))
- emulate_cmdq_smmu_disable(smmu);
if (!last_smmu && is_smmu_enabled(smmu))
emulate_smmu_enable(smmu);
- else if (last_smmu && !is_smmu_enabled(smmu))
- emulate_smmu_disable(smmu);
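+		/*
+		 * Disable transitions don't tear anything down; the shadow
+		 * state is kept and reused on the next enable.
+		 */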
}
mask = read_write;
BUG_ON(len != sizeof(u32));