drivers: iommu: pviommu: Add pviommu_iova_to_phys
Add the iova_to_phys IOMMU op. As the hypervisor only has a collapsed
mapping, the IOVA -> IPA translation has to be maintained in the EL1
driver. A maple tree is used to hold the mapped intervals.
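For example (an illustrative sketch with made-up addresses, not part
of this patch), a three-page mapping at IOVA 0x1000 backed by IPA
0x80000 is stored as one inclusive interval, and a lookup inside it
adds the offset from the interval start to the stored base:

	/* Hypothetical addresses, for illustration only. */
	mtree_store_range(&mappings, 0x1000, 0x3fff, (void *)0x80000, GFP_KERNEL);
	/* iova_to_phys(0x2200) = 0x80000 + (0x2200 - 0x1000) = 0x81200 */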
Signed-off-by: Mostafa Saleh <smostafa@google.com>
diff --git a/drivers/iommu/pkvm-pviommu.c b/drivers/iommu/pkvm-pviommu.c
index 095b1e3..bc2ff2e 100644
--- a/drivers/iommu/pkvm-pviommu.c
+++ b/drivers/iommu/pkvm-pviommu.c
@@ -6,6 +6,7 @@
#include <linux/of_platform.h>
#include <linux/arm-smccc.h>
#include <linux/iommu.h>
+#include <linux/maple_tree.h>
#include <linux/pci.h>
#define FEAUTRE_PGSIZE_BITMAP 0x1
@@ -20,6 +21,7 @@ struct pviommu {
struct pviommu_domain {
struct iommu_domain domain;
unsigned long id; /* pKVM domain ID. */
+ struct maple_tree mappings; /* IOVA -> IPA */
};
struct pviommu_master {
@@ -29,6 +31,52 @@ struct pviommu_master {
struct pviommu_domain *domain;
};
+/* Ranges are inclusive for all functions. */
+static void pviommu_domain_insert_map(struct pviommu_domain *pv_domain,
+ u64 start, u64 end, u64 val)
+{
+ if (end < start)
+ return;
+
+ mtree_store_range(&pv_domain->mappings, start, end, (void *)val, GFP_KERNEL);
+}
+
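+/*
+ * Remove [start, end] from the tree. An erased interval may extend
+ * past the removed range on either side; any leftover head or tail is
+ * re-inserted, with the stored base adjusted for the tail piece.
+ */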
+static void pviommu_domain_remove_map(struct pviommu_domain *pv_domain,
+ u64 start, u64 end)
+{
+ /* Range can cover multiple entries. */
+ while (start < end) {
+ MA_STATE(mas, &pv_domain->mappings, start, ULONG_MAX);
+ u64 entry = (u64)mas_find(&mas, start);
+ u64 old_start, old_end;
+
+ /* No interval covers @start; nothing more to remove. */
+ if (!entry)
+ break;
+
+ old_start = mas.index;
+ old_end = mas.last;
+ mas_erase(&mas);
+
+ /* Re-insert any part of the erased interval outside [start, end]. */
+ if (start > old_start)
+ mtree_store_range(&pv_domain->mappings, old_start, start - 1, entry, GFP_KERNEL);
+
+ if (old_end > end)
+ mtree_store_range(&pv_domain->mappings, end + 1, old_end, entry + (end - old_start + 1), GFP_KERNEL);
+
+ start = old_end + 1;
+ }
+}
+
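+/*
+ * Translate @key: find the interval containing it and add the offset
+ * from the interval start to the stored base address.
+ */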
+static u64 pviommu_domain_find(struct pviommu_domain *pv_domain, u64 key)
+{
+ MA_STATE(mas, &pv_domain->mappings, key, ULONG_MAX);
+ u64 entry = (u64)mas_find(&mas, key);
+
+ /* No interval covers @key. */
+ if (!entry)
+ return 0;
+
+ return (key - mas.index) + entry;
+}
+
static int pviommu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -51,6 +99,10 @@ static int pviommu_map_pages(struct iommu_domain *domain, unsigned long iova,
if (ret && (ret != -EAGAIN))
break;
}
+
+ if (*mapped)
+ pviommu_domain_insert_map(pv_domain, iova, iova + *mapped - 1, paddr);
+
return ret;
}
@@ -74,12 +126,18 @@ static size_t pviommu_unmap_pages(struct iommu_domain *domain, unsigned long iov
if (ret && (ret != -EAGAIN))
break;
}
+
+ if (total_unmapped)
+ pviommu_domain_remove_map(pv_domain, iova, iova + total_unmapped - 1);
+
return total_unmapped;
}
static phys_addr_t pviommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
- return 0;
+ struct pviommu_domain *pv_domain = container_of(domain, struct pviommu_domain, domain);
+
+ return pviommu_domain_find(pv_domain, iova);
}
static void pviommu_domain_free(struct iommu_domain *domain)
@@ -91,6 +149,8 @@ static void pviommu_domain_free(struct iommu_domain *domain)
pv_domain->id, &res);
if (res.a0 != SMCCC_RET_SUCCESS)
pr_err("Failed to free domain %d\n", res.a0);
+
+ mtree_destroy(&pv_domain->mappings);
kfree(pv_domain);
}
@@ -192,10 +252,13 @@ static struct iommu_domain *pviommu_domain_alloc(unsigned int type)
type != IOMMU_DOMAIN_DMA)
return NULL;
+
pv_domain = kzalloc(sizeof(*pv_domain), GFP_KERNEL);
if (!pv_domain)
return NULL;
+ mt_init(&pv_domain->mappings);
+
arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_IOMMU_ALLOC_DOMAIN_FUNC_ID, &res);
if (res.a0 != SMCCC_RET_SUCCESS) {