| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * Copyright (C) 2024 Google LLC |
| * Author: Mostafa Saleh <smostafa@google.com> |
| */ |
| #include <asm/arm-smmu-v3-regs.h> |
| |
| #include <asm/kvm_mmu.h> |
| #include <asm/kvm_pkvm.h> |
| |
| #include <linux/moduleparam.h> |
| #include <linux/of_address.h> |
| #include <linux/of_platform.h> |
| |
| #include "pkvm/smmuv3_nesting.h" |
| |
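| /* |
| * Resolve the EL2 (nVHE) address of a hypervisor symbol: through the |
| * pKVM module token when built as a module, or via the hypervisor VA |
| * of the kernel linear-map alias when built in. |
| */ |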
| #ifdef MODULE |
| static unsigned long pkvm_module_token; |
| |
| #define ksym_ref_addr_nvhe(x) \ |
| ((typeof(kvm_nvhe_sym(x)) *)(pkvm_el2_mod_va(&kvm_nvhe_sym(x), pkvm_module_token))) |
| #else |
| #define ksym_ref_addr_nvhe(x) \ |
| ((typeof(kvm_nvhe_sym(x)) *)(kern_hyp_va(lm_alias(&kvm_nvhe_sym(x))))) |
| #endif |
| |
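| /* Entry point and state shared with the EL2 side of this driver. */ |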
| int kvm_nvhe_sym(smmuv3_nesting_init_module)(const struct pkvm_module_ops *ops); |
| extern struct kvm_iommu_ops kvm_nvhe_sym(smmuv3_hyp_nesting_ops); |
| extern unsigned long kvm_nvhe_sym(smmu_v3_nested_count); |
| extern struct smmu_v3_nested *kvm_nvhe_sym(smmu_v3_nested_base); |
| #define smmu_v3_nested_base kvm_nvhe_sym(smmu_v3_nested_base) |
| |
| /* |
| * Decides whether the SMMU shares the CPU stage-2 page table or a |
| * separate page table is created for it. |
| */ |
| extern bool kvm_nvhe_sym(smmu_cpu_stage_2); |
| static bool smmu_cpu_stage_2; |
| module_param(smmu_cpu_stage_2, bool, 0); |
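| MODULE_PARM_DESC(smmu_cpu_stage_2, |
| "Share the CPU stage-2 page table with the SMMU instead of creating a separate one"); |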
| |
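| /* |
| * Walk the DT for "arm,smmu-v3" nodes and fill the shared descriptor |
| * array consumed by the EL2 driver. Returns the number of SMMUs found, |
| * zero if there are none, or a negative errno on failure. |
| */ |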
| static int smmuv3_describe_smmuv3(void) |
| { |
| struct device_node *np; |
| int smmu_v3_nested_count = 0, total_smmus = 0; |
| struct resource res; |
| void *cmdq; |
| int ret; |
| int order; |
| |
| for_each_compatible_node(np, NULL, "arm,smmu-v3") |
| total_smmus++; |
| |
| if (!total_smmus) |
| return 0; |
| |
| order = get_order(sizeof(*smmu_v3_nested_base) * total_smmus); |
| smmu_v3_nested_base = |
| (struct smmu_v3_nested *)__get_free_pages(GFP_KERNEL | |
| __GFP_ZERO, order); |
| if (!smmu_v3_nested_base) |
| return -ENOMEM; |
| |
| for_each_compatible_node(np, NULL, "arm,smmu-v3") { |
| ret = of_address_to_resource(np, 0, &res); |
| if (ret) { |
| of_node_put(np); |
| return ret; |
| } |
| |
| smmu_v3_nested_base[smmu_v3_nested_count].base = res.start; |
| if (resource_size(&res) != SZ_128K) |
| pr_err("SMMUv3(%pOF) has unsupported size\n", np); |
| if (of_dma_is_coherent(np)) |
| smmu_v3_nested_base[smmu_v3_nested_count].features |= |
| ARM_SMMU_FEAT_COHERENCY; |
| /* TODO: ioremap and read IDR1.CMDQS */ |
| order = get_order(((1 << PKVM_SMMU_MAX_CMDQ_SHIFT) * CMDQ_ENT_DWORDS) << 3); |
| cmdq = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
| if (!cmdq) { |
| of_node_put(np); |
| return -ENOMEM; |
| } |
| smmu_v3_nested_base[smmu_v3_nested_count].hyp_cmdq_base = |
| (u64 *)virt_to_phys(cmdq); |
| smmu_v3_nested_count++; |
| } |
| kvm_nvhe_sym(smmu_v3_nested_count) = total_smmus; |
| pr_err("Found SMMUv3: %d will be used for nesting\n", total_smmus); |
| |
| return total_smmus; |
| } |
| |
| static struct platform_driver smmuv3_nesting_driver; |
| |
| static int smmuv3_nesting_probe(struct platform_device *pdev) |
| { |
| /* Nothing to do; runtime PM is not enabled, so the device should be on. */ |
| return 0; |
| } |
| |
| static int smmuv3_nesting_init(void) |
| { |
| int ret; |
| struct kvm_hyp_memcache atomic_mc = {}; |
| int atomic_pages = 6000; /* Arbitrary for now. */ |
| int nr_smmus; |
| int nr_pages = 0; |
| bool registered; |
| |
| ret = platform_driver_probe(&smmuv3_nesting_driver, smmuv3_nesting_probe); |
| registered = !ret; |
| if (ret) |
| pr_err("smmuv3_nesting: Couldn't probe power domains: %d\n", ret); |
| |
| nr_smmus = smmuv3_describe_smmuv3(); |
| if (nr_smmus <= 0) { |
| ret = nr_smmus; |
| goto out_unregister; |
| } |
| |
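| /* |
| * Donate memory to the hypervisor up front, as EL2 cannot allocate from |
| * the host at runtime. Each topup_hyp_memcache(mc, nr, order) call below |
| * is assumed to add nr allocations of 2^order pages to the memcache |
| * handed to EL2. |
| */ |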
| /* For STEs. */ |
| nr_pages += nr_smmus; |
| ret = topup_hyp_memcache(&atomic_mc, nr_smmus, 10); |
| if (ret) |
| goto out_unregister; |
| |
| /* For command queue. */ |
| nr_pages += nr_smmus; |
| ret = topup_hyp_memcache(&atomic_mc, nr_smmus, 8); |
| if (ret) |
| goto out_unregister; |
| |
| /* For PGD. */ |
| nr_pages += nr_smmus; |
| ret = topup_hyp_memcache(&atomic_mc, nr_smmus, 3); |
| if (ret) |
| goto out_unregister; |
| |
| /* For L2 pointers. */ |
| nr_pages += 50; |
| ret = topup_hyp_memcache(&atomic_mc, 50, 2); |
| if (ret) |
| goto out_unregister; |
| |
| nr_pages += atomic_pages; |
| ret = topup_hyp_memcache(&atomic_mc, atomic_pages, 0); |
| if (ret) |
| goto out_unregister; |
| pr_info("smmuv3-nesting: Allocated %d MiB for atomic usage\n", |
| (nr_pages + (1 << 3)) >> 8); |
| |
| #ifdef MODULE |
| ret = pkvm_load_el2_module(kvm_nvhe_sym(smmuv3_nesting_init_module), |
| &pkvm_module_token); |
| |
| if (ret) { |
| pr_err("Failed to load SMMUv3 IOMMU EL2 module: %d\n", ret); |
| goto out_unregister; |
| } |
| #endif |
| /* For the io-pgtable struct. */ |
| ret = __pkvm_topup_hyp_alloc(1); |
| if (ret) |
| goto out_unregister; |
| |
| kvm_nvhe_sym(smmu_cpu_stage_2) = smmu_cpu_stage_2; |
| ret = kvm_iommu_init_hyp(ksym_ref_addr_nvhe(smmuv3_hyp_nesting_ops), |
| &atomic_mc, 0); |
| |
| out_unregister: |
| if (registered) |
| platform_driver_unregister(&smmuv3_nesting_driver); |
| return ret; |
| } |
| |
| static pkvm_handle_t smmuv3_get_iommu_id(struct device *dev) |
| { |
| /* Not supported, we are invisible to the kernel. */ |
| return 0; |
| } |
| |
| static void smmuv3_nesting_remove(void) |
| { |
| } |
| |
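| /* Host-side hooks invoked by the pKVM IOMMU core. */ |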
| struct kvm_iommu_driver smmuv3_nesting_ops = { |
| .init_driver = smmuv3_nesting_init, |
| .remove_driver = smmuv3_nesting_remove, |
| .get_iommu_id = smmuv3_get_iommu_id, |
| }; |
| |
| static int smmuv3_nesting_register(void) |
| { |
| return kvm_iommu_register_driver(&smmuv3_nesting_ops); |
| } |
| |
| static const struct of_device_id smmuv3_nested_of_match[] = { |
| { .compatible = "arm,smmu-v3-nested", }, |
| { }, |
| }; |
| |
| static struct platform_driver smmuv3_nesting_driver = { |
| .driver = { |
| .name = "smmuv3-nesting", |
| .of_match_table = smmuv3_nested_of_match, |
| }, |
| }; |
| |
| /* |
| * Registration must happen before deprivilege, i.e. before |
| * kvm_iommu_init_driver() runs. In the module case, the module must be |
| * loaded via pKVM early module loading, which loads it before that |
| * point. Built-in drivers use core_initcall() instead. |
| */ |
| #ifdef MODULE |
| module_init(smmuv3_nesting_register); |
| #else |
| core_initcall(smmuv3_nesting_register); |
| #endif |
| |
| MODULE_LICENSE("GPL"); |
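| MODULE_DESCRIPTION("pKVM SMMUv3 nesting driver"); |
| MODULE_AUTHOR("Mostafa Saleh <smostafa@google.com>"); |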