/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NVHE_IOMMU_H__
#define __ARM64_KVM_NVHE_IOMMU_H__

#include <kvm/iommu.h>
#include <linux/io-pgtable.h>

#include <asm/kvm_pgtable.h>

/*
 * Most domain types are reserved for drivers; the generic types below
 * tell a driver "don't care, pick anything suitable".
 * They are typically used for guests, which are not aware of the
 * physical IOMMU topology.
 */
#define DOMAIN_ANY_TYPE		(-1)
/* Domains that are suitable for identity mapping. */
#define DOMAIN_IDMAPPED_TYPE	(-2)

#if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
#include <linux/io-pgtable-arm.h>

int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
			    struct arm_lpae_io_pgtable *data);
int kvm_arm_io_pgtable_alloc(struct io_pgtable *iop, unsigned long pgd_hva);
int kvm_arm_io_pgtable_free(struct io_pgtable *iop);
size_t kvm_arm_io_pgtable_size(struct io_pgtable *iopt);
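/*
 * Illustrative lifecycle sketch for the helpers above (not a real call
 * site; it assumes arm_lpae_io_pgtable embeds its io_pgtable as "iop",
 * and that @cfg and @pgd_hva were provided by the host):
 *
 *	struct arm_lpae_io_pgtable data;
 *	int ret;
 *
 *	ret = kvm_arm_io_pgtable_init(&cfg, &data);
 *	if (!ret)
 *		ret = kvm_arm_io_pgtable_alloc(&data.iop, pgd_hva);
 *	...
 *	kvm_arm_io_pgtable_free(&data.iop);
 */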
#endif /* CONFIG_ARM_SMMU_V3_PKVM */

int kvm_iommu_init(struct kvm_iommu_ops *ops,
		   struct kvm_hyp_memcache *mc,
		   unsigned long init_arg);
int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu);
void *kvm_iommu_donate_pages(u8 order, bool fill_req);
void kvm_iommu_reclaim_pages(void *p, u8 order);
void *kvm_iommu_donate_pgtable_pages(struct io_pgtable *iop, u8 order, bool fill_req);
void hyp_iommu_lock(struct kvm_hyp_iommu *iommu);
void hyp_iommu_unlock(struct kvm_hyp_iommu *iommu);
void hyp_assert_iommu_lock_held(struct kvm_hyp_iommu *iommu);
void hyp_domains_lock(void);
void hyp_domains_unlock(void);
int kvm_iommu_request(struct kvm_hyp_req *req);
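/*
 * Allocation sketch (illustrative): hyp-side IOMMU allocations are
 * backed by pages donated by the host.  When the pool is empty and
 * @fill_req is true, a refill request can be posted for the host to
 * service; the exact retry protocol is caller/driver specific:
 *
 *	void *p = kvm_iommu_donate_pages(0, true);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kvm_iommu_reclaim_pages(p, 0);
 */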

/* Hypercall handlers */
int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, u32 type);
int kvm_iommu_free_domain(pkvm_handle_t domain_id);
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 u32 endpoint_id, u32 pasid, u32 pasid_bits, u64 flags);
int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 u32 endpoint_id, u32 pasid);
size_t kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
			   phys_addr_t paddr, size_t pgsize,
			   size_t pgcount, int prot);

int kvm_iommu_map_pages_ret(pkvm_handle_t domain_id, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize,
			    size_t pgcount, int prot, size_t *mapped);

size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
			     size_t pgsize, size_t pgcount);
phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
int kvm_iommu_block_dev(pkvm_handle_t iommu_id, u32 endpoint_id,
			struct pkvm_hyp_vm *hyp_vm);
int kvm_iommu_alloc_guest_domain(pkvm_handle_t *ret_domain);

int kvm_iommu_free_guest_domains(struct pkvm_hyp_vm *hyp_vm);
u64 kvm_iommu_id_to_token(pkvm_handle_t id);
bool kvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
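/*
 * Map-path sketch (illustrative, assuming the size_t return value of
 * kvm_iommu_map_pages() is the number of bytes actually mapped, which
 * may be less than requested, e.g. when donated pages run out).
 * Callers should be prepared to loop on partial progress; all variables
 * below are hypothetical:
 *
 *	size_t mapped, total = 0;
 *
 *	while (total < size) {
 *		mapped = kvm_iommu_map_pages(domain_id, iova + total,
 *					     paddr + total, PAGE_SIZE,
 *					     (size - total) / PAGE_SIZE, prot);
 *		if (!mapped)
 *			break;
 *		total += mapped;
 *	}
 */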

/* ID mapping: keep the IOMMU identity map in sync with host stage-2 changes. */
void kvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,
				 enum kvm_pgtable_prot prot);

struct kvm_iommu_tlb_cookie {
	pkvm_handle_t domain_id;
	struct kvm_hyp_iommu_domain *domain;
};

struct kvm_iommu_ops {
	int (*init)(unsigned long arg);
	struct kvm_hyp_iommu *(*get_iommu_by_id)(pkvm_handle_t smmu_id);
	int (*free_domain)(struct kvm_hyp_iommu_domain *domain, pkvm_handle_t domain_id);
	u64 (*get_iommu_token_by_id)(pkvm_handle_t smmu_id);
	int (*attach_dev)(struct kvm_hyp_iommu *iommu, pkvm_handle_t domain_id,
			  struct kvm_hyp_iommu_domain *domain, u32 endpoint_id, u32 pasid,
			  u32 pasid_bits, u64 flags);
	int (*detach_dev)(struct kvm_hyp_iommu *iommu, pkvm_handle_t domain_id,
			  struct kvm_hyp_iommu_domain *domain, u32 endpoint_id, u32 pasid);
	int (*alloc_domain)(struct kvm_hyp_iommu_domain *domain, pkvm_handle_t domain_id,
			    unsigned long pgd_hva, unsigned long pgd_size, u32 type);
	int (*block_dev)(struct kvm_hyp_iommu *iommu, u32 endpoint_id);
	unsigned long (*pgd_size)(int type);
	bool (*dabt_handler)(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
	int (*suspend)(struct kvm_hyp_iommu *iommu);
	int (*resume)(struct kvm_hyp_iommu *iommu);
};
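/*
 * A hyp IOMMU driver provides its ops at initialization.  Minimal
 * registration sketch (the my_smmu_* names are hypothetical, and only
 * a subset of the callbacks is shown):
 *
 *	static struct kvm_iommu_ops my_smmu_ops = {
 *		.init		 = my_smmu_init,
 *		.get_iommu_by_id = my_smmu_get_by_id,
 *		.alloc_domain	 = my_smmu_alloc_domain,
 *		.attach_dev	 = my_smmu_attach_dev,
 *		.detach_dev	 = my_smmu_detach_dev,
 *	};
 *
 * which the hyp setup path then passes to kvm_iommu_init().
 */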

extern struct kvm_iommu_ops *kvm_iommu_ops;

#define domain_to_iopt(_domain, _domain_id)			\
	(struct io_pgtable) {					\
		.ops = &(_domain)->pgtable->ops,		\
		.pgd = (_domain)->pgd,				\
		.cookie = &(struct kvm_iommu_tlb_cookie) {	\
			.domain_id = (_domain_id),		\
			.domain = (_domain),			\
		},						\
	}
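/*
 * domain_to_iopt() builds a transient struct io_pgtable so that generic
 * io-pgtable operations can run against a hyp domain.  Illustrative use
 * (assuming the mainline io_pgtable_ops signatures; locking elided):
 *
 *	struct io_pgtable iopt = domain_to_iopt(domain, domain_id);
 *
 *	ret = iopt.ops->unmap_pages(iopt.ops, iova, pgsize, pgcount, NULL);
 *
 * Note the TLB cookie is a compound literal, so the resulting
 * io_pgtable is only valid within the enclosing block.
 */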

extern struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops;

#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */