/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
 */
#ifndef __LINUX_IOMMU_PRIV_H
#define __LINUX_IOMMU_PRIV_H

#include <linux/iommu.h>

Yi Liu | 92766e1 | 2023-08-18 03:10:29 -0700 | [diff] [blame] | 9 | static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) |
| 10 | { |
| 11 | /* |
| 12 | * Assume that valid ops must be installed if iommu_probe_device() |
| 13 | * has succeeded. The device ops are essentially for internal use |
| 14 | * within the IOMMU subsystem itself, so we should be able to trust |
| 15 | * ourselves not to misuse the helper. |
| 16 | */ |
| 17 | return dev->iommu->iommu_dev->ops; |
| 18 | } |
| 19 | |
/*
 * NOTE(review): prototypes only — semantics live at the definitions.
 * iommu_group_replace_domain() appears to swap the domain attached to
 * @group for @new_domain; confirm exact attach/detach ordering against
 * the implementation before relying on it.
 */
int iommu_group_replace_domain(struct iommu_group *group,
			       struct iommu_domain *new_domain);

/*
 * Register/unregister an iommu_device bound to a specific bus_type,
 * with a notifier_block presumably for bus device add/remove events —
 * TODO confirm against the implementation.
 */
int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops,
			      const struct bus_type *bus,
			      struct notifier_block *nb);
void iommu_device_unregister_bus(struct iommu_device *iommu,
				 const struct bus_type *bus,
				 struct notifier_block *nb);
| 30 | |
#endif /* __LINUX_IOMMU_PRIV_H */