// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
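
/*
 * Note on how this default is consumed (sketch only, not code in this file):
 * the generic get_dma_ops() helper in <linux/dma-mapping.h> prefers a
 * per-device ops pointer and only falls back to this global on x86, roughly:
 *
 *	const struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		if (dev->dma_ops)
 *			return dev->dma_ops;
 *		return get_arch_dma_ops(dev->bus);	// x86: returns dma_ops
 *	}
 *
 * A NULL result means the device goes through the dma-direct path.
 */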

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * Boundaries of the linker section that the IOMMU_INIT*() macros in
 * <asm/iommu_table.h> populate with detection entries.
 */
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/*
 * Called early during boot: sort the detection table by its dependencies,
 * sanity-check it, then run each entry's ->detect() routine.  Entries that
 * report a hit are marked IOMMU_DETECTED and get their ->early_init()
 * callback; an entry flagged IOMMU_FINISH_IF_DETECTED ends the scan once it
 * has fired.
 */
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
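
/*
 * Nothing adds entries to __iommu_table[] at run time; detection code places
 * them in the .iommu_table section with the IOMMU_INIT*() macros from
 * <asm/iommu_table.h>.  Illustrative sketch only; the my_iommu_* names are
 * made up and do not exist in the tree:
 *
 *	static int __init my_iommu_detect(void)
 *	{
 *		return my_iommu_present() ? 1 : 0;	// > 0 marks IOMMU_DETECTED
 *	}
 *
 *	IOMMU_INIT_FINISH(my_iommu_detect,	// ->detect, run in the loop above
 *			  NULL,			// no entry to sort after
 *			  my_iommu_early_init,	// ->early_init
 *			  my_iommu_late_init);	// ->late_init, run in pci_iommu_init()
 *
 * The _FINISH variant sets IOMMU_FINISH_IF_DETECTED, so the scan above stops
 * once this detector reports a hit.
 */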

/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
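
/*
 * Example strings accepted by the parser above (usage sketch; the option
 * names come straight from the strncmp() checks, the combinations are only
 * illustrations):
 *
 *	iommu=off		disable IOMMU use entirely (no_iommu = 1)
 *	iommu=pt		default to passthrough / identity mapping
 *	iommu=soft		use the swiotlb bounce buffers (CONFIG_SWIOTLB)
 *	iommu=force,merge	force IOMMU use and allow merging
 *	iommu=nopanic		do not panic on mapping overflows
 *
 * Options are comma separated and matched by prefix.
 */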

/*
 * Runs as a rootfs_initcall, i.e. after the PCI subsystem is up: first let
 * the IOMMU driver chosen during detection finish its setup through the
 * x86_init.iommu.iommu_init hook, then call ->late_init() for every table
 * entry whose ->detect() fired in pci_iommu_alloc().
 */
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
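
/*
 * What the quirk achieves, as an illustrative sketch: bus_dma_limit is taken
 * into account by the common DMA mapping code, so a device behind an affected
 * VIA bridge is effectively limited to 32-bit DMA addresses even when its
 * driver asks for more, e.g.:
 *
 *	// Driver requests 64-bit DMA as usual; addressing above 4 GiB is
 *	// still avoided because dev->bus_dma_limit was set by the fixup.
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		dev_warn(&pdev->dev, "no suitable DMA mask available\n");
 *
 * dma_set_mask_and_coherent() is the standard helper from
 * <linux/dma-mapping.h>; the clamping happens in the common DMA code, not in
 * this file.
 */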
#endif