| # SPDX-License-Identifier: GPL-2.0-only | 
 |  | 
 | config NO_DMA | 
 | 	bool | 
 |  | 
 | config HAS_DMA | 
 | 	bool | 
 | 	depends on !NO_DMA | 
 | 	default y | 
 |  | 
 | config DMA_OPS | 
 | 	depends on HAS_DMA | 
 | 	bool | 
 |  | 
 | # | 
 | # IOMMU drivers that can bypass the IOMMU code and optionally use the direct | 
 | # mapping fast path should select this option and set the dma_ops_bypass | 
 | # flag in struct device where applicable | 
 | # | 
 | config DMA_OPS_BYPASS | 
 | 	bool | 
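
# A minimal sketch (hypothetical IOMMU driver code) of what setting the
# dma_ops_bypass flag amounts to: the driver opts a device into the fast
# path, and the core then uses dma-direct whenever the device's DMA mask
# allows it.
#
#	dev->dma_ops_bypass = true;	/* use dma-direct when the mask allows */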
 |  | 
# Lets the platform IOMMU driver choose between the direct (bypass) mapping
# and the IOMMU
 | config ARCH_HAS_DMA_MAP_DIRECT | 
 | 	bool | 
 |  | 
 | config NEED_SG_DMA_FLAGS | 
 | 	bool | 
 |  | 
 | config NEED_SG_DMA_LENGTH | 
 | 	bool | 
 |  | 
 | config NEED_DMA_MAP_STATE | 
 | 	bool | 
 |  | 
 | config ARCH_DMA_ADDR_T_64BIT | 
 | 	def_bool 64BIT || PHYS_ADDR_T_64BIT | 
 |  | 
 | config ARCH_HAS_DMA_SET_MASK | 
 | 	bool | 
 |  | 
 | # | 
 | # Select this option if the architecture needs special handling for | 
 | # DMA_ATTR_WRITE_COMBINE.  Normally the "uncached" mapping should be what | 
 | # people think of when saying write combine, so very few platforms should | 
 | # need to enable this. | 
 | # | 
 | config ARCH_HAS_DMA_WRITE_COMBINE | 
 | 	bool | 
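
# For reference, a driver asks for write-combined memory through the generic
# DMA API; a minimal sketch (illustrative driver code only):
#
#	void *vaddr = dma_alloc_wc(dev, size, &dma_handle, GFP_KERNEL);
#
# dma_alloc_wc() wraps dma_alloc_attrs() with DMA_ATTR_WRITE_COMBINE, which
# is where any architecture-specific handling selected here comes into play.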
 |  | 
 | # | 
# Select if the architecture provides the arch_dma_mark_clean hook
 | # | 
 | config ARCH_HAS_DMA_MARK_CLEAN | 
 | 	bool | 
 |  | 
 | config DMA_DECLARE_COHERENT | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_SETUP_DMA_OPS | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_TEARDOWN_DMA_OPS | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_SYNC_DMA_FOR_DEVICE | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_SYNC_DMA_FOR_CPU | 
 | 	bool | 
 | 	select NEED_DMA_MAP_STATE | 
 |  | 
 | config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_DMA_PREP_COHERENT | 
 | 	bool | 
 |  | 
 | config ARCH_HAS_FORCE_DMA_UNENCRYPTED | 
 | 	bool | 
 |  | 
 | # | 
 | # Select this option if the architecture assumes DMA devices are coherent | 
 | # by default. | 
 | # | 
 | config ARCH_DMA_DEFAULT_COHERENT | 
 | 	bool | 
 |  | 
 | config SWIOTLB | 
 | 	bool | 
 | 	select NEED_DMA_MAP_STATE | 
 |  | 
 | config SWIOTLB_DYNAMIC | 
 | 	bool "Dynamic allocation of DMA bounce buffers" | 
 | 	default n | 
 | 	depends on SWIOTLB | 
 | 	help | 
	  This enables dynamic resizing of the software IO TLB. The kernel
	  starts with one memory pool at boot and allocates additional pools
	  as needed. To reduce run-time kernel memory requirements, you may
	  have to specify a smaller initial pool size with "swiotlb=" on the
	  kernel command line.
 |  | 
 | 	  If unsure, say N. | 
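
# Example (the value is illustrative): booting with "swiotlb=4096" limits the
# initial pool to 4096 I/O TLB slots; further pools are then allocated on
# demand.  See Documentation/admin-guide/kernel-parameters.txt for the full
# "swiotlb=" syntax.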
 |  | 
 | config DMA_BOUNCE_UNALIGNED_KMALLOC | 
 | 	bool | 
 | 	depends on SWIOTLB | 
 |  | 
 | config DMA_RESTRICTED_POOL | 
 | 	bool "DMA Restricted Pool" | 
 | 	depends on OF && OF_RESERVED_MEM && SWIOTLB | 
 | 	help | 
 | 	  This enables support for restricted DMA pools which provide a level of | 
 | 	  DMA memory protection on systems with limited hardware protection | 
 | 	  capabilities, such as those lacking an IOMMU. | 
 |  | 
 | 	  For more information see | 
 | 	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt> | 
 | 	  and <kernel/dma/swiotlb.c>. | 
 | 	  If unsure, say "n". | 
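
# A restricted pool is typically declared as a reserved-memory node with the
# "restricted-dma-pool" compatible and referenced from the consuming device
# via its memory-region property; see the binding document above for the
# authoritative details.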
 |  | 
 | # | 
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the page tables.
 | # | 
 | config DMA_NONCOHERENT_MMAP | 
 | 	default y if !MMU | 
 | 	bool | 
 |  | 
 | config DMA_COHERENT_POOL | 
 | 	select GENERIC_ALLOCATOR | 
 | 	bool | 
 |  | 
 | config DMA_GLOBAL_POOL | 
 | 	select DMA_DECLARE_COHERENT | 
 | 	depends on !ARCH_HAS_DMA_SET_UNCACHED | 
 | 	depends on !DMA_DIRECT_REMAP | 
 | 	bool | 
 |  | 
 | config DMA_DIRECT_REMAP | 
 | 	bool | 
 | 	select DMA_COHERENT_POOL | 
 | 	select DMA_NONCOHERENT_MMAP | 
 |  | 
 | # | 
 | # Fallback to arch code for DMA allocations.  This should eventually go away. | 
 | # | 
 | config ARCH_HAS_DMA_ALLOC | 
 | 	depends on !ARCH_HAS_DMA_SET_UNCACHED | 
 | 	depends on !DMA_DIRECT_REMAP | 
 | 	depends on !DMA_GLOBAL_POOL | 
 | 	bool | 
 |  | 
 | config DMA_CMA | 
 | 	bool "DMA Contiguous Memory Allocator" | 
 | 	depends on HAVE_DMA_CONTIGUOUS && CMA | 
 | 	help | 
	  This enables the Contiguous Memory Allocator, which allows drivers
	  to allocate big physically-contiguous blocks of memory for use with
	  hardware components that support neither I/O mapping nor
	  scatter-gather.
 |  | 
 | 	  You can disable CMA by specifying "cma=0" on the kernel's command | 
 | 	  line. | 
 |  | 
 | 	  For more information see <kernel/dma/contiguous.c>. | 
 | 	  If unsure, say "n". | 
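
# Example (the size is illustrative): "cma=64M" on the kernel command line
# reserves a 64 MiB default contiguous area, while "cma=0" disables it.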
 |  | 
if DMA_CMA
 |  | 
 | config DMA_NUMA_CMA | 
 | 	bool "Enable separate DMA Contiguous Memory Area for NUMA Node" | 
 | 	depends on NUMA | 
 | 	help | 
	  Enable this option to create per-NUMA-node CMA areas so that devices
	  on a NUMA node can get local memory through the DMA coherent APIs.

	  You can set the size of the per-node CMA areas by specifying
	  "cma_pernuma=size", or set a node id and its CMA size by specifying
	  "numa_cma=<node>:size[,<node>:size]" on the kernel's command line.
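
# Examples (sizes are illustrative): "cma_pernuma=16M" reserves a 16 MiB CMA
# area per node, while "numa_cma=0:64M,1:32M" reserves 64 MiB on node 0 and
# 32 MiB on node 1.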
 |  | 
 | comment "Default contiguous memory area size:" | 
 |  | 
 | config CMA_SIZE_MBYTES | 
	int "Size in Megabytes"
 | 	depends on !CMA_SIZE_SEL_PERCENTAGE | 
 | 	default 0 if X86 | 
 | 	default 16 | 
 | 	help | 
	  Defines the size (in MiB) of the default memory area for the
	  Contiguous Memory Allocator.  If a size of 0 is selected, CMA is
	  disabled by default, but it can be enabled by passing cma=size[MG]
	  to the kernel.

 | config CMA_SIZE_PERCENTAGE | 
 | 	int "Percentage of total memory" | 
 | 	depends on !CMA_SIZE_SEL_MBYTES | 
 | 	default 0 if X86 | 
 | 	default 10 | 
 | 	help | 
 | 	  Defines the size of the default memory area for Contiguous Memory | 
 | 	  Allocator as a percentage of the total memory in the system. | 
 | 	  If 0 percent is selected, CMA is disabled by default, but it can be | 
 | 	  enabled by passing cma=size[MG] to the kernel. | 
 |  | 
 | choice | 
 | 	prompt "Selected region size" | 
 | 	default CMA_SIZE_SEL_MBYTES | 
 |  | 
 | config CMA_SIZE_SEL_MBYTES | 
	bool "Use megabytes value only"
 |  | 
 | config CMA_SIZE_SEL_PERCENTAGE | 
 | 	bool "Use percentage value only" | 
 |  | 
 | config CMA_SIZE_SEL_MIN | 
 | 	bool "Use lower value (minimum)" | 
 |  | 
 | config CMA_SIZE_SEL_MAX | 
 | 	bool "Use higher value (maximum)" | 
 |  | 
 | endchoice | 
 |  | 
 | config CMA_ALIGNMENT | 
 | 	int "Maximum PAGE_SIZE order of alignment for contiguous buffers" | 
 | 	range 2 12 | 
 | 	default 8 | 
 | 	help | 
	  The DMA mapping framework by default aligns all buffers to the
	  smallest PAGE_SIZE order which is greater than or equal to the
	  requested buffer size. This works well for buffers up to a few
	  hundred kilobytes, but for larger buffers it is just a waste of
	  memory. With this parameter you can specify the maximum PAGE_SIZE
	  order for contiguous buffers. Larger buffers will be aligned only
	  to this specified order. The order is expressed as a power of two
	  multiplied by the PAGE_SIZE.

	  For example, if your system defaults to 4KiB pages, an order value
	  of 8 means that buffers will be aligned up to 1MiB only.
 |  | 
 | 	  If unsure, leave the default value "8". | 
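
# In other words, the alignment cap is PAGE_SIZE << CMA_ALIGNMENT; with 4 KiB
# pages and the default value of 8 that is 4 KiB << 8 = 1 MiB.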
 |  | 
 | endif | 
 |  | 
 | config DMA_API_DEBUG | 
 | 	bool "Enable debugging of DMA-API usage" | 
 | 	select NEED_DMA_MAP_STATE | 
 | 	help | 
 | 	  Enable this option to debug the use of the DMA API by device drivers. | 
 | 	  With this option you will be able to detect common bugs in device | 
 | 	  drivers like double-freeing of DMA mappings or freeing mappings that | 
 | 	  were never allocated. | 
 |  | 
	  This option causes a performance degradation.  Use it only if you
	  want to debug device drivers and DMA interactions.
 |  | 
 | 	  If unsure, say N. | 
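
# A minimal sketch (hypothetical driver code) of the kind of bug dma-debug
# reports, here a double unmap of the same mapping:
#
#	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
#	...
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);	/* flagged */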
 |  | 
 | config DMA_API_DEBUG_SG | 
 | 	bool "Debug DMA scatter-gather usage" | 
 | 	default y | 
 | 	depends on DMA_API_DEBUG | 
 | 	help | 
 | 	  Perform extra checking that callers of dma_map_sg() have respected the | 
 | 	  appropriate segment length/boundary limits for the given device when | 
 | 	  preparing DMA scatterlists. | 
 |  | 
 | 	  This is particularly likely to have been overlooked in cases where the | 
 | 	  dma_map_sg() API is used for general bulk mapping of pages rather than | 
 | 	  preparing literal scatter-gather descriptors, where there is a risk of | 
 | 	  unexpected behaviour from DMA API implementations if the scatterlist | 
 | 	  is technically out-of-spec. | 
 |  | 
 | 	  If unsure, say N. | 
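
# The limits checked here are the ones a driver declares for its device; a
# minimal sketch (illustrative values only):
#
#	dma_set_max_seg_size(dev, SZ_64K);
#	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
#
# Scatterlists later passed to dma_map_sg() for that device are validated
# against these limits.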
 |  | 
 | config DMA_MAP_BENCHMARK | 
 | 	bool "Enable benchmarking of streaming DMA mapping" | 
 | 	depends on DEBUG_FS | 
 | 	help | 
	  Provides /sys/kernel/debug/dma_map_benchmark, which helps with
	  testing the performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c.
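
# The selftest above exercises this interface by opening the debugfs file and
# issuing an ioctl describing the run (thread count, duration and mapping
# parameters); see that file for the exact structure and ioctl number.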