/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#ifndef _ASM_C6X_DMA_MAPPING_H
#define _ASM_C6X_DMA_MAPPING_H

#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>

#define dma_supported(d, m)	1

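/*
 * Syncing an address range for the device is a no-op in this header;
 * the empty inline below only satisfies the generic DMA API.
 */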
static inline void dma_sync_single_range_for_device(struct device *dev,
						     dma_addr_t addr,
						     unsigned long offset,
						     size_t size,
						     enum dma_data_direction dir)
{
}

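/*
 * Record the device's DMA addressing capability.  The mask is accepted
 * only if the device has a dma_mask pointer and dma_supported() agrees
 * (always true on this port, see the macro above).
 */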
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == ~0;
}

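/*
 * Streaming mappings for single buffers and scatterlists.  These are
 * implemented out of line in the arch DMA support code rather than in
 * this header.
 */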
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
				 size_t size, enum dma_data_direction dir);

extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir);

extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
		      int nents, enum dma_data_direction direction);

extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			 int nents, enum dma_data_direction direction);

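/*
 * Page-based mappings are expressed in terms of dma_map_single(): the
 * buffer is addressed directly via page_address() plus the offset, and
 * the result is reported to the DMA debug infrastructure.
 */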
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, page_address(page) + offset, size, dir);

	debug_dma_map_page(dev, page, offset, size, dir, handle, false);

	return handle;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);

	debug_dma_unmap_page(dev, handle, size, dir, false);
}

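/*
 * Synchronization helpers used before and after streaming DMA transfers,
 * for both single buffers and scatterlists.
 */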
extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
				    size_t size, enum dma_data_direction dir);

extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				       size_t size,
				       enum dma_data_direction dir);

extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir);

extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir);

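/*
 * Coherent (consistent) allocations.  coherent_mem_init() sets up the
 * memory region that backs dma_alloc_coherent()/dma_free_coherent().
 */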
extern void coherent_mem_init(u32 start, u32 size);
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

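/* No separate non-coherent allocator: these alias the coherent calls. */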
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif /* _ASM_C6X_DMA_MAPPING_H */