/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/cacheflush.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
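/*
 * Push any cached data for the page out to RAM before it is handed
 * out as coherent DMA memory, so the mapping starts out consistent.
 */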
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}

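/*
 * Return page protection bits that mark the mapping non-cacheable:
 * '040/'060 use the serialized non-cacheable mode, other CPUs the
 * '030-style no-cache bit.
 */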
pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
	if (CPU_IS_040_OR_060) {
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#else
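/*
 * Non-MMU and ColdFire configurations: coherent DMA memory is simply
 * allocated from the normal page allocator.
 */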
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

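	/* Devices that cannot address a full 32-bit range allocate from ZONE_DMA. */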
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

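/* Freeing is symmetric: return the pages straight to the page allocator. */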
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

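/*
 * Streaming DMA cache maintenance: write back (and invalidate) cached
 * data before the device reads the buffer, invalidate cached lines
 * before the device writes into it.
 */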
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}