blob: 4f7739e707a7e9147b02d396969e969ca811da88 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 */
7
8#include <linux/init.h>
9#include <linux/mm.h>
10
11#include <asm/cache.h>
12#include <asm/addrspace.h>
13#include <asm/processor.h>
14#include <asm/cacheflush.h>
15#include <asm/io.h>
16
Phil Edworthyc1537b42012-01-09 16:08:47 +000017/*
18 * The maximum number of pages we support up to when doing ranged dcache
19 * flushing. Anything exceeding this will simply flush the dcache in its
20 * entirety.
21 */
22#define MAX_OCACHE_PAGES 32
23#define MAX_ICACHE_PAGES 32
24
Phil Edworthy1ae911c2012-02-21 08:29:57 +000025#ifdef CONFIG_CACHE_WRITEBACK
Phil Edworthyc1537b42012-01-09 16:08:47 +000026static void sh2a_flush_oc_line(unsigned long v, int way)
27{
28 unsigned long addr = (v & 0x000007f0) | (way << 11);
29 unsigned long data;
30
31 data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
32 if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
33 data &= ~SH_CACHE_UPDATED;
34 __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
35 }
36}
Phil Edworthy1ae911c2012-02-21 08:29:57 +000037#endif
Phil Edworthyc1537b42012-01-09 16:08:47 +000038
39static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
40{
41 /* Set associative bit to hit all ways */
42 unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
43 __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
44}
45
46/*
47 * Write back the dirty D-caches, but not invalidate them.
48 */
/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * start/size describe a virtual address range; it is rounded out to
 * whole cache lines. A no-op unless the cache is in write-back mode.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;
	int nr_ways;

	/* Align the range out to L1 cache-line boundaries. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	nr_ways = current_cpu_data.dcache.ways;

	/*
	 * The cache address arrays must be poked with interrupts off and
	 * from the uncached mapping (jump_to_uncached()).
	 */
	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then flush the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		/* Walk the whole O-cache address array instead. */
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + (nr_ways * current_cpu_data.dcache.way_size);

		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long data = __raw_readl(v);
			/* Clear U on dirty lines only: write back, keep valid. */
			if (data & SH_CACHE_UPDATED)
				__raw_writel(data & ~SH_CACHE_UPDATED, v);
		}
	} else {
		/* Ranged flush: try each way for every line in the range. */
		int way;
		for (way = 0; way < nr_ways; way++) {
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
		}
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}
87
Phil Edworthyc1537b42012-01-09 16:08:47 +000088/*
89 * Write back the dirty D-caches and invalidate them.
90 */
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * start/size describe a virtual address range, rounded out to whole
 * cache lines. Unlike the wback path there is no whole-cache shortcut.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Align the range out to L1 cache-line boundaries. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	/* Address-array accesses: IRQs off, run uncached. */
	local_irq_save(flags);
	jump_to_uncached();

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
		/*
		 * Write-back mode: push any dirty copy of this line out of
		 * every way before invalidating it, so data isn't lost.
		 * Write-through caches have nothing to write back.
		 */
		int way;
		int nr_ways = current_cpu_data.dcache.ways;
		for (way = 0; way < nr_ways; way++)
			sh2a_flush_oc_line(v, way);
#endif
		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
117
Phil Edworthyc1537b42012-01-09 16:08:47 +0000118/*
119 * Invalidate the D-caches, but no write back please
120 */
/*
 * Invalidate the D-caches, but no write back please
 *
 * Dirty data in the range is discarded, not written back. Large
 * ranges fall back to invalidating the entire operand cache via CCR.
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Align the range out to L1 cache-line boundaries. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	/* CCR/address-array accesses: IRQs off, run uncached. */
	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then just blow the cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		/* Global O-cache invalidate: set the OCI bit in CCR. */
		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = begin; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
146
Phil Edworthyc1537b42012-01-09 16:08:47 +0000147/*
148 * Write back the range of D-cache, and purge the I-cache.
149 */
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * args is a struct flusher_data: addr1/addr2 bound the virtual range
 * (rounded out to cache lines). Installed as local_flush_icache_range.
 */
static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long v;
	unsigned long flags;

	start = data->addr1 & ~(L1_CACHE_BYTES-1);
	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

#ifdef CONFIG_CACHE_WRITEBACK
	/*
	 * Push dirty D-cache data to memory first so the I-cache refills
	 * see the new instructions (done before going uncached, since the
	 * helper manages its own IRQ/uncached window).
	 */
	sh2a__flush_wback_region((void *)start, end-start);
#endif

	local_irq_save(flags);
	jump_to_uncached();

	/* I-Cache invalidate */
	/* If there are too many pages then just blow the cache */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		/* Global I-cache invalidate: set the ICI bit in CCR. */
		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = start; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
Paul Mundta58e1a22009-08-15 12:38:29 +0900180
/*
 * Install the SH-2A implementations of the generic SH cache-flush
 * entry points. Called once at boot (__init).
 */
void __init sh2a_cache_init(void)
{
	local_flush_icache_range = sh2a_flush_icache_range;

	__flush_wback_region = sh2a__flush_wback_region;
	__flush_purge_region = sh2a__flush_purge_region;
	__flush_invalidate_region = sh2a__flush_invalidate_region;
}