blob: d122f7f957ce324a1ec046381d1c36afd15ca763 [file] [log] [blame]
David Gibson26ef5c02005-11-10 11:50:16 +11001#ifndef _ASM_POWERPC_CACHE_H
2#define _ASM_POWERPC_CACHE_H
3
4#ifdef __KERNEL__
5
David Gibson26ef5c02005-11-10 11:50:16 +11006
/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
/*
 * NOTE(review): IFETCH_ALIGN_SHIFT is only defined on the 64-bit
 * branch; IFETCH_ALIGN_BYTES below therefore only expands cleanly
 * for CONFIG_PPC64 users.
 */
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
#if defined(__powerpc64__) && !defined(__ASSEMBLY__)

/* Parameters of a single cache level. */
struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;	/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

/* Cache parameters for all levels described on ppc64. */
struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
53
Kevin Hao0ce63672013-08-22 09:30:35 +080054#if defined(__ASSEMBLY__)
55/*
56 * For a snooping icache, we still need a dummy icbi to purge all the
57 * prefetched instructions from the ifetch buffers. We also need a sync
58 * before the icbi to order the the actual stores to memory that might
59 * have modified instructions with the icbi.
60 */
61#define PURGE_PREFETCHED_INS \
62 sync; \
63 icbi 0,r3; \
64 sync; \
65 isync
David Howellsae3a197e2012-03-28 18:30:02 +010066
Kevin Hao0ce63672013-08-22 09:30:35 +080067#else
/* Place rarely-written data in its own section to reduce false sharing. */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#ifdef CONFIG_6xx
/* L2/L3 cache control register accessors, implemented in assembly. */
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
/* No L2CR/L3CR on other cores: reads yield 0, writes are no-ops. */
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
81
/* Zero the data cache block containing addr (PowerPC "dcbz"). */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}
86
/* Invalidate the data cache block containing addr (PowerPC "dcbi"). */
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}
91
/* Flush (write back and invalidate) the data cache block containing addr (PowerPC "dcbf"). */
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}
96
/* Write back (store, without invalidating) the data cache block containing addr (PowerPC "dcbst"). */
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
David Howellsae3a197e2012-03-28 18:30:02 +0100101#endif /* !__ASSEMBLY__ */
David Gibson26ef5c02005-11-10 11:50:16 +1100102#endif /* __KERNEL__ */
103#endif /* _ASM_POWERPC_CACHE_H */