// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us from
 * thrashing the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

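/*
 * Serializes use of the single COPYPAGE_MINICACHE window below: only one
 * CPU at a time may have a source page mapped there while it is copied.
 */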
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
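	/*
	 * Loop counter for the copy below; initialised to PAGE_SIZE / 64 - 1
	 * through the "2" input constraint of the asm.
	 */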
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.	(NP)
	 */
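	/*
	 * Each pass through the loop copies 64 bytes (two 32-byte cache
	 * lines), cleaning and then invalidating each destination line so the
	 * data drains through the write buffer rather than lingering in the
	 * D-cache.  The final block branches back to 2: instead of 1: so that
	 * no prefetches are issued past the end of the page.
	 */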
	asm volatile ("\
	.arch xscale					\n\
	pld	[%0, #0]				\n\
	pld	[%0, #32]				\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%0, #64]				\n\
	pld	[%0, #96]				\n\
	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
2:	ldrd	r2, r3, [%0], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b					\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

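	/*
	 * Lazy D-cache flush: if the kernel has written to the source page
	 * and not yet flushed it (PG_dcache_clean still clear), write it back
	 * now so the copy below reads up-to-date data.
	 */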
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

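	/*
	 * Map the source page at the dedicated COPYPAGE_MINICACHE window with
	 * the minicache memory type, so the reads below are cached in the
	 * mini data cache rather than the main D-cache.
	 */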
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
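	/*
	 * Zero one 32-byte cache line per iteration, then clean and
	 * invalidate it so the zeroes reach memory without polluting the
	 * D-cache.
	 */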
	asm volatile("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mov	ip, %0					\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

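/*
 * Page copy/clear helpers for this CPU; presumably installed during
 * processor setup when an XScale core with a mini data cache is detected.
 */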
struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};