// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

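/*
 * update_mmu_cache() is called after a PTE has been installed for @address.
 * Write back the D-cache for the newly mapped page and, for executable
 * mappings, invalidate the I-cache so stale instructions are not fetched.
 * PG_dcache_clean is used to skip pages that have already been flushed.
 */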
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	page = pfn_to_page(pte_pfn(*pte));
	if (page == ZERO_PAGE(0))
		return;

	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	addr = (unsigned long) kmap_atomic(page);

	dcache_wb_range(addr, addr + PAGE_SIZE);

	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

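/*
 * Perform a deferred I-cache invalidation for this hart: if our bit in
 * mm->context.icache_stale_mask is still set, another hart has modified
 * code for @mm since we last invalidated, so clear the bit and invalidate
 * the local I-cache before user instructions for @mm are fetched again.
 */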
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm_range.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

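/*
 * Make instruction modifications in [@start, @end) of @mm visible to all
 * harts. If the CPU can invalidate the I-cache by range and @mm is the
 * current mm, a local ranged invalidate is enough. Otherwise the whole
 * local I-cache is invalidated, harts currently running @mm are
 * invalidated via IPI, and harts not running @mm are marked stale in
 * icache_stale_mask so they invalidate lazily in flush_icache_deferred().
 */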
void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}