// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
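
/*
 * Layout note (derived from the macros above): the bitmap is an array of
 * u64 chunks, one bit per page frame.  Pfn N maps to bit N % 64 of chunk
 * N / 64; e.g. pfn 100 is bit 36 of the second chunk, at byte offset 8.
 */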

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory folio by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	/*
	 * Recheck after taking the reference: the page may have been freed
	 * and reallocated into a different folio, or taken off the LRU,
	 * while we were acquiring it.
	 */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}
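
/*
 * Typical userspace flow (an illustrative sketch, not part of this file's
 * build): obtain a pfn, e.g. from /proc/pid/pagemap, mark the page idle
 * through /sys/kernel/mm/page_idle/bitmap, let the workload run, then read
 * the bit back; if it is still set, the page was not referenced meanwhile.
 * The helper below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	// Returns 1 if the page at @pfn stayed idle, 0 if referenced,
 *	// -1 on error.
 *	static int page_stayed_idle(unsigned long pfn)
 *	{
 *		uint64_t chunk = 1ULL << (pfn % 64);
 *		off_t off = (pfn / 64) * sizeof(chunk);
 *		int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		pwrite(fd, &chunk, sizeof(chunk), off);	// set the idle bit
 *		// ... let the workload run ...
 *		pread(fd, &chunk, sizeof(chunk), off);	// reread the chunk
 *		close(fd);
 *		return !!(chunk & (1ULL << (pfn % 64)));
 *	}
 */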

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if one subpage is
			 * referenced, the whole THP is considered
			 * referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this folio.
		 * To avoid interference with page reclaim, mark it young so
		 * that folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
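
/*
 * Read semantics, a worked example: pos and count must be multiples of the
 * 8-byte chunk size, and pfn = pos * BITS_PER_BYTE, so a pread() of 8 bytes
 * at offset 16 returns the chunk covering pfns 128..191; the scanned range
 * is clamped to max_pfn.
 */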

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
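
/*
 * Write semantics, a worked example: writing the 8-byte value ~0ULL at
 * offset 0 requests that pfns 0..63 be marked idle; set bits for pfns that
 * are not user memory pages are silently ignored, and a write starting at
 * or beyond max_pfn fails with -ENXIO.
 */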

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);
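
/*
 * Registering the group on mm_kobj exposes the bitmap as
 * /sys/kernel/mm/page_idle/bitmap (mode 0600, so accessible to root only),
 * which is the file the sketches above operate on.
 */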