// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

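/* The bitmap is accessed in u64-sized chunks, each covering 64 page frames. */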
#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory folio by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
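	/*
	 * Re-check now that we hold a reference: the page may have been
	 * freed and reused by a different folio, or removed from the LRU,
	 * between the lookup and folio_try_get().
	 */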
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

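	/* Visit each PTE or PMD that maps this folio within the VMA. */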
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if one subpage is
			 * referenced, the whole THP is treated as
			 * referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

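	/* rmap_walk() requires the folio lock for file-backed and KSM folios. */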
	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

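	/* The file offset is in bytes; each bitmap byte covers 8 page frames. */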
	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
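
/*
 * Userspace sketch (illustrative only, not part of the kernel build): the
 * bitmap is read in 8-byte chunks at 8-byte-aligned offsets, and bit N of
 * the chunk at byte offset `off` reports page frame off * BITS_PER_BYTE + N:
 *
 *	uint64_t chunk;
 *	unsigned long pfn = ...;	// hypothetical frame of interest
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
 *
 *	pread(fd, &chunk, sizeof(chunk), (pfn / 64) * sizeof(chunk));
 *	if (chunk & (1ULL << (pfn % 64)))
 *		// pfn has not been accessed since it was last marked idle
 */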

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
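
/*
 * Userspace sketch (illustrative only, not part of the kernel build): a
 * typical working-set estimation cycle writes all-ones chunks to mark page
 * frames idle, lets the workload run, then re-reads the bitmap; frames whose
 * bit is still set were not referenced in the interval:
 *
 *	uint64_t ones = ~0ULL, chunk;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *
 *	pwrite(fd, &ones, sizeof(ones), (pfn / 64) * sizeof(ones));
 *	// ... let the workload run for the sampling interval ...
 *	pread(fd, &chunk, sizeof(chunk), (pfn / 64) * sizeof(chunk));
 */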

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};

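/* Registers /sys/kernel/mm/page_idle/bitmap (mm_kobj is /sys/kernel/mm). */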
static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);