// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif
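
/*
 * Editor's note (illustrative, not part of the original source): these
 * wrappers let the rest of the file avoid #ifdef CONFIG_NUMA, e.g.
 * DO_NUMA(rmap_item->nid = nid) records a node id only on NUMA builds,
 * while expressions such as root_stable_tree + NUMA(dup->nid) read back
 * index 0 on non-NUMA builds.
 */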

typedef u8 rmap_age_t;

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents. Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time". The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree. (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
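
/*
 * Editor's note (illustrative, not part of the original source): an
 * application opts anonymous memory into the scanning described above with
 *
 *	madvise(addr, length, MADV_MERGEABLE);
 *
 * and ksmd itself is started by writing 1 to /sys/kernel/mm/ksm/run.
 */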

/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 * @age: number of scan iterations since creation
 * @remaining_skips: how many scans to skip
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	rmap_age_t age;
	rmap_age_t remaining_skips;
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
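
/*
 * Editor's note (illustrative, not part of the original source): since
 * rmap_item->address is page aligned, the flags above and the unstable
 * tree sequence number share its low bits, e.g. when a page is inserted
 * into the unstable tree:
 *
 *	rmap_item->address |= UNSTABLE_FLAG;
 *	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
 *
 * and the plain address is recovered with rmap_item->address & PAGE_MASK.
 */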

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* Default number of pages to scan per batch */
#define DEFAULT_PAGES_TO_SCAN 100

/* The number of pages scanned */
static unsigned long ksm_pages_scanned;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* Skip pages that couldn't be de-duplicated previously */
/* Default to true at least temporarily, for testing */
static bool ksm_smart_scan = true;

/* The number of zero pages which is placed by KSM */
atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);

/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;

/* Don't scan more than max pages per batch. */
static unsigned long ksm_advisor_max_pages_to_scan = 30000;

/* Min CPU for scanning pages per scan */
#define KSM_ADVISOR_MIN_CPU 10

/* Max CPU for scanning pages per scan */
static unsigned int ksm_advisor_max_cpu = 70;

/* Target scan time in seconds to analyze all KSM candidate pages. */
static unsigned long ksm_advisor_target_scan_time = 200;

/* Exponentially weighted moving average. */
#define EWMA_WEIGHT 30

/**
 * struct advisor_ctx - metadata for KSM advisor
 * @start_scan: start time of the current scan
 * @scan_time: scan time of previous scan
 * @change: change in percent to pages_to_scan parameter
 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
 */
struct advisor_ctx {
	ktime_t start_scan;
	unsigned long scan_time;
	unsigned long change;
	unsigned long long cpu_time;
};
static struct advisor_ctx advisor_ctx;

/* Define the different advisor types */
enum ksm_advisor_type {
	KSM_ADVISOR_NONE,
	KSM_ADVISOR_SCAN_TIME,
};
static enum ksm_advisor_type ksm_advisor;

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */

/* At least scan this many pages per batch. */
static unsigned long ksm_advisor_min_pages_to_scan = 500;

static void set_advisor_defaults(void)
{
	if (ksm_advisor == KSM_ADVISOR_NONE) {
		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
		advisor_ctx = (const struct advisor_ctx){ 0 };
		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
	}
}
#endif /* CONFIG_SYSFS */

static inline void advisor_start_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		advisor_ctx.start_scan = ktime_get();
}

/*
 * Use previous scan time if available, otherwise use current scan time as an
 * approximation for the previous scan time.
 */
static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
					   unsigned long scan_time)
{
	return ctx->scan_time ? ctx->scan_time : scan_time;
}

/* Calculate exponential weighted moving average */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}
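
/*
 * Editor's note (illustrative, not part of the original source): with
 * EWMA_WEIGHT == 30, ewma(100, 200) == (70 * 100 + 30 * 200) / 100 == 130,
 * i.e. each new sample moves the average by 30% of the difference.
 */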

/*
 * The scan time advisor is based on the current scan rate and the target
 * scan rate.
 *
 *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
 *
 * To avoid perturbations it calculates a change factor of previous changes.
 * A new change factor is calculated for each iteration and it uses an
 * exponentially weighted moving average. The new pages_to_scan value is
 * multiplied with that change factor:
 *
 *	new_pages_to_scan *= change factor
 *
 * The new_pages_to_scan value is limited by the cpu min and max values. It
 * calculates the cpu percent for the last scan and calculates the new
 * estimated cpu percent cost for the next scan. That value is capped by the
 * cpu min and max setting.
 *
 * In addition the new pages_to_scan value is capped by the max and min
 * limits.
 */
static void scan_time_advisor(void)
{
	unsigned int cpu_percent;
	unsigned long cpu_time;
	unsigned long cpu_time_diff;
	unsigned long cpu_time_diff_ms;
	unsigned long pages;
	unsigned long per_page_cost;
	unsigned long factor;
	unsigned long change;
	unsigned long last_scan_time;
	unsigned long scan_time;

	/* Convert scan time to seconds */
	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
			    MSEC_PER_SEC);
	scan_time = scan_time ? scan_time : 1;

	/* Calculate CPU consumption of ksmd background thread */
	cpu_time = task_sched_runtime(current);
	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;

	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
	cpu_percent = cpu_percent ? cpu_percent : 1;
	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);

	/* Calculate scan time as percentage of target scan time */
	factor = ksm_advisor_target_scan_time * 100 / scan_time;
	factor = factor ? factor : 1;

	/*
	 * Calculate scan time as percentage of last scan time and use
	 * exponentially weighted average to smooth it
	 */
	change = scan_time * 100 / last_scan_time;
	change = change ? change : 1;
	change = ewma(advisor_ctx.change, change);

	/* Calculate new scan rate based on target scan rate. */
	pages = ksm_thread_pages_to_scan * 100 / factor;
	/* Update pages_to_scan by weighted change percentage. */
	pages = pages * change / 100;

	/* Cap new pages_to_scan value */
	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
	per_page_cost = per_page_cost ? per_page_cost : 1;

	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
	pages = min(pages, ksm_advisor_max_pages_to_scan);

	/* Update advisor context */
	advisor_ctx.change = change;
	advisor_ctx.scan_time = scan_time;
	advisor_ctx.cpu_time = cpu_time;

	ksm_thread_pages_to_scan = pages;
	trace_ksm_advisor(scan_time, pages, cpu_percent);
}

static void advisor_stop_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		scan_time_advisor();
}

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		page = vm_normal_page(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	/* return 1 if the page is a normal ksm page or a KSM-placed zero page */
	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
	pte_unmap_unlock(pte, ptl);
	return ret;
}

static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static const struct mm_walk_ops break_ksm_lock_vma_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_WRLOCK,
};

/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'. We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
	vm_fault_t ret = 0;
	const struct mm_walk_ops *ops = lock_vma ?
				&break_ksm_lock_vma_ops : &break_ksm_ops;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course. The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
			     VM_IO | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP | VM_DROPPABLE))
		return false;		/* just ignore the advice */

	if (vma_is_dax(vma))
		return false;

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr, false);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						   struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}
| 846 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 847 | static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 848 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 849 | struct ksm_rmap_item *rmap_item; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 850 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 851 | /* check it's not STABLE_NODE_CHAIN or negative */ |
| 852 | BUG_ON(stable_node->rmap_hlist_len < 0); |
| 853 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 854 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 855 | if (rmap_item->hlist.next) { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 856 | ksm_pages_sharing--; |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 857 | trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); |
| 858 | } else { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 859 | ksm_pages_shared--; |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 860 | } |
xu xin | 7609385 | 2022-04-28 23:16:16 -0700 | [diff] [blame] | 861 | |
| 862 | rmap_item->mm->ksm_merging_pages--; |
| 863 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 864 | VM_BUG_ON(stable_node->rmap_hlist_len <= 0); |
| 865 | stable_node->rmap_hlist_len--; |
Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 866 | put_anon_vma(rmap_item->anon_vma); |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 867 | rmap_item->address &= PAGE_MASK; |
| 868 | cond_resched(); |
| 869 | } |
| 870 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 871 | /* |
| 872 | * We need the second aligned pointer of the migrate_nodes |
| 873 | * list_head to stay clear from the rb_parent_color union |
| 874 | * (aligned and different than any node) and also different |
| 875 | * from &migrate_nodes. This will verify that future list.h changes |
Nick Desaulniers | 815f0dd | 2018-08-22 16:37:24 -0700 | [diff] [blame] | 876 | * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 877 | */ |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 878 | BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); |
| 879 | BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 880 | |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 881 | trace_ksm_remove_ksm_page(stable_node->kpfn); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 882 | if (stable_node->head == &migrate_nodes) |
| 883 | list_del(&stable_node->list); |
| 884 | else |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 885 | stable_node_dup_del(stable_node); |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 886 | free_stable_node(stable_node); |
| 887 | } |
| 888 | |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 889 | enum ksm_get_folio_flags { |
| 890 | KSM_GET_FOLIO_NOLOCK, |
| 891 | KSM_GET_FOLIO_LOCK, |
| 892 | KSM_GET_FOLIO_TRYLOCK |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 893 | }; |
| 894 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 895 | /* |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 896 | * ksm_get_folio: checks if the page indicated by the stable node |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 897 | * is still its ksm page, despite having held no reference to it. |
| 898 | * In which case we can trust the content of the page, and it |
| 899 | * returns the gotten page; but if the page has now been zapped, |
| 900 | * remove the stale node from the stable tree and return NULL. |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 901 | * But beware, the stable node's page might be being migrated. |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 902 | * |
| 903 | * You would expect the stable_node to hold a reference to the ksm page. |
| 904 | * But if it increments the page's count, swapping out has to wait for |
| 905 | * ksmd to come around again before it can free the page, which may take |
| 906 | * seconds or even minutes: much too unresponsive. So instead we use a |
| 907 | * "keyhole reference": access to the ksm page from the stable node peeps |
| 908 | * out through its keyhole to see if that page still holds the right key, |
| 909 | * pointing back to this stable node. This relies on freeing a PageAnon |
| 910 | * page to reset its page->mapping to NULL, and relies on no other use of |
| 911 | * a page to put something that might look like our key in page->mapping. |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 912 | * Note that a page whose count has already reached zero is on its way to being freed: an anomaly to bear in mind. |
| 913 | */ |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 914 | static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node, |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 915 | enum ksm_get_folio_flags flags) |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 916 | { |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 917 | struct folio *folio; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 918 | void *expected_mapping; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 919 | unsigned long kpfn; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 920 | |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 921 | expected_mapping = (void *)((unsigned long)stable_node | |
| 922 | PAGE_MAPPING_KSM); |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 923 | again: |
Paul E. McKenney | 08df477 | 2017-10-09 11:51:45 -0700 | [diff] [blame] | 924 | kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 925 | folio = pfn_folio(kpfn); |
| 926 | if (READ_ONCE(folio->mapping) != expected_mapping) |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 927 | goto stale; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 928 | |
| 929 | /* |
| 930 | * We cannot do anything with the page while its refcount is 0. |
| 931 | * Usually 0 means free, or tail of a higher-order page: in which |
| 932 | * case this node is no longer referenced, and should be freed; |
Jiang Biao | 1c4c3b9 | 2018-08-21 21:53:13 -0700 | [diff] [blame] | 933 | * however, it might mean that the page is under page_ref_freeze(). |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 934 | * The __remove_mapping() case is easy, again the node is now stale; |
Kirill Tkhai | 52d1e60 | 2019-03-05 15:43:06 -0800 | [diff] [blame] | 935 | * the same applies in the reuse_ksm_page() case; but if the page is |
Matthew Wilcox (Oracle) | 9800562 | 2022-06-06 13:29:10 -0400 | [diff] [blame] | 936 | * in the swapcache during folio_migrate_mapping(), it might still be |
Kirill Tkhai | 52d1e60 | 2019-03-05 15:43:06 -0800 | [diff] [blame] | 937 | * our page, in which case it's essential to keep the node. |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 938 | */ |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 939 | while (!folio_try_get(folio)) { |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 940 | /* |
| 941 | * Another check for page->mapping != expected_mapping would |
| 942 | * work here too. We have chosen the !PageSwapCache test to |
| 943 | * optimize the common case, when the page is or is about to |
| 944 | * be freed: PageSwapCache is cleared (under spin_lock_irq) |
Jiang Biao | 1c4c3b9 | 2018-08-21 21:53:13 -0700 | [diff] [blame] | 945 | * in the ref_freeze section of __remove_mapping(); but an anon |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 946 | * folio->mapping is reset to NULL later, in free_pages_prepare(). |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 947 | */ |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 948 | if (!folio_test_swapcache(folio)) |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 949 | goto stale; |
| 950 | cpu_relax(); |
| 951 | } |
| 952 | |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 953 | if (READ_ONCE(folio->mapping) != expected_mapping) { |
| 954 | folio_put(folio); |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 955 | goto stale; |
| 956 | } |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 957 | |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 958 | if (flags == KSM_GET_FOLIO_TRYLOCK) { |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 959 | if (!folio_trylock(folio)) { |
| 960 | folio_put(folio); |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 961 | return ERR_PTR(-EBUSY); |
| 962 | } |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 963 | } else if (flags == KSM_GET_FOLIO_LOCK) |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 964 | folio_lock(folio); |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 965 | |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 966 | if (flags != KSM_GET_FOLIO_NOLOCK) { |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 967 | if (READ_ONCE(folio->mapping) != expected_mapping) { |
| 968 | folio_unlock(folio); |
| 969 | folio_put(folio); |
Hugh Dickins | 8aafa6a | 2013-02-22 16:35:06 -0800 | [diff] [blame] | 970 | goto stale; |
| 971 | } |
| 972 | } |
Alex Shi (tencent) | b91f947 | 2024-04-11 14:17:02 +0800 | [diff] [blame] | 973 | return folio; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 974 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 975 | stale: |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 976 | /* |
| 977 | * We come here from above when page->mapping or !PageSwapCache |
| 978 | * suggests that the node is stale; but it might be under migration. |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 979 | * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 980 | * before checking whether node->kpfn has been changed. |
| 981 | */ |
| 982 | smp_rmb(); |
Jason Low | 4db0c3c | 2015-04-15 16:14:08 -0700 | [diff] [blame] | 983 | if (READ_ONCE(stable_node->kpfn) != kpfn) |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 984 | goto again; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 985 | remove_node_from_stable_tree(stable_node); |
| 986 | return NULL; |
| 987 | } |
| 988 | |
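/*
 * The keyhole lookup above, condensed (a sketch of the ordering that
 * makes it safe, not additional code):
 *
 *	kpfn = READ_ONCE(stable_node->kpfn);	// address dependency
 *	folio = pfn_folio(kpfn);
 *	check folio->mapping == stable_node | PAGE_MAPPING_KSM
 *	folio_try_get(folio)			// may spin while the
 *						// refcount is frozen
 *	re-check folio->mapping			// could have been freed
 *	optionally folio_lock()/folio_trylock(), then re-check again
 *
 * Any failed check falls through to "stale", where the smp_rmb() pairs
 * with the smp_wmb() in folio_migrate_ksm(): if kpfn changed under us
 * the folio was only migrating, so retry; otherwise the node really is
 * stale and gets removed.
 */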
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 989 | /* |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 990 | * Removing rmap_item from stable or unstable tree. |
| 991 | * This function will clean the information from the stable/unstable tree. |
| 992 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 993 | static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 994 | { |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 995 | if (rmap_item->address & STABLE_FLAG) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 996 | struct ksm_stable_node *stable_node; |
Alex Shi (tencent) | f39b6e2 | 2024-04-11 14:17:03 +0800 | [diff] [blame] | 997 | struct folio *folio; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 998 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 999 | stable_node = rmap_item->head; |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 1000 | folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); |
Alex Shi (tencent) | f39b6e2 | 2024-04-11 14:17:03 +0800 | [diff] [blame] | 1001 | if (!folio) |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1002 | goto out; |
| 1003 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1004 | hlist_del(&rmap_item->hlist); |
Alex Shi (tencent) | f39b6e2 | 2024-04-11 14:17:03 +0800 | [diff] [blame] | 1005 | folio_unlock(folio); |
| 1006 | folio_put(folio); |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1007 | |
Andrea Arcangeli | 98666f8a | 2015-11-05 18:49:13 -0800 | [diff] [blame] | 1008 | if (!hlist_empty(&stable_node->hlist)) |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1009 | ksm_pages_sharing--; |
| 1010 | else |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1011 | ksm_pages_shared--; |
xu xin | 7609385 | 2022-04-28 23:16:16 -0700 | [diff] [blame] | 1012 | |
| 1013 | rmap_item->mm->ksm_merging_pages--; |
| 1014 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1015 | VM_BUG_ON(stable_node->rmap_hlist_len <= 0); |
| 1016 | stable_node->rmap_hlist_len--; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1017 | |
Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 1018 | put_anon_vma(rmap_item->anon_vma); |
Miaohe Lin | c89a384 | 2021-05-04 18:37:45 -0700 | [diff] [blame] | 1019 | rmap_item->head = NULL; |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1020 | rmap_item->address &= PAGE_MASK; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1021 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1022 | } else if (rmap_item->address & UNSTABLE_FLAG) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1023 | unsigned char age; |
| 1024 | /* |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1025 | * Usually ksmd can and must skip the rb_erase, because |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1026 | * root_unstable_tree was already reset to RB_ROOT. |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1027 | * But be careful when an mm is exiting: do the rb_erase |
| 1028 | * if this rmap_item was inserted by this scan, rather |
| 1029 | * than left over from before. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1030 | */ |
| 1031 | age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1032 | BUG_ON(age > 1); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1033 | if (!age) |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1034 | rb_erase(&rmap_item->node, |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1035 | root_unstable_tree + NUMA(rmap_item->nid)); |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1036 | ksm_pages_unshared--; |
| 1037 | rmap_item->address &= PAGE_MASK; |
| 1038 | } |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1039 | out: |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1040 | cond_resched(); /* we're called from many long loops */ |
| 1041 | } |
| 1042 | |
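/*
 * Worked example for the unstable-tree "age" test above, assuming (as
 * elsewhere in this file) that the low bits of rmap_item->address hold
 * the scan seqnr recorded when the item was inserted:
 *
 *	inserted during scan 41, ksm_scan.seqnr == 41  ->  age 0:
 *		the unstable tree has not been reset since insertion,
 *		so the node is still linked and must be rb_erase'd;
 *	inserted during scan 40, ksm_scan.seqnr == 41  ->  age 1:
 *		root_unstable_tree was already reset to RB_ROOT for the
 *		current scan, so the stale link is simply forgotten.
 */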
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1043 | static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1044 | { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1045 | while (*rmap_list) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1046 | struct ksm_rmap_item *rmap_item = *rmap_list; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1047 | *rmap_list = rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1048 | remove_rmap_item_from_tree(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1049 | free_rmap_item(rmap_item); |
| 1050 | } |
| 1051 | } |
| 1052 | |
| 1053 | /* |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 1054 | * Though it's very tempting to unmerge rmap_items from stable tree rather |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1055 | * than check every pte of a given vma, the locking doesn't quite work for |
| 1056 | * that - an rmap_item is assigned to the stable tree after inserting ksm |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1057 | * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1058 | * rmap_items from parent to child at fork time (so as not to waste time |
| 1059 | * if exit comes before the next scan reaches it). |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1060 | * |
| 1061 | * Similarly, although we'd like to remove rmap_items (so updating counts |
| 1062 | * and freeing memory) when unmerging an area, it's easier to leave that |
| 1063 | * to the next pass of ksmd - consider, for example, how ksmd might be |
| 1064 | * in cmp_and_merge_page on one of the rmap_items we would be removing. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1065 | */ |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1066 | static int unmerge_ksm_pages(struct vm_area_struct *vma, |
Suren Baghdasaryan | 49b0638 | 2023-08-04 08:27:19 -0700 | [diff] [blame] | 1067 | unsigned long start, unsigned long end, bool lock_vma) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1068 | { |
| 1069 | unsigned long addr; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1070 | int err = 0; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1071 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1072 | for (addr = start; addr < end && !err; addr += PAGE_SIZE) { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1073 | if (ksm_test_exit(vma->vm_mm)) |
| 1074 | break; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1075 | if (signal_pending(current)) |
| 1076 | err = -ERESTARTSYS; |
| 1077 | else |
Suren Baghdasaryan | 49b0638 | 2023-08-04 08:27:19 -0700 | [diff] [blame] | 1078 | err = break_ksm(vma, addr, lock_vma); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1079 | } |
| 1080 | return err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1081 | } |
| 1082 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1083 | static inline struct ksm_stable_node *folio_stable_node(struct folio *folio) |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 1084 | { |
| 1085 | return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL; |
| 1086 | } |
| 1087 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1088 | static inline struct ksm_stable_node *page_stable_node(struct page *page) |
Mike Rapoport | 8848482 | 2018-06-07 17:07:11 -0700 | [diff] [blame] | 1089 | { |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 1090 | return folio_stable_node(page_folio(page)); |
Mike Rapoport | 8848482 | 2018-06-07 17:07:11 -0700 | [diff] [blame] | 1091 | } |
| 1092 | |
Alex Shi (tencent) | b8b0ff2 | 2024-04-11 14:17:04 +0800 | [diff] [blame] | 1093 | static inline void folio_set_stable_node(struct folio *folio, |
| 1094 | struct ksm_stable_node *stable_node) |
Mike Rapoport | 8848482 | 2018-06-07 17:07:11 -0700 | [diff] [blame] | 1095 | { |
Alex Shi (tencent) | 452e862 | 2024-04-11 14:17:11 +0800 | [diff] [blame] | 1096 | VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio); |
| 1097 | folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); |
Mike Rapoport | 8848482 | 2018-06-07 17:07:11 -0700 | [diff] [blame] | 1098 | } |
| 1099 | |
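/*
 * Sketch of the folio->mapping encoding relied on by
 * folio_set_stable_node() and folio_stable_node() above (the low-bit
 * names are the PAGE_MAPPING_* flags; exact values are left to
 * page-flags.h):
 *
 *	ordinary anon folio:	mapping = anon_vma    | PAGE_MAPPING_ANON
 *	KSM folio:		mapping = stable_node | PAGE_MAPPING_KSM
 *
 * folio_raw_mapping() masks the flag bits back off, which is why
 * folio_stable_node() can recover the ksm_stable_node pointer directly
 * from a KSM folio.
 */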
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1100 | #ifdef CONFIG_SYSFS |
| 1101 | /* |
| 1102 | * Only called through the sysfs control interface: |
| 1103 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1104 | static int remove_stable_node(struct ksm_stable_node *stable_node) |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1105 | { |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1106 | struct folio *folio; |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1107 | int err; |
| 1108 | |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 1109 | folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1110 | if (!folio) { |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1111 | /* |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1112 | * ksm_get_folio did remove_node_from_stable_tree itself. |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1113 | */ |
| 1114 | return 0; |
| 1115 | } |
| 1116 | |
Andrey Ryabinin | 9a63236 | 2019-11-21 17:54:01 -0800 | [diff] [blame] | 1117 | /* |
| 1118 | * The page could still be mapped if this races with __mmput() running in |
| 1119 | * between ksm_exit() and exit_mmap(). Just refuse to let |
| 1120 | * merge_across_nodes/max_page_sharing be switched. |
| 1121 | */ |
| 1122 | err = -EBUSY; |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1123 | if (!folio_mapped(folio)) { |
Hugh Dickins | 8fdb3db | 2013-02-22 16:36:03 -0800 | [diff] [blame] | 1124 | /* |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1125 | * The stable node did not yet appear stale to ksm_get_folio(), |
| 1126 | * since that allows for an unmapped ksm folio to be recognized |
Hugh Dickins | 8fdb3db | 2013-02-22 16:36:03 -0800 | [diff] [blame] | 1127 | * right up until it is freed; but the node is safe to remove. |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1128 | * This folio might be in an LRU cache waiting to be freed, |
| 1129 | * or it might be in the swapcache (perhaps under writeback), |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1130 | * or it might have been removed from swapcache a moment ago. |
| 1131 | */ |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1132 | folio_set_stable_node(folio, NULL); |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1133 | remove_node_from_stable_tree(stable_node); |
| 1134 | err = 0; |
| 1135 | } |
| 1136 | |
Alex Shi (tencent) | 9d5cc14 | 2024-04-11 14:17:05 +0800 | [diff] [blame] | 1137 | folio_unlock(folio); |
| 1138 | folio_put(folio); |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1139 | return err; |
| 1140 | } |
| 1141 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1142 | static int remove_stable_node_chain(struct ksm_stable_node *stable_node, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1143 | struct rb_root *root) |
| 1144 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1145 | struct ksm_stable_node *dup; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1146 | struct hlist_node *hlist_safe; |
| 1147 | |
| 1148 | if (!is_stable_node_chain(stable_node)) { |
| 1149 | VM_BUG_ON(is_stable_node_dup(stable_node)); |
| 1150 | if (remove_stable_node(stable_node)) |
| 1151 | return true; |
| 1152 | else |
| 1153 | return false; |
| 1154 | } |
| 1155 | |
| 1156 | hlist_for_each_entry_safe(dup, hlist_safe, |
| 1157 | &stable_node->hlist, hlist_dup) { |
| 1158 | VM_BUG_ON(!is_stable_node_dup(dup)); |
| 1159 | if (remove_stable_node(dup)) |
| 1160 | return true; |
| 1161 | } |
| 1162 | BUG_ON(!hlist_empty(&stable_node->hlist)); |
| 1163 | free_stable_node_chain(stable_node, root); |
| 1164 | return false; |
| 1165 | } |
| 1166 | |
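/*
 * Shape assumed by the chain handling above, and by stable_node_dup()
 * further down (a sketch):
 *
 *	stable tree rb node marked STABLE_NODE_CHAIN
 *	    hlist: dup0 -> dup1 -> dup2 -> ...
 *
 * where each dup is itself a ksm_stable_node owning one KSM page and
 * at most ksm_max_page_sharing rmap_items.  A plain stable_node (no
 * chain) sits directly in the rb tree and is handled by the
 * remove_stable_node() shortcut at the top of remove_stable_node_chain().
 */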
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1167 | static int remove_all_stable_nodes(void) |
| 1168 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1169 | struct ksm_stable_node *stable_node, *next; |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1170 | int nid; |
| 1171 | int err = 0; |
| 1172 | |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1173 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1174 | while (root_stable_tree[nid].rb_node) { |
| 1175 | stable_node = rb_entry(root_stable_tree[nid].rb_node, |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1176 | struct ksm_stable_node, node); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1177 | if (remove_stable_node_chain(stable_node, |
| 1178 | root_stable_tree + nid)) { |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1179 | err = -EBUSY; |
| 1180 | break; /* proceed to next nid */ |
| 1181 | } |
| 1182 | cond_resched(); |
| 1183 | } |
| 1184 | } |
Geliang Tang | 0364041 | 2016-01-14 15:20:54 -0800 | [diff] [blame] | 1185 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1186 | if (remove_stable_node(stable_node)) |
| 1187 | err = -EBUSY; |
| 1188 | cond_resched(); |
| 1189 | } |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1190 | return err; |
| 1191 | } |
| 1192 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1193 | static int unmerge_and_remove_all_rmap_items(void) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1194 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1195 | struct ksm_mm_slot *mm_slot; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1196 | struct mm_slot *slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1197 | struct mm_struct *mm; |
| 1198 | struct vm_area_struct *vma; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1199 | int err = 0; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1200 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1201 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1202 | slot = list_entry(ksm_mm_head.slot.mm_node.next, |
| 1203 | struct mm_slot, mm_node); |
| 1204 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1205 | spin_unlock(&ksm_mmlist_lock); |
| 1206 | |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 1207 | for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; |
| 1208 | mm_slot = ksm_scan.mm_slot) { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1209 | VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 1210 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1211 | mm = mm_slot->slot.mm; |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1212 | mmap_read_lock(mm); |
Liam R. Howlett | 6db504c | 2023-03-08 17:03:10 -0500 | [diff] [blame] | 1213 | |
| 1214 | /* |
| 1215 | * Exit right away if the mm is exiting, to avoid a lockdep issue |
| 1216 | * in the maple tree. |
| 1217 | */ |
| 1218 | if (ksm_test_exit(mm)) |
| 1219 | goto mm_exiting; |
| 1220 | |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 1221 | for_each_vma(vmi, vma) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1222 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
| 1223 | continue; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1224 | err = unmerge_ksm_pages(vma, |
Suren Baghdasaryan | 49b0638 | 2023-08-04 08:27:19 -0700 | [diff] [blame] | 1225 | vma->vm_start, vma->vm_end, false); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1226 | if (err) |
| 1227 | goto error; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1228 | } |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1229 | |
Liam R. Howlett | 6db504c | 2023-03-08 17:03:10 -0500 | [diff] [blame] | 1230 | mm_exiting: |
Chengyang Fan | 420be4e | 2021-05-04 18:37:48 -0700 | [diff] [blame] | 1231 | remove_trailing_rmap_items(&mm_slot->rmap_list); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1232 | mmap_read_unlock(mm); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1233 | |
| 1234 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1235 | slot = list_entry(mm_slot->slot.mm_node.next, |
| 1236 | struct mm_slot, mm_node); |
| 1237 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1238 | if (ksm_test_exit(mm)) { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1239 | hash_del(&mm_slot->slot.hash); |
| 1240 | list_del(&mm_slot->slot.mm_node); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1241 | spin_unlock(&ksm_mmlist_lock); |
| 1242 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 1243 | mm_slot_free(mm_slot_cache, mm_slot); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1244 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 1245 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1246 | mmdrop(mm); |
Zhou Chengming | 7496fea | 2016-05-12 15:42:21 -0700 | [diff] [blame] | 1247 | } else |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1248 | spin_unlock(&ksm_mmlist_lock); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1249 | } |
| 1250 | |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 1251 | /* Clean up stable nodes, but don't worry if some are still busy */ |
| 1252 | remove_all_stable_nodes(); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1253 | ksm_scan.seqnr = 0; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1254 | return 0; |
| 1255 | |
| 1256 | error: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1257 | mmap_read_unlock(mm); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1258 | spin_lock(&ksm_mmlist_lock); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1259 | ksm_scan.mm_slot = &ksm_mm_head; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1260 | spin_unlock(&ksm_mmlist_lock); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1261 | return err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1262 | } |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1263 | #endif /* CONFIG_SYSFS */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1264 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1265 | static u32 calc_checksum(struct page *page) |
| 1266 | { |
| 1267 | u32 checksum; |
Fabio M. De Francesco | b335198 | 2023-11-20 15:18:44 +0100 | [diff] [blame] | 1268 | void *addr = kmap_local_page(page); |
Timofey Titovets | 59e1a2f4 | 2018-12-28 00:34:05 -0800 | [diff] [blame] | 1269 | checksum = xxhash(addr, PAGE_SIZE, 0); |
Fabio M. De Francesco | b335198 | 2023-11-20 15:18:44 +0100 | [diff] [blame] | 1270 | kunmap_local(addr); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1271 | return checksum; |
| 1272 | } |
| 1273 | |
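/*
 * Minimal sketch of how the checksum is consumed (the oldchecksum
 * comparison lives in cmp_and_merge_page(), later in this file;
 * try_to_merge_with_zero_page() below uses the same field against
 * zero_checksum):
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;		// contents still changing, don't merge yet
 *	}
 *	// unchanged since the last scan: try the stable/unstable trees
 */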
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1274 | static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1275 | pte_t *orig_pte) |
| 1276 | { |
| 1277 | struct mm_struct *mm = vma->vm_mm; |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1278 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1279 | int swapped; |
| 1280 | int err = -EFAULT; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1281 | struct mmu_notifier_range range; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1282 | bool anon_exclusive; |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 1283 | pte_t entry; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1284 | |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1285 | if (WARN_ON_ONCE(folio_test_large(folio))) |
| 1286 | return err; |
| 1287 | |
| 1288 | pvmw.address = page_address_in_vma(&folio->page, vma); |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 1289 | if (pvmw.address == -EFAULT) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1290 | goto out; |
| 1291 | |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 1292 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1293 | pvmw.address + PAGE_SIZE); |
| 1294 | mmu_notifier_invalidate_range_start(&range); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1295 | |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 1296 | if (!page_vma_mapped_walk(&pvmw)) |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1297 | goto out_mn; |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 1298 | if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) |
| 1299 | goto out_unlock; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1300 | |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1301 | anon_exclusive = PageAnonExclusive(&folio->page); |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 1302 | entry = ptep_get(pvmw.pte); |
| 1303 | if (pte_write(entry) || pte_dirty(entry) || |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1304 | anon_exclusive || mm_tlb_flush_pending(mm)) { |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1305 | swapped = folio_test_swapcache(folio); |
| 1306 | flush_cache_page(vma, pvmw.address, folio_pfn(folio)); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1307 | /* |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1308 | * Ok this is tricky: when get_user_pages_fast() runs it doesn't |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1309 | * take any lock, therefore the check that we are going to make |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 1310 | * with the pagecount against the mapcount is racy and |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1311 | * O_DIRECT can happen right after the check. |
| 1312 | * So we clear the pte and flush the tlb before the check; |
| 1313 | * this assures us that no O_DIRECT can happen after the check |
| 1314 | * or in the middle of the check. |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1315 | * |
| 1316 | * No need to notify as we are downgrading page table to read |
| 1317 | * only not changing it to point to a new page. |
| 1318 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 1319 | * See Documentation/mm/mmu_notifier.rst |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1320 | */ |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1321 | entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1322 | /* |
| 1323 | * Check that no O_DIRECT or similar I/O is in progress on the |
| 1324 | * page |
| 1325 | */ |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1326 | if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 1327 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1328 | goto out_unlock; |
| 1329 | } |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1330 | |
David Hildenbrand | e3b4b13 | 2023-12-20 23:45:02 +0100 | [diff] [blame] | 1331 | /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ |
| 1332 | if (anon_exclusive && |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1333 | folio_try_share_anon_rmap_pte(folio, &folio->page)) { |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1334 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
| 1335 | goto out_unlock; |
| 1336 | } |
| 1337 | |
Hugh Dickins | 4e31635 | 2010-10-02 17:49:08 -0700 | [diff] [blame] | 1338 | if (pte_dirty(entry)) |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1339 | folio_mark_dirty(folio); |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 1340 | entry = pte_mkclean(entry); |
Aneesh Kumar K.V | 595cd8f | 2017-02-24 14:59:19 -0800 | [diff] [blame] | 1341 | |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 1342 | if (pte_write(entry)) |
| 1343 | entry = pte_wrprotect(entry); |
| 1344 | |
Paolo Bonzini | f784274 | 2024-04-05 07:58:15 -0400 | [diff] [blame] | 1345 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1346 | } |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 1347 | *orig_pte = entry; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1348 | err = 0; |
| 1349 | |
| 1350 | out_unlock: |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 1351 | page_vma_mapped_walk_done(&pvmw); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1352 | out_mn: |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1353 | mmu_notifier_invalidate_range_end(&range); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1354 | out: |
| 1355 | return err; |
| 1356 | } |
| 1357 | |
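/*
 * Worked example for the refcount test in write_protect_page() above,
 * for a single-pte anon folio not in the swapcache, with the scanner
 * holding one extra reference on it:
 *
 *	folio_mapcount() == 1, swapped == 0, expected refcount == 2
 *	(1 for the mapping + 1 for the scanner's reference)
 *
 * If an O_DIRECT read or similar I/O holds an extra reference,
 * folio_ref_count() reads 3 instead, and the function restores the
 * original pte rather than write-protecting the page.
 */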
| 1358 | /** |
| 1359 | * replace_page - replace page in vma by new ksm page |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1360 | * @vma: vma that holds the pte pointing to page |
| 1361 | * @page: the page we are replacing by kpage |
| 1362 | * @kpage: the ksm page we replace page by |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1363 | * @orig_pte: the original value of the pte |
| 1364 | * |
| 1365 | * Returns 0 on success, -EFAULT on failure. |
| 1366 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1367 | static int replace_page(struct vm_area_struct *vma, struct page *page, |
| 1368 | struct page *kpage, pte_t orig_pte) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1369 | { |
David Hildenbrand | 9772953 | 2023-12-20 23:44:42 +0100 | [diff] [blame] | 1370 | struct folio *kfolio = page_folio(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1371 | struct mm_struct *mm = vma->vm_mm; |
Matthew Wilcox (Oracle) | b4e6f66e | 2022-09-02 20:46:41 +0100 | [diff] [blame] | 1372 | struct folio *folio; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1373 | pmd_t *pmd; |
Zach O'Keefe | 5072280 | 2022-07-06 16:59:26 -0700 | [diff] [blame] | 1374 | pmd_t pmde; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1375 | pte_t *ptep; |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1376 | pte_t newpte; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1377 | spinlock_t *ptl; |
| 1378 | unsigned long addr; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1379 | int err = -EFAULT; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1380 | struct mmu_notifier_range range; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1381 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1382 | addr = page_address_in_vma(page, vma); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1383 | if (addr == -EFAULT) |
| 1384 | goto out; |
| 1385 | |
Bob Liu | 6219049 | 2012-12-11 16:00:37 -0800 | [diff] [blame] | 1386 | pmd = mm_find_pmd(mm, addr); |
| 1387 | if (!pmd) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1388 | goto out; |
Zach O'Keefe | 5072280 | 2022-07-06 16:59:26 -0700 | [diff] [blame] | 1389 | /* |
| 1390 | * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() |
| 1391 | * without holding anon_vma lock for write. So when looking for a |
| 1392 | * genuine pmde (in which to find pte), test present and !THP together. |
| 1393 | */ |
Hugh Dickins | 26e1a0c | 2023-06-08 18:06:53 -0700 | [diff] [blame] | 1394 | pmde = pmdp_get_lockless(pmd); |
Zach O'Keefe | 5072280 | 2022-07-06 16:59:26 -0700 | [diff] [blame] | 1395 | if (!pmd_present(pmde) || pmd_trans_huge(pmde)) |
| 1396 | goto out; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1397 | |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 1398 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, |
Jérôme Glisse | 6f4f13e | 2019-05-13 17:20:49 -0700 | [diff] [blame] | 1399 | addr + PAGE_SIZE); |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1400 | mmu_notifier_invalidate_range_start(&range); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1401 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1402 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
Hugh Dickins | 04dee9e | 2023-06-08 18:29:22 -0700 | [diff] [blame] | 1403 | if (!ptep) |
| 1404 | goto out_mn; |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 1405 | if (!pte_same(ptep_get(ptep), orig_pte)) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1406 | pte_unmap_unlock(ptep, ptl); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1407 | goto out_mn; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1408 | } |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1409 | VM_BUG_ON_PAGE(PageAnonExclusive(page), page); |
David Hildenbrand | 9772953 | 2023-12-20 23:44:42 +0100 | [diff] [blame] | 1410 | VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), |
| 1411 | kfolio); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1412 | |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1413 | /* |
| 1414 | * No need to check ksm_use_zero_pages here: we can only have a |
Ethon Paul | 457aef9 | 2020-06-04 16:49:01 -0700 | [diff] [blame] | 1415 | * zero_page here if ksm_use_zero_pages was enabled already. |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1416 | */ |
| 1417 | if (!is_zero_pfn(page_to_pfn(kpage))) { |
David Hildenbrand | 9772953 | 2023-12-20 23:44:42 +0100 | [diff] [blame] | 1418 | folio_get(kfolio); |
| 1419 | folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1420 | newpte = mk_pte(kpage, vma->vm_page_prot); |
| 1421 | } else { |
xu xin | 7927147 | 2023-06-13 11:09:28 +0800 | [diff] [blame] | 1422 | /* |
| 1423 | * Use pte_mkdirty to mark the zero page mapped by KSM, and then |
| 1424 | * we can easily track all KSM-placed zero pages by checking if |
| 1425 | * the dirty bit in zero page's PTE is set. |
| 1426 | */ |
| 1427 | newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); |
Chengming Zhou | c2dc78b | 2024-05-28 13:15:22 +0800 | [diff] [blame] | 1428 | ksm_map_zero_page(mm); |
Claudio Imbrenda | a38c015 | 2018-04-10 16:29:41 -0700 | [diff] [blame] | 1429 | /* |
| 1430 | * We're replacing an anonymous page with a zero page, which is |
| 1431 | * not anonymous. We need to do proper accounting, otherwise we |
| 1432 | * will get wrong values in /proc, and a BUG message in dmesg |
| 1433 | * when tearing down the mm. |
| 1434 | */ |
| 1435 | dec_mm_counter(mm, MM_ANONPAGES); |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1436 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1437 | |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 1438 | flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1439 | /* |
| 1440 | * No need to notify as we are replacing a read only page with another |
| 1441 | * read only page with the same content. |
| 1442 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 1443 | * See Documentation/mm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1444 | */ |
| 1445 | ptep_clear_flush(vma, addr, ptep); |
Paolo Bonzini | f784274 | 2024-04-05 07:58:15 -0400 | [diff] [blame] | 1446 | set_pte_at(mm, addr, ptep, newpte); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1447 | |
Matthew Wilcox (Oracle) | b4e6f66e | 2022-09-02 20:46:41 +0100 | [diff] [blame] | 1448 | folio = page_folio(page); |
David Hildenbrand | 18e8612 | 2023-12-20 23:44:51 +0100 | [diff] [blame] | 1449 | folio_remove_rmap_pte(folio, page, vma); |
Matthew Wilcox (Oracle) | b4e6f66e | 2022-09-02 20:46:41 +0100 | [diff] [blame] | 1450 | if (!folio_mapped(folio)) |
| 1451 | folio_free_swap(folio); |
| 1452 | folio_put(folio); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1453 | |
| 1454 | pte_unmap_unlock(ptep, ptl); |
| 1455 | err = 0; |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 1456 | out_mn: |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1457 | mmu_notifier_invalidate_range_end(&range); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1458 | out: |
| 1459 | return err; |
| 1460 | } |
| 1461 | |
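/*
 * The two newpte cases in replace_page() above, summarised (a sketch,
 * using the same names as the code):
 *
 *	real KSM page:	 newpte = mk_pte(kpage, vma->vm_page_prot),
 *			 after folio_get() + folio_add_anon_rmap_pte();
 *	shared zeropage: newpte = pte_mkdirty(pte_mkspecial(...)), the
 *			 dirty bit acting purely as the "KSM placed this
 *			 zero page" marker, with MM_ANONPAGES decremented
 *			 since the zero page is not anonymous.
 */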
| 1462 | /* |
| 1463 | * try_to_merge_one_page - take two pages and merge them into one |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1464 | * @vma: the vma that holds the pte pointing to page |
| 1465 | * @page: the PageAnon page that we want to replace with kpage |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1466 | * @kpage: the PageKsm page that we want to map instead of page, |
| 1467 | * or NULL the first time when we want to use page as kpage. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1468 | * |
| 1469 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
| 1470 | */ |
| 1471 | static int try_to_merge_one_page(struct vm_area_struct *vma, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1472 | struct page *page, struct page *kpage) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1473 | { |
| 1474 | pte_t orig_pte = __pte(0); |
| 1475 | int err = -EFAULT; |
| 1476 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1477 | if (page == kpage) /* ksm page forked */ |
| 1478 | return 0; |
| 1479 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1480 | if (!PageAnon(page)) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1481 | goto out; |
| 1482 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1483 | /* |
| 1484 | * We need the page lock to read a stable PageSwapCache in |
| 1485 | * write_protect_page(). We use trylock_page() instead of |
| 1486 | * lock_page() because we don't want to wait here - we |
| 1487 | * prefer to continue scanning and merging different pages, |
| 1488 | * then come back to this page when it is unlocked. |
| 1489 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1490 | if (!trylock_page(page)) |
Hugh Dickins | 31e855e | 2009-12-14 17:59:17 -0800 | [diff] [blame] | 1491 | goto out; |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1492 | |
| 1493 | if (PageTransCompound(page)) { |
Andrea Arcangeli | a7306c3 | 2017-06-02 14:46:11 -0700 | [diff] [blame] | 1494 | if (split_huge_page(page)) |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1495 | goto out_unlock; |
| 1496 | } |
| 1497 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1498 | /* |
| 1499 | * If this anonymous page is mapped only here, its pte may need |
| 1500 | * to be write-protected. If it's mapped elsewhere, all of its |
| 1501 | * ptes are necessarily already write-protected. But in either |
| 1502 | * case, we need to lock and check page_count is not raised. |
| 1503 | */ |
Alex Shi (tencent) | 40d707f | 2024-04-11 14:17:08 +0800 | [diff] [blame] | 1504 | if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) { |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1505 | if (!kpage) { |
| 1506 | /* |
| 1507 | * While we hold page lock, upgrade page from |
| 1508 | * PageAnon+anon_vma to PageKsm+NULL stable_node: |
| 1509 | * stable_tree_insert() will update stable_node. |
| 1510 | */ |
Alex Shi (tencent) | 452e862 | 2024-04-11 14:17:11 +0800 | [diff] [blame] | 1511 | folio_set_stable_node(page_folio(page), NULL); |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1512 | mark_page_accessed(page); |
Minchan Kim | 337ed7e | 2016-01-15 16:55:15 -0800 | [diff] [blame] | 1513 | /* |
| 1514 | * Page reclaim just frees a clean page with no dirty |
| 1515 | * ptes: make sure that the ksm page would be swapped. |
| 1516 | */ |
| 1517 | if (!PageDirty(page)) |
| 1518 | SetPageDirty(page); |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1519 | err = 0; |
| 1520 | } else if (pages_identical(page, kpage)) |
| 1521 | err = replace_page(vma, page, kpage, orig_pte); |
| 1522 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1523 | |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1524 | out_unlock: |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1525 | unlock_page(page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1526 | out: |
| 1527 | return err; |
| 1528 | } |
| 1529 | |
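/*
 * Outcomes of try_to_merge_one_page() above, in short (a sketch):
 *
 *	page == kpage	-> 0: the ksm page was merely inherited via fork;
 *	kpage == NULL	-> on success, page itself has been prepared to
 *			   become a ksm page (stable_node cleared, pte
 *			   write-protected, PageDirty set) ready for
 *			   stable_tree_insert();
 *	kpage != NULL	-> page's pte is switched over to kpage only if
 *			   the contents are still pages_identical().
 */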
| 1530 | /* |
Chengming Zhou | ac90c56 | 2024-06-21 15:54:29 +0800 | [diff] [blame] | 1531 | * This function returns 0 if the pages were merged or if they are |
| 1532 | * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise. |
| 1533 | */ |
| 1534 | static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item, |
| 1535 | struct page *page) |
| 1536 | { |
| 1537 | struct mm_struct *mm = rmap_item->mm; |
| 1538 | int err = -EFAULT; |
| 1539 | |
| 1540 | /* |
| 1541 | * Same checksum as an empty page. We attempt to merge it with the |
| 1542 | * appropriate zero page if the user enabled this via sysfs. |
| 1543 | */ |
| 1544 | if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) { |
| 1545 | struct vm_area_struct *vma; |
| 1546 | |
| 1547 | mmap_read_lock(mm); |
| 1548 | vma = find_mergeable_vma(mm, rmap_item->address); |
| 1549 | if (vma) { |
| 1550 | err = try_to_merge_one_page(vma, page, |
| 1551 | ZERO_PAGE(rmap_item->address)); |
| 1552 | trace_ksm_merge_one_page( |
| 1553 | page_to_pfn(ZERO_PAGE(rmap_item->address)), |
| 1554 | rmap_item, mm, err); |
| 1555 | } else { |
| 1556 | /* |
| 1557 | * If the vma is out of date, we do not need to |
| 1558 | * continue. |
| 1559 | */ |
| 1560 | err = 0; |
| 1561 | } |
| 1562 | mmap_read_unlock(mm); |
| 1563 | } |
| 1564 | |
| 1565 | return err; |
| 1566 | } |
| 1567 | |
| 1568 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1569 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, |
| 1570 | * but no new kernel page is allocated: kpage must already be a ksm page. |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1571 | * |
| 1572 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1573 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1574 | static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1575 | struct page *page, struct page *kpage) |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1576 | { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1577 | struct mm_struct *mm = rmap_item->mm; |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1578 | struct vm_area_struct *vma; |
| 1579 | int err = -EFAULT; |
| 1580 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1581 | mmap_read_lock(mm); |
Andrea Arcangeli | 85c6e8d | 2015-11-05 18:49:16 -0800 | [diff] [blame] | 1582 | vma = find_mergeable_vma(mm, rmap_item->address); |
| 1583 | if (!vma) |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1584 | goto out; |
| 1585 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1586 | err = try_to_merge_one_page(vma, page, kpage); |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1587 | if (err) |
| 1588 | goto out; |
| 1589 | |
Hugh Dickins | bc56620 | 2013-02-22 16:36:06 -0800 | [diff] [blame] | 1590 | /* Unstable nid is in union with stable anon_vma: remove first */ |
| 1591 | remove_rmap_item_from_tree(rmap_item); |
| 1592 | |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1593 | /* Must get reference to anon_vma while still holding mmap_lock */ |
Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 1594 | rmap_item->anon_vma = vma->anon_vma; |
| 1595 | get_anon_vma(vma->anon_vma); |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1596 | out: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1597 | mmap_read_unlock(mm); |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 1598 | trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), |
| 1599 | rmap_item, mm, err); |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1600 | return err; |
| 1601 | } |
| 1602 | |
| 1603 | /* |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1604 | * try_to_merge_two_pages - take two identical pages and prepare them |
| 1605 | * to be merged into one page. |
| 1606 | * |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1607 | * This function returns the kpage if we successfully merged two identical |
| 1608 | * pages into one ksm page, NULL otherwise. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1609 | * |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1610 | * Note that this function upgrades page to ksm page: if one of the pages |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1611 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
| 1612 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1613 | static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1614 | struct page *page, |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1615 | struct ksm_rmap_item *tree_rmap_item, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1616 | struct page *tree_page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1617 | { |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1618 | int err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1619 | |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1620 | err = try_to_merge_with_ksm_page(rmap_item, page, NULL); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1621 | if (!err) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1622 | err = try_to_merge_with_ksm_page(tree_rmap_item, |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1623 | tree_page, page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1624 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1625 | * If that fails, we have a ksm page with only one pte |
| 1626 | * pointing to it: so break it. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1627 | */ |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1628 | if (err) |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1629 | break_cow(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1630 | } |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1631 | return err ? NULL : page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1632 | } |
| 1633 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1634 | static __always_inline |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1635 | bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1636 | { |
| 1637 | VM_BUG_ON(stable_node->rmap_hlist_len < 0); |
| 1638 | /* |
| 1639 | * Check that at least one mapping still exists, otherwise |
| 1640 | * there's not much point in merging and sharing with this |
| 1641 | * stable_node, as the underlying tree_page of the other |
| 1642 | * sharer is going to be freed soon. |
| 1643 | */ |
| 1644 | return stable_node->rmap_hlist_len && |
| 1645 | stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; |
| 1646 | } |
| 1647 | |
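/*
 * Worked example for __is_page_sharing_candidate() above, assuming the
 * default max_page_sharing limit of 256 (tunable via
 * /sys/kernel/mm/ksm/max_page_sharing):
 *
 *	rmap_hlist_len == 0	-> false: nothing maps the page anymore,
 *				   it is about to be freed;
 *	rmap_hlist_len == 100	-> true with offset 0 (100 < 256);
 *	rmap_hlist_len == 256	-> false: this dup is full, another (or a
 *				   new) stable_node_dup must take any
 *				   further sharers.
 */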
| 1648 | static __always_inline |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1649 | bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1650 | { |
| 1651 | return __is_page_sharing_candidate(stable_node, 0); |
| 1652 | } |
| 1653 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1654 | static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup, |
| 1655 | struct ksm_stable_node **_stable_node, |
| 1656 | struct rb_root *root, |
| 1657 | bool prune_stale_stable_nodes) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1658 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1659 | struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1660 | struct hlist_node *hlist_safe; |
Alex Shi (tencent) | 6f528de | 2024-04-11 14:17:06 +0800 | [diff] [blame] | 1661 | struct folio *folio, *tree_folio = NULL; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1662 | int found_rmap_hlist_len; |
| 1663 | |
| 1664 | if (!prune_stale_stable_nodes || |
| 1665 | time_before(jiffies, stable_node->chain_prune_time + |
| 1666 | msecs_to_jiffies( |
| 1667 | ksm_stable_node_chains_prune_millisecs))) |
| 1668 | prune_stale_stable_nodes = false; |
| 1669 | else |
| 1670 | stable_node->chain_prune_time = jiffies; |
| 1671 | |
| 1672 | hlist_for_each_entry_safe(dup, hlist_safe, |
| 1673 | &stable_node->hlist, hlist_dup) { |
| 1674 | cond_resched(); |
| 1675 | /* |
| 1676 | * We must walk all stable_node_dup to prune the stale |
| 1677 | * stable nodes during lookup. |
| 1678 | * |
Alex Shi (tencent) | 6f528de | 2024-04-11 14:17:06 +0800 | [diff] [blame] | 1679 | * ksm_get_folio can drop the nodes from the |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1680 | * stable_node->hlist if they point to freed pages |
| 1681 | * (that's why we do a _safe walk). The "dup" |
| 1682 | * stable_node parameter itself will be freed from |
| 1683 | * under us if it returns NULL. |
| 1684 | */ |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 1685 | folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK); |
Alex Shi (tencent) | 6f528de | 2024-04-11 14:17:06 +0800 | [diff] [blame] | 1686 | if (!folio) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1687 | continue; |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1688 | /* Pick the best candidate if possible. */ |
| 1689 | if (!found || (is_page_sharing_candidate(dup) && |
| 1690 | (!is_page_sharing_candidate(found) || |
| 1691 | dup->rmap_hlist_len > found_rmap_hlist_len))) { |
| 1692 | if (found) |
| 1693 | folio_put(tree_folio); |
| 1694 | found = dup; |
| 1695 | found_rmap_hlist_len = found->rmap_hlist_len; |
| 1696 | tree_folio = folio; |
| 1697 | /* skip folio_put for the found candidate */ |
| 1698 | if (!prune_stale_stable_nodes && |
| 1699 | is_page_sharing_candidate(found)) |
| 1700 | break; |
| 1701 | continue; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1702 | } |
Alex Shi (tencent) | 6f528de | 2024-04-11 14:17:06 +0800 | [diff] [blame] | 1703 | folio_put(folio); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1704 | } |
| 1705 | |
Andrea Arcangeli | 80b18df | 2017-07-06 15:37:08 -0700 | [diff] [blame] | 1706 | if (found) { |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1707 | if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1708 | /* |
| 1709 | * If there's not just one entry, we would |
| 1710 | * corrupt memory: better to BUG_ON. In KSM |
| 1711 | * context with no lock held it's not even |
| 1712 | * fatal. |
| 1713 | */ |
| 1714 | BUG_ON(stable_node->hlist.first->next); |
| 1715 | |
| 1716 | /* |
| 1717 | * There's just one entry and it is below the |
| 1718 | * deduplication limit so drop the chain. |
| 1719 | */ |
| 1720 | rb_replace_node(&stable_node->node, &found->node, |
| 1721 | root); |
| 1722 | free_stable_node(stable_node); |
| 1723 | ksm_stable_node_chains--; |
| 1724 | ksm_stable_node_dups--; |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1725 | /* |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1726 | * NOTE: the caller depends on the stable_node |
| 1727 | * to be equal to stable_node_dup if the chain |
| 1728 | * was collapsed. |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1729 | */ |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1730 | *_stable_node = found; |
| 1731 | /* |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 1732 | * Just for robustness, as stable_node is |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1733 | * otherwise left as a stale pointer, the |
| 1734 | * compiler shall optimize it away at build |
| 1735 | * time. |
| 1736 | */ |
| 1737 | stable_node = NULL; |
Andrea Arcangeli | 80b18df | 2017-07-06 15:37:08 -0700 | [diff] [blame] | 1738 | } else if (stable_node->hlist.first != &found->hlist_dup && |
| 1739 | __is_page_sharing_candidate(found, 1)) { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1740 | /* |
Andrea Arcangeli | 80b18df | 2017-07-06 15:37:08 -0700 | [diff] [blame] | 1741 | * If the found stable_node dup can accept one |
| 1742 | * more future merge (in addition to the one |
| 1743 | * that is underway) and is not at the head of |
| 1744 | * the chain, put it there so next search will |
| 1745 | * be quicker in the !prune_stale_stable_nodes |
| 1746 | * case. |
| 1747 | * |
| 1748 | * NOTE: it would be inaccurate to use nr > 1 |
| 1749 | * instead of checking the hlist.first pointer |
| 1750 | * directly, because in the |
| 1751 | * prune_stale_stable_nodes case "nr" isn't |
| 1752 | * the position of the found dup in the chain, |
| 1753 | * but the total number of dups in the chain. |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1754 | */ |
| 1755 | hlist_del(&found->hlist_dup); |
| 1756 | hlist_add_head(&found->hlist_dup, |
| 1757 | &stable_node->hlist); |
| 1758 | } |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1759 | } else { |
| 1760 | /* Its hlist must be empty if no dup was found. */ |
| 1761 | free_stable_node_chain(stable_node, root); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1762 | } |
| 1763 | |
Andrea Arcangeli | 8dc5ffc | 2017-07-06 15:37:05 -0700 | [diff] [blame] | 1764 | *_stable_node_dup = found; |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1765 | return tree_folio; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1766 | } |
| 1767 | |
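/*
 * In short, the loop in stable_node_dup() above prefers the dup with the
 * largest rmap_hlist_len that is still a sharing candidate, so new
 * mappings are packed onto the fullest dup that has room, while dups
 * whose folios were freed drop out along the way. With
 * prune_stale_stable_nodes the whole chain is walked; otherwise the walk
 * stops at the first acceptable candidate.
 */
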
Andrea Arcangeli | 8dc5ffc | 2017-07-06 15:37:05 -0700 | [diff] [blame] | 1768 | /* |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1769 | * Like for ksm_get_folio, this function can free the *_stable_node and |
Andrea Arcangeli | 8dc5ffc | 2017-07-06 15:37:05 -0700 | [diff] [blame] | 1770 | * *_stable_node_dup if the returned tree_folio is NULL. |
| 1771 | * |
| 1772 | * It can also free and overwrite *_stable_node with the found |
| 1773 | * stable_node_dup if the chain is collapsed (in which case |
| 1774 | * *_stable_node will be equal to *_stable_node_dup like if the chain |
| 1775 | * never existed). It's up to the caller to verify tree_folio is not |
| 1776 | * NULL before dereferencing *_stable_node or *_stable_node_dup. |
| 1777 | * |
| 1778 | * *_stable_node_dup is really a second output parameter of this |
| 1779 | * function and will be overwritten in all cases, the caller doesn't |
| 1780 | * need to initialize it. |
| 1781 | */ |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1782 | static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, |
| 1783 | struct ksm_stable_node **_stable_node, |
| 1784 | struct rb_root *root, |
| 1785 | bool prune_stale_stable_nodes) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1786 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1787 | struct ksm_stable_node *stable_node = *_stable_node; |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1788 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1789 | if (!is_stable_node_chain(stable_node)) { |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1790 | *_stable_node_dup = stable_node; |
| 1791 | return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1792 | } |
Andrea Arcangeli | 8dc5ffc | 2017-07-06 15:37:05 -0700 | [diff] [blame] | 1793 | return stable_node_dup(_stable_node_dup, _stable_node, root, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1794 | prune_stale_stable_nodes); |
| 1795 | } |
| 1796 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1797 | static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d, |
| 1798 | struct ksm_stable_node **s_n, |
| 1799 | struct rb_root *root) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1800 | { |
Andrea Arcangeli | 8dc5ffc | 2017-07-06 15:37:05 -0700 | [diff] [blame] | 1801 | return __stable_node_chain(s_n_d, s_n, root, true); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1802 | } |
| 1803 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1804 | static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1805 | struct ksm_stable_node **s_n, |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1806 | struct rb_root *root) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1807 | { |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1808 | return __stable_node_chain(s_n_d, s_n, root, false); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1809 | } |
| 1810 | |
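/*
 * The two wrappers above differ only in the prune_stale_stable_nodes
 * flag: chain_prune() is the lookup-path variant used by
 * stable_tree_search() below and may prune stale dups while walking the
 * chain, while chain() is the insert-path variant used by
 * stable_tree_insert() and skips that pruning.
 */
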
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1811 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1812 | * stable_tree_search - search for page inside the stable tree |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1813 | * |
| 1814 | * This function checks if there is a page inside the stable tree |
| 1815 | * with identical content to the page that we are scanning right now. |
| 1816 | * |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1817 | * This function returns the stable tree node of identical content if found, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1818 | * NULL otherwise. |
| 1819 | */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1820 | static struct page *stable_tree_search(struct page *page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1821 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1822 | int nid; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1823 | struct rb_root *root; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1824 | struct rb_node **new; |
| 1825 | struct rb_node *parent; |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1826 | struct ksm_stable_node *stable_node, *stable_node_dup; |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1827 | struct ksm_stable_node *page_node; |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1828 | struct folio *folio; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1829 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1830 | folio = page_folio(page); |
| 1831 | page_node = folio_stable_node(folio); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1832 | if (page_node && page_node->head != &migrate_nodes) { |
| 1833 | /* ksm page forked */ |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1834 | folio_get(folio); |
| 1835 | return &folio->page; |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1836 | } |
| 1837 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1838 | nid = get_kpfn_nid(folio_pfn(folio)); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1839 | root = root_stable_tree + nid; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1840 | again: |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1841 | new = &root->rb_node; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1842 | parent = NULL; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1843 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1844 | while (*new) { |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1845 | struct folio *tree_folio; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1846 | int ret; |
| 1847 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1848 | cond_resched(); |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 1849 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1850 | tree_folio = chain_prune(&stable_node_dup, &stable_node, root); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1851 | if (!tree_folio) { |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1852 | /* |
| 1853 | * If we walked over a stale stable_node, |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1854 | * ksm_get_folio() will call rb_erase() and it |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1855 | * may rebalance the tree from under us. So |
| 1856 | * restart the search from scratch. Returning |
| 1857 | * NULL would be safe too, but we'd generate |
| 1858 | * false negative insertions just because some |
| 1859 | * stable_node was stale. |
| 1860 | */ |
| 1861 | goto again; |
| 1862 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1863 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1864 | ret = memcmp_pages(page, &tree_folio->page); |
| 1865 | folio_put(tree_folio); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1866 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1867 | parent = *new; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1868 | if (ret < 0) |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1869 | new = &parent->rb_left; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1870 | else if (ret > 0) |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1871 | new = &parent->rb_right; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1872 | else { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1873 | if (page_node) { |
| 1874 | VM_BUG_ON(page_node->head != &migrate_nodes); |
| 1875 | /* |
David Hildenbrand | 2aa33912 | 2024-04-16 19:25:33 +0200 | [diff] [blame] | 1876 | * If the mapcount of our migrated KSM folio is |
| 1877 | * at most 1, we can merge it with another |
| 1878 | * KSM folio where we know that we have space |
| 1879 | * for one more mapping without exceeding the |
| 1880 | * ksm_max_page_sharing limit: see |
| 1881 | * chain_prune(). This way, we can avoid adding |
| 1882 | * this stable node to the chain. |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1883 | */ |
David Hildenbrand | 2aa33912 | 2024-04-16 19:25:33 +0200 | [diff] [blame] | 1884 | if (folio_mapcount(folio) > 1) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1885 | goto chain_append; |
| 1886 | } |
| 1887 | |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 1888 | if (!is_page_sharing_candidate(stable_node_dup)) { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1889 | /* |
| 1890 | * If the stable_node is a chain and |
| 1891 | * we got a payload match in memcmp |
| 1892 | * but we cannot merge the scanned |
| 1893 | * page in any of the existing |
| 1894 | * stable_node dups because they're |
| 1895 | * all full, we need to wait for the |
| 1896 | * scanned page to find itself a match |
| 1897 | * in the unstable tree to create a |
| 1898 | * brand new KSM page to add later to |
| 1899 | * the dups of this stable_node. |
| 1900 | */ |
| 1901 | return NULL; |
| 1902 | } |
| 1903 | |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1904 | /* |
| 1905 | * Lock and unlock the stable_node's page (which |
| 1906 | * might already have been migrated) so that page |
| 1907 | * migration is sure to notice its raised count. |
| 1908 | * It would be more elegant to return stable_node |
| 1909 | * than kpage, but that involves more changes. |
| 1910 | */ |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1911 | tree_folio = ksm_get_folio(stable_node_dup, |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 1912 | KSM_GET_FOLIO_TRYLOCK); |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 1913 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1914 | if (PTR_ERR(tree_folio) == -EBUSY) |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 1915 | return ERR_PTR(-EBUSY); |
| 1916 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1917 | if (unlikely(!tree_folio)) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1918 | /* |
| 1919 | * The tree may have been rebalanced, |
| 1920 | * so re-evaluate parent and new. |
| 1921 | */ |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1922 | goto again; |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1923 | folio_unlock(tree_folio); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1924 | |
| 1925 | if (get_kpfn_nid(stable_node_dup->kpfn) != |
| 1926 | NUMA(stable_node_dup->nid)) { |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1927 | folio_put(tree_folio); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1928 | goto replace; |
| 1929 | } |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1930 | return &tree_folio->page; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1931 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1932 | } |
| 1933 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1934 | if (!page_node) |
| 1935 | return NULL; |
| 1936 | |
| 1937 | list_del(&page_node->list); |
| 1938 | DO_NUMA(page_node->nid = nid); |
| 1939 | rb_link_node(&page_node->node, parent, new); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1940 | rb_insert_color(&page_node->node, root); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1941 | out: |
| 1942 | if (is_page_sharing_candidate(page_node)) { |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1943 | folio_get(folio); |
| 1944 | return &folio->page; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1945 | } else |
| 1946 | return NULL; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1947 | |
| 1948 | replace: |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1949 | /* |
| 1950 | * If stable_node was a chain and chain_prune collapsed it, |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1951 | * stable_node has been updated to be the new regular |
| 1952 | * stable_node. A collapse of the chain is indistinguishable |
| 1953 | * from the case there was no chain in the stable |
| 1954 | * rbtree. Otherwise stable_node is the chain and |
| 1955 | * stable_node_dup is the dup to replace. |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1956 | */ |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1957 | if (stable_node_dup == stable_node) { |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1958 | VM_BUG_ON(is_stable_node_chain(stable_node_dup)); |
| 1959 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1960 | /* there is no chain */ |
| 1961 | if (page_node) { |
| 1962 | VM_BUG_ON(page_node->head != &migrate_nodes); |
| 1963 | list_del(&page_node->list); |
| 1964 | DO_NUMA(page_node->nid = nid); |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1965 | rb_replace_node(&stable_node_dup->node, |
| 1966 | &page_node->node, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1967 | root); |
| 1968 | if (is_page_sharing_candidate(page_node)) |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1969 | folio_get(folio); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1970 | else |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1971 | folio = NULL; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1972 | } else { |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1973 | rb_erase(&stable_node_dup->node, root); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1974 | folio = NULL; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1975 | } |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1976 | } else { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1977 | VM_BUG_ON(!is_stable_node_chain(stable_node)); |
| 1978 | __stable_node_dup_del(stable_node_dup); |
| 1979 | if (page_node) { |
| 1980 | VM_BUG_ON(page_node->head != &migrate_nodes); |
| 1981 | list_del(&page_node->list); |
| 1982 | DO_NUMA(page_node->nid = nid); |
| 1983 | stable_node_chain_add_dup(page_node, stable_node); |
| 1984 | if (is_page_sharing_candidate(page_node)) |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1985 | folio_get(folio); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1986 | else |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1987 | folio = NULL; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1988 | } else { |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1989 | folio = NULL; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1990 | } |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1991 | } |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1992 | stable_node_dup->head = &migrate_nodes; |
| 1993 | list_add(&stable_node_dup->list, stable_node_dup->head); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 1994 | return &folio->page; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 1995 | |
| 1996 | chain_append: |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 1997 | /* |
| 1998 | * If stable_node was a chain and chain_prune collapsed it, |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 1999 | * stable_node has been updated to be the new regular |
| 2000 | * stable_node. A collapse of the chain is indistinguishable |
| 2001 | * from the case there was no chain in the stable |
| 2002 | * rbtree. Otherwise stable_node is the chain and |
| 2003 | * stable_node_dup is the dup to replace. |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 2004 | */ |
Andrea Arcangeli | 0ba1d0f | 2017-07-06 15:37:02 -0700 | [diff] [blame] | 2005 | if (stable_node_dup == stable_node) { |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 2006 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2007 | /* chain is missing so create it */ |
| 2008 | stable_node = alloc_stable_node_chain(stable_node_dup, |
| 2009 | root); |
| 2010 | if (!stable_node) |
| 2011 | return NULL; |
| 2012 | } |
| 2013 | /* |
| 2014 | * Add this stable_node dup that was |
| 2015 | * migrated to the stable_node chain |
| 2016 | * of the current nid for this page |
| 2017 | * content. |
| 2018 | */ |
Andrea Arcangeli | b4fecc6 | 2017-07-06 15:36:59 -0700 | [diff] [blame] | 2019 | VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2020 | VM_BUG_ON(page_node->head != &migrate_nodes); |
| 2021 | list_del(&page_node->list); |
| 2022 | DO_NUMA(page_node->nid = nid); |
| 2023 | stable_node_chain_add_dup(page_node, stable_node); |
| 2024 | goto out; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2025 | } |
| 2026 | |
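/*
 * Caller-side sketch (hypothetical, but mirroring cmp_and_merge_page()
 * further down): stable_tree_search() returns NULL when no identical
 * page exists, ERR_PTR(-EBUSY) when the matching stable folio could not
 * be trylocked, or a page with an extra reference taken on the caller's
 * behalf, so a caller is expected to do roughly:
 *
 *	kpage = stable_tree_search(page);
 *	if (kpage) {
 *		if (PTR_ERR(kpage) == -EBUSY)
 *			return;			(retry on a later scan)
 *		... merge page into kpage ...
 *		put_page(kpage);		(drop the reference we were given)
 *	}
 */
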
| 2027 | /* |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 2028 | * stable_tree_insert - insert stable tree node pointing to new ksm page |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2029 | * into the stable tree. |
| 2030 | * |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2031 | * This function returns the stable tree node just allocated on success, |
| 2032 | * NULL otherwise. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2033 | */ |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2034 | static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2035 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2036 | int nid; |
| 2037 | unsigned long kpfn; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 2038 | struct rb_root *root; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2039 | struct rb_node **new; |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 2040 | struct rb_node *parent; |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 2041 | struct ksm_stable_node *stable_node, *stable_node_dup; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2042 | bool need_chain = false; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2043 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2044 | kpfn = folio_pfn(kfolio); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2045 | nid = get_kpfn_nid(kpfn); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 2046 | root = root_stable_tree + nid; |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 2047 | again: |
| 2048 | parent = NULL; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 2049 | new = &root->rb_node; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2050 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2051 | while (*new) { |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2052 | struct folio *tree_folio; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2053 | int ret; |
| 2054 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 2055 | cond_resched(); |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2056 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
Chengming Zhou | a0b856b | 2024-06-21 15:54:31 +0800 | [diff] [blame] | 2057 | tree_folio = chain(&stable_node_dup, &stable_node, root); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2058 | if (!tree_folio) { |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 2059 | /* |
| 2060 | * If we walked over a stale stable_node, |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2061 | * ksm_get_folio() will call rb_erase() and it |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 2062 | * may rebalance the tree from under us. So |
| 2063 | * restart the search from scratch. Returning |
| 2064 | * NULL would be safe too, but we'd generate |
| 2065 | * false negative insertions just because some |
| 2066 | * stable_node was stale. |
| 2067 | */ |
| 2068 | goto again; |
| 2069 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2070 | |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2071 | ret = memcmp_pages(&kfolio->page, &tree_folio->page); |
| 2072 | folio_put(tree_folio); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2073 | |
| 2074 | parent = *new; |
| 2075 | if (ret < 0) |
| 2076 | new = &parent->rb_left; |
| 2077 | else if (ret > 0) |
| 2078 | new = &parent->rb_right; |
| 2079 | else { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2080 | need_chain = true; |
| 2081 | break; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2082 | } |
| 2083 | } |
| 2084 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2085 | stable_node_dup = alloc_stable_node(); |
| 2086 | if (!stable_node_dup) |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2087 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2088 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2089 | INIT_HLIST_HEAD(&stable_node_dup->hlist); |
| 2090 | stable_node_dup->kpfn = kpfn; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2091 | stable_node_dup->rmap_hlist_len = 0; |
| 2092 | DO_NUMA(stable_node_dup->nid = nid); |
| 2093 | if (!need_chain) { |
| 2094 | rb_link_node(&stable_node_dup->node, parent, new); |
| 2095 | rb_insert_color(&stable_node_dup->node, root); |
| 2096 | } else { |
| 2097 | if (!is_stable_node_chain(stable_node)) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2098 | struct ksm_stable_node *orig = stable_node; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2099 | /* chain is missing so create it */ |
| 2100 | stable_node = alloc_stable_node_chain(orig, root); |
| 2101 | if (!stable_node) { |
| 2102 | free_stable_node(stable_node_dup); |
| 2103 | return NULL; |
| 2104 | } |
| 2105 | } |
| 2106 | stable_node_chain_add_dup(stable_node_dup, stable_node); |
| 2107 | } |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 2108 | |
Chengming Zhou | 90e8234 | 2024-05-13 11:07:56 +0800 | [diff] [blame] | 2109 | folio_set_stable_node(kfolio, stable_node_dup); |
| 2110 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2111 | return stable_node_dup; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2112 | } |
| 2113 | |
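/*
 * Note that on success the new stable_node_dup is already fully wired
 * up: kfolio's stable-node pointer was set just above, and the dup
 * either sits directly in the rbtree or hangs off a stable_node chain
 * when a node with identical content already existed (typically because
 * the existing dups were all at the ksm_max_page_sharing limit). The
 * caller only has to append its rmap_items with stable_tree_append().
 */
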
| 2114 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2115 | * unstable_tree_search_insert - search for identical page, |
| 2116 | * else insert rmap_item into the unstable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2117 | * |
| 2118 | * This function searches for a page in the unstable tree identical to the |
| 2119 | * page currently being scanned; and if no identical page is found in the |
| 2120 | * tree, we insert rmap_item as a new object into the unstable tree. |
| 2121 | * |
| 2122 | * This function returns pointer to rmap_item found to be identical |
| 2123 | * to the currently scanned page, NULL otherwise. |
| 2124 | * |
| 2125 | * This function does both searching and inserting, because they share |
| 2126 | * the same walking algorithm in an rbtree. |
| 2127 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2128 | static |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2129 | struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2130 | struct page *page, |
| 2131 | struct page **tree_pagep) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2132 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2133 | struct rb_node **new; |
| 2134 | struct rb_root *root; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2135 | struct rb_node *parent = NULL; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2136 | int nid; |
| 2137 | |
| 2138 | nid = get_kpfn_nid(page_to_pfn(page)); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 2139 | root = root_unstable_tree + nid; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2140 | new = &root->rb_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2141 | |
| 2142 | while (*new) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2143 | struct ksm_rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2144 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2145 | int ret; |
| 2146 | |
Hugh Dickins | d178f27 | 2009-11-09 15:58:23 +0000 | [diff] [blame] | 2147 | cond_resched(); |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2148 | tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2149 | tree_page = get_mergeable_page(tree_rmap_item); |
Andrea Arcangeli | c8f95ed | 2015-11-05 18:49:19 -0800 | [diff] [blame] | 2150 | if (!tree_page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2151 | return NULL; |
| 2152 | |
| 2153 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2154 | * Don't substitute a ksm page for a forked page. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2155 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2156 | if (page == tree_page) { |
| 2157 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2158 | return NULL; |
| 2159 | } |
| 2160 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2161 | ret = memcmp_pages(page, tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2162 | |
| 2163 | parent = *new; |
| 2164 | if (ret < 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2165 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2166 | new = &parent->rb_left; |
| 2167 | } else if (ret > 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2168 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2169 | new = &parent->rb_right; |
Hugh Dickins | b599cbd | 2013-02-22 16:36:05 -0800 | [diff] [blame] | 2170 | } else if (!ksm_merge_across_nodes && |
| 2171 | page_to_nid(tree_page) != nid) { |
| 2172 | /* |
| 2173 | * If tree_page has been migrated to another NUMA node, |
| 2174 | * it will be flushed out and put in the right unstable |
| 2175 | * tree next time: only merge with it when merge_across_nodes is set. |
| 2176 | */ |
| 2177 | put_page(tree_page); |
| 2178 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2179 | } else { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2180 | *tree_pagep = tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2181 | return tree_rmap_item; |
| 2182 | } |
| 2183 | } |
| 2184 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2185 | rmap_item->address |= UNSTABLE_FLAG; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2186 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 2187 | DO_NUMA(rmap_item->nid = nid); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2188 | rb_link_node(&rmap_item->node, parent, new); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2189 | rb_insert_color(&rmap_item->node, root); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2190 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 2191 | ksm_pages_unshared++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2192 | return NULL; |
| 2193 | } |
| 2194 | |
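/*
 * Unlike the stable tree, the unstable tree is not kept consistent under
 * page modification: its entries are only trusted for the current scan,
 * and the whole tree is reset to RB_ROOT at the start of every full scan
 * (see scan_get_next_rmap_item() below). The scan seqnr folded into
 * rmap_item->address above is what lets leftover entries from an earlier
 * scan be recognised as stale.
 */
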
| 2195 | /* |
| 2196 | * stable_tree_append - add another rmap_item to the linked list of |
| 2197 | * rmap_items hanging off a given node of the stable tree, all sharing |
| 2198 | * the same ksm page. |
| 2199 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2200 | static void stable_tree_append(struct ksm_rmap_item *rmap_item, |
| 2201 | struct ksm_stable_node *stable_node, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2202 | bool max_page_sharing_bypass) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2203 | { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2204 | /* |
| 2205 | * rmap won't find this mapping if we don't insert the |
| 2206 | * rmap_item in the right stable_node |
| 2207 | * duplicate. page_migration could break later if rmap breaks, |
| 2208 | * so we might as well crash here. We really need to check for |
| 2209 | * rmap_hlist_len == STABLE_NODE_CHAIN, but we might as well check |
Ethon Paul | 457aef9 | 2020-06-04 16:49:01 -0700 | [diff] [blame] | 2210 | * for other negative values too, because an underflow detected here |
| 2211 | * for the first time (and not when decreasing rmap_hlist_len) |
| 2212 | * would be a sign of memory corruption in the stable_node. |
| 2213 | */ |
| 2214 | BUG_ON(stable_node->rmap_hlist_len < 0); |
| 2215 | |
| 2216 | stable_node->rmap_hlist_len++; |
| 2217 | if (!max_page_sharing_bypass) |
| 2218 | /* possibly non fatal but unexpected overflow, only warn */ |
| 2219 | WARN_ON_ONCE(stable_node->rmap_hlist_len > |
| 2220 | ksm_max_page_sharing); |
| 2221 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2222 | rmap_item->head = stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2223 | rmap_item->address |= STABLE_FLAG; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2224 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 2225 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2226 | if (rmap_item->hlist.next) |
| 2227 | ksm_pages_sharing++; |
| 2228 | else |
| 2229 | ksm_pages_shared++; |
xu xin | 7609385 | 2022-04-28 23:16:16 -0700 | [diff] [blame] | 2230 | |
| 2231 | rmap_item->mm->ksm_merging_pages++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2232 | } |
| 2233 | |
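/*
 * For the counters this means every stable node contributes exactly one
 * page to ksm_pages_shared and its remaining mappings to
 * ksm_pages_sharing: for example, a stable node carrying four rmap_items
 * accounts for 1 shared + 3 sharing.
 */
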
| 2234 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 2235 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
| 2236 | * if not, compare checksum to previous and if it's the same, see if page can |
| 2237 | * be inserted into the unstable tree, or merged with a page already there and |
| 2238 | * both transferred to the stable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2239 | * |
| 2240 | * @page: the page that we are searching identical page to. |
| 2241 | * @rmap_item: the reverse mapping into the virtual address of this page |
| 2242 | */ |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2243 | static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2244 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2245 | struct ksm_rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2246 | struct page *tree_page = NULL; |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2247 | struct ksm_stable_node *stable_node; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2248 | struct page *kpage; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2249 | unsigned int checksum; |
| 2250 | int err; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2251 | bool max_page_sharing_bypass = false; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2252 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2253 | stable_node = page_stable_node(page); |
| 2254 | if (stable_node) { |
| 2255 | if (stable_node->head != &migrate_nodes && |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2256 | get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != |
| 2257 | NUMA(stable_node->nid)) { |
| 2258 | stable_node_dup_del(stable_node); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2259 | stable_node->head = &migrate_nodes; |
| 2260 | list_add(&stable_node->list, stable_node->head); |
| 2261 | } |
| 2262 | if (stable_node->head != &migrate_nodes && |
| 2263 | rmap_item->head == stable_node) |
| 2264 | return; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2265 | /* |
| 2266 | * If it's a KSM fork, allow it to go over the sharing limit |
| 2267 | * without warnings. |
| 2268 | */ |
| 2269 | if (!is_page_sharing_candidate(stable_node)) |
| 2270 | max_page_sharing_bypass = true; |
Chengming Zhou | d58a361 | 2024-06-21 15:54:30 +0800 | [diff] [blame] | 2271 | } else { |
| 2272 | remove_rmap_item_from_tree(rmap_item); |
| 2273 | |
| 2274 | /* |
| 2275 | * If the hash value of the page has changed from the last time |
| 2276 | * we calculated it, this page is changing frequently: therefore we |
| 2277 | * don't want to insert it in the unstable tree, and we don't want |
| 2278 | * to waste our time searching for something identical to it there. |
| 2279 | */ |
| 2280 | checksum = calc_checksum(page); |
| 2281 | if (rmap_item->oldchecksum != checksum) { |
| 2282 | rmap_item->oldchecksum = checksum; |
| 2283 | return; |
| 2284 | } |
| 2285 | |
| 2286 | if (!try_to_merge_with_zero_page(rmap_item, page)) |
| 2287 | return; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2288 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2289 | |
| 2290 | /* We first start with searching the page inside the stable tree */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 2291 | kpage = stable_tree_search(page); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2292 | if (kpage == page && rmap_item->head == stable_node) { |
| 2293 | put_page(kpage); |
| 2294 | return; |
| 2295 | } |
| 2296 | |
| 2297 | remove_rmap_item_from_tree(rmap_item); |
| 2298 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 2299 | if (kpage) { |
Yang Shi | 2cee57d1 | 2019-03-05 15:48:12 -0800 | [diff] [blame] | 2300 | if (PTR_ERR(kpage) == -EBUSY) |
| 2301 | return; |
| 2302 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 2303 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2304 | if (!err) { |
| 2305 | /* |
| 2306 | * The page was successfully merged: |
| 2307 | * add its rmap_item to the stable tree. |
| 2308 | */ |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2309 | lock_page(kpage); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2310 | stable_tree_append(rmap_item, page_stable_node(kpage), |
| 2311 | max_page_sharing_bypass); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2312 | unlock_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2313 | } |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2314 | put_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2315 | return; |
| 2316 | } |
| 2317 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2318 | tree_rmap_item = |
| 2319 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2320 | if (tree_rmap_item) { |
Claudio Imbrenda | 77da2ba | 2018-04-05 16:25:41 -0700 | [diff] [blame] | 2321 | bool split; |
| 2322 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2323 | kpage = try_to_merge_two_pages(rmap_item, page, |
| 2324 | tree_rmap_item, tree_page); |
Claudio Imbrenda | 77da2ba | 2018-04-05 16:25:41 -0700 | [diff] [blame] | 2325 | /* |
| 2326 | * If both pages we tried to merge belong to the same compound |
| 2327 | * page, then we actually ended up increasing the reference |
| 2328 | * count of the same compound page twice, and split_huge_page |
| 2329 | * failed. |
| 2330 | * Here we set a flag if that happened, and we use it later to |
| 2331 | * try split_huge_page again. Since we call put_page right |
| 2332 | * afterwards, the reference count will be correct and |
| 2333 | * split_huge_page should succeed. |
| 2334 | */ |
| 2335 | split = PageTransCompound(page) |
| 2336 | && compound_head(page) == compound_head(tree_page); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2337 | put_page(tree_page); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2338 | if (kpage) { |
Hugh Dickins | bc56620 | 2013-02-22 16:36:06 -0800 | [diff] [blame] | 2339 | /* |
| 2340 | * The pages were successfully merged: insert new |
| 2341 | * node in the stable tree and add both rmap_items. |
| 2342 | */ |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2343 | lock_page(kpage); |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 2344 | stable_node = stable_tree_insert(page_folio(kpage)); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2345 | if (stable_node) { |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 2346 | stable_tree_append(tree_rmap_item, stable_node, |
| 2347 | false); |
| 2348 | stable_tree_append(rmap_item, stable_node, |
| 2349 | false); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2350 | } |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2351 | unlock_page(kpage); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2352 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2353 | /* |
| 2354 | * If we fail to insert the page into the stable tree, |
| 2355 | * we will have 2 virtual addresses that are pointing |
| 2356 | * to a ksm page left outside the stable tree, |
| 2357 | * in which case we need to break_cow on both. |
| 2358 | */ |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 2359 | if (!stable_node) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 2360 | break_cow(tree_rmap_item); |
| 2361 | break_cow(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2362 | } |
Claudio Imbrenda | 77da2ba | 2018-04-05 16:25:41 -0700 | [diff] [blame] | 2363 | } else if (split) { |
| 2364 | /* |
| 2365 | * We are here if we tried to merge two pages and |
| 2366 | * failed because they both belonged to the same |
| 2367 | * compound page. We will split the page now, but no |
| 2368 | * merging will take place. |
| 2369 | * We do not want to add the cost of a full lock; if |
| 2370 | * the page is locked, it is better to skip it and |
| 2371 | * perhaps try again later. |
| 2372 | */ |
| 2373 | if (!trylock_page(page)) |
| 2374 | return; |
| 2375 | split_huge_page(page); |
| 2376 | unlock_page(page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2377 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2378 | } |
| 2379 | } |
| 2380 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2381 | static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, |
| 2382 | struct ksm_rmap_item **rmap_list, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2383 | unsigned long addr) |
| 2384 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2385 | struct ksm_rmap_item *rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2386 | |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2387 | while (*rmap_list) { |
| 2388 | rmap_item = *rmap_list; |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 2389 | if ((rmap_item->address & PAGE_MASK) == addr) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2390 | return rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2391 | if (rmap_item->address > addr) |
| 2392 | break; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2393 | *rmap_list = rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2394 | remove_rmap_item_from_tree(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2395 | free_rmap_item(rmap_item); |
| 2396 | } |
| 2397 | |
| 2398 | rmap_item = alloc_rmap_item(); |
| 2399 | if (rmap_item) { |
| 2400 | /* It has already been zeroed */ |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2401 | rmap_item->mm = mm_slot->slot.mm; |
xu xin | cb4df4c | 2022-08-30 14:38:38 +0000 | [diff] [blame] | 2402 | rmap_item->mm->ksm_rmap_items++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2403 | rmap_item->address = addr; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2404 | rmap_item->rmap_list = *rmap_list; |
| 2405 | *rmap_list = rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2406 | } |
| 2407 | return rmap_item; |
| 2408 | } |
| 2409 | |
Stefan Roesch | 5e924ff | 2023-09-25 21:09:36 -0700 | [diff] [blame] | 2410 | /* |
| 2411 | * Calculate the skip age for a ksm page. The age reflects how many times |
| 2412 | * de-duplication has already been tried unsuccessfully; the smaller the |
| 2413 | * age, the fewer scans this page is skipped for. |
| 2414 | * |
| 2415 | * @age: rmap_item age of page |
| 2416 | */ |
| 2417 | static unsigned int skip_age(rmap_age_t age) |
| 2418 | { |
| 2419 | if (age <= 3) |
| 2420 | return 1; |
| 2421 | if (age <= 5) |
| 2422 | return 2; |
| 2423 | if (age <= 8) |
| 2424 | return 4; |
| 2425 | |
| 2426 | return 8; |
| 2427 | } |
| 2428 | |
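/*
 * Rough schedule when combined with should_skip_rmap_item() below (the
 * age keeps incrementing on every visit): pages younger than age 3 are
 * always scanned; from age 3 onwards a page is scanned once and then
 * skipped for skip_age(age) consecutive scans, i.e. 1-2 skips at ages
 * 3-5, 4 skips at ages 6-8, and 8 skips once the age exceeds 8.
 */
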
| 2429 | /* |
| 2430 | * Determines if a page should be skipped for the current scan. |
| 2431 | * |
| 2432 | * @page: page to check |
| 2433 | * @rmap_item: associated rmap_item of page |
| 2434 | */ |
| 2435 | static bool should_skip_rmap_item(struct page *page, |
| 2436 | struct ksm_rmap_item *rmap_item) |
| 2437 | { |
| 2438 | rmap_age_t age; |
| 2439 | |
| 2440 | if (!ksm_smart_scan) |
| 2441 | return false; |
| 2442 | |
| 2443 | /* |
| 2444 | * Never skip pages that are already KSM; pages cmp_and_merge_page() |
| 2445 | * will essentially ignore them, but we still have to process them |
| 2446 | * properly. |
| 2447 | */ |
| 2448 | if (PageKsm(page)) |
| 2449 | return false; |
| 2450 | |
| 2451 | age = rmap_item->age; |
| 2452 | if (age != U8_MAX) |
| 2453 | rmap_item->age++; |
| 2454 | |
| 2455 | /* |
| 2456 | * Smaller ages are not skipped; they need to get a chance to go |
| 2457 | * through the different phases of KSM merging. |
| 2458 | */ |
| 2459 | if (age < 3) |
| 2460 | return false; |
| 2461 | |
| 2462 | /* |
| 2463 | * Are we still allowed to skip? If not, then don't skip it |
| 2464 | * and determine how much more often we are allowed to skip next. |
| 2465 | */ |
| 2466 | if (!rmap_item->remaining_skips) { |
| 2467 | rmap_item->remaining_skips = skip_age(age); |
| 2468 | return false; |
| 2469 | } |
| 2470 | |
| 2471 | /* Skip this page */ |
Stefan Roesch | e5a6899 | 2023-09-25 21:09:37 -0700 | [diff] [blame] | 2472 | ksm_pages_skipped++; |
Stefan Roesch | 5e924ff | 2023-09-25 21:09:36 -0700 | [diff] [blame] | 2473 | rmap_item->remaining_skips--; |
| 2474 | remove_rmap_item_from_tree(rmap_item); |
| 2475 | return true; |
| 2476 | } |
| 2477 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2478 | static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2479 | { |
| 2480 | struct mm_struct *mm; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2481 | struct ksm_mm_slot *mm_slot; |
| 2482 | struct mm_slot *slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2483 | struct vm_area_struct *vma; |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2484 | struct ksm_rmap_item *rmap_item; |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 2485 | struct vma_iterator vmi; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2486 | int nid; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2487 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2488 | if (list_empty(&ksm_mm_head.slot.mm_node)) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2489 | return NULL; |
| 2490 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2491 | mm_slot = ksm_scan.mm_slot; |
| 2492 | if (mm_slot == &ksm_mm_head) { |
Stefan Roesch | 4e5fa4f | 2023-12-18 15:10:51 -0800 | [diff] [blame] | 2493 | advisor_start_scan(); |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 2494 | trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); |
| 2495 | |
Hugh Dickins | 2919bfd | 2011-01-13 15:47:29 -0800 | [diff] [blame] | 2496 | /* |
Matthew Wilcox (Oracle) | 1fec6890 | 2023-06-21 17:45:56 +0100 | [diff] [blame] | 2497 | * A number of pages can hang around indefinitely in per-cpu |
| 2498 | * LRU cache, their raised page count preventing write_protect_page |
Hugh Dickins | 2919bfd | 2011-01-13 15:47:29 -0800 | [diff] [blame] | 2499 | * from merging them. Though it doesn't really matter much, |
| 2500 | * it is puzzling to see some stuck in pages_volatile until |
| 2501 | * other activity jostles them out, and they also prevented |
| 2502 | * LTP's KSM test from succeeding deterministically; so drain |
| 2503 | * them here (here rather than on entry to ksm_do_scan(), |
| 2504 | * so we don't IPI too often when pages_to_scan is set low). |
| 2505 | */ |
| 2506 | lru_add_drain_all(); |
| 2507 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2508 | /* |
| 2509 | * Whereas stale stable_nodes on the stable_tree itself |
| 2510 | * get pruned in the regular course of stable_tree_search(), |
| 2511 | * those moved out to the migrate_nodes list can accumulate: |
| 2512 | * so prune them once before each full scan. |
| 2513 | */ |
| 2514 | if (!ksm_merge_across_nodes) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2515 | struct ksm_stable_node *stable_node, *next; |
Alex Shi (tencent) | 72556a4 | 2024-04-11 14:17:07 +0800 | [diff] [blame] | 2516 | struct folio *folio; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2517 | |
Geliang Tang | 0364041 | 2016-01-14 15:20:54 -0800 | [diff] [blame] | 2518 | list_for_each_entry_safe(stable_node, next, |
| 2519 | &migrate_nodes, list) { |
Alex Shi (tencent) | 72556a4 | 2024-04-11 14:17:07 +0800 | [diff] [blame] | 2520 | folio = ksm_get_folio(stable_node, |
David Hildenbrand | 85b67b0 | 2024-04-11 14:17:10 +0800 | [diff] [blame] | 2521 | KSM_GET_FOLIO_NOLOCK); |
Alex Shi (tencent) | 72556a4 | 2024-04-11 14:17:07 +0800 | [diff] [blame] | 2522 | if (folio) |
| 2523 | folio_put(folio); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2524 | cond_resched(); |
| 2525 | } |
| 2526 | } |
| 2527 | |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 2528 | for (nid = 0; nid < ksm_nr_node_ids; nid++) |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 2529 | root_unstable_tree[nid] = RB_ROOT; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2530 | |
| 2531 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2532 | slot = list_entry(mm_slot->slot.mm_node.next, |
| 2533 | struct mm_slot, mm_node); |
| 2534 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
| 2535 | ksm_scan.mm_slot = mm_slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2536 | spin_unlock(&ksm_mmlist_lock); |
Hugh Dickins | 2b47261 | 2011-06-15 15:08:58 -0700 | [diff] [blame] | 2537 | /* |
| 2538 | * Although we tested list_empty() above, a racing __ksm_exit |
| 2539 | * of the last mm on the list may have removed it since then. |
| 2540 | */ |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2541 | if (mm_slot == &ksm_mm_head) |
Hugh Dickins | 2b47261 | 2011-06-15 15:08:58 -0700 | [diff] [blame] | 2542 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2543 | next_mm: |
| 2544 | ksm_scan.address = 0; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2545 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2546 | } |
| 2547 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2548 | slot = &mm_slot->slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2549 | mm = slot->mm; |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 2550 | vma_iter_init(&vmi, mm, ksm_scan.address); |
| 2551 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2552 | mmap_read_lock(mm); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2553 | if (ksm_test_exit(mm)) |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 2554 | goto no_vmas; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2555 | |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 2556 | for_each_vma(vmi, vma) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2557 | if (!(vma->vm_flags & VM_MERGEABLE)) |
| 2558 | continue; |
| 2559 | if (ksm_scan.address < vma->vm_start) |
| 2560 | ksm_scan.address = vma->vm_start; |
| 2561 | if (!vma->anon_vma) |
| 2562 | ksm_scan.address = vma->vm_end; |
| 2563 | |
| 2564 | while (ksm_scan.address < vma->vm_end) { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2565 | if (ksm_test_exit(mm)) |
| 2566 | break; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2567 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
Haiyue Wang | f7091ed | 2022-08-23 21:58:41 +0800 | [diff] [blame] | 2568 | if (IS_ERR_OR_NULL(*page)) { |
Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 2569 | ksm_scan.address += PAGE_SIZE; |
| 2570 | cond_resched(); |
| 2571 | continue; |
| 2572 | } |
Haiyue Wang | f7091ed | 2022-08-23 21:58:41 +0800 | [diff] [blame] | 2573 | if (is_zone_device_page(*page)) |
| 2574 | goto next_page; |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 2575 | if (PageAnon(*page)) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2576 | flush_anon_page(vma, *page, ksm_scan.address); |
| 2577 | flush_dcache_page(*page); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2578 | rmap_item = get_next_rmap_item(mm_slot, |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2579 | ksm_scan.rmap_list, ksm_scan.address); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2580 | if (rmap_item) { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2581 | ksm_scan.rmap_list = |
| 2582 | &rmap_item->rmap_list; |
Stefan Roesch | 5e924ff | 2023-09-25 21:09:36 -0700 | [diff] [blame] | 2583 | |
| 2584 | if (should_skip_rmap_item(*page, rmap_item)) |
| 2585 | goto next_page; |
| 2586 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2587 | ksm_scan.address += PAGE_SIZE; |
| 2588 | } else |
| 2589 | put_page(*page); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2590 | mmap_read_unlock(mm); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2591 | return rmap_item; |
| 2592 | } |
Haiyue Wang | f7091ed | 2022-08-23 21:58:41 +0800 | [diff] [blame] | 2593 | next_page: |
Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 2594 | put_page(*page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2595 | ksm_scan.address += PAGE_SIZE; |
| 2596 | cond_resched(); |
| 2597 | } |
| 2598 | } |
| 2599 | |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2600 | if (ksm_test_exit(mm)) { |
Matthew Wilcox (Oracle) | a5f18ba0 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 2601 | no_vmas: |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2602 | ksm_scan.address = 0; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2603 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2604 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2605 | /* |
| 2606 | * Nuke all the rmap_items that are above this current rmap: |
| 2607 | * because there were no VM_MERGEABLE vmas with such addresses. |
| 2608 | */ |
Chengyang Fan | 420be4e | 2021-05-04 18:37:48 -0700 | [diff] [blame] | 2609 | remove_trailing_rmap_items(ksm_scan.rmap_list); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2610 | |
| 2611 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2612 | slot = list_entry(mm_slot->slot.mm_node.next, |
| 2613 | struct mm_slot, mm_node); |
| 2614 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2615 | if (ksm_scan.address == 0) { |
| 2616 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2617 | * We've completed a full scan of all vmas, holding mmap_lock |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2618 | * throughout, and found no VM_MERGEABLE: so do the same as |
| 2619 | * __ksm_exit does to remove this mm from all our lists now. |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2620 | * This applies either when cleaning up after __ksm_exit |
| 2621 | * (but beware: we can reach here even before __ksm_exit), |
| 2622 | * or when all VM_MERGEABLE areas have been unmapped (and |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2623 | * mmap_lock then protects against race with MADV_MERGEABLE). |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2624 | */ |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2625 | hash_del(&mm_slot->slot.hash); |
| 2626 | list_del(&mm_slot->slot.mm_node); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2627 | spin_unlock(&ksm_mmlist_lock); |
| 2628 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2629 | mm_slot_free(mm_slot_cache, mm_slot); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2630 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2631 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2632 | mmap_read_unlock(mm); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2633 | mmdrop(mm); |
| 2634 | } else { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2635 | mmap_read_unlock(mm); |
Zhou Chengming | 7496fea | 2016-05-12 15:42:21 -0700 | [diff] [blame] | 2636 | /* |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 2637 | * mmap_read_unlock(mm) must come first: once
Zhou Chengming | 7496fea | 2016-05-12 15:42:21 -0700 | [diff] [blame] | 2638 | * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
| 2639 | * already have been freed under us by __ksm_exit(),
| 2640 | * because the "mm_slot" is still hashed and
| 2641 | * ksm_scan.mm_slot no longer points to it.
| 2642 | */ |
| 2643 | spin_unlock(&ksm_mmlist_lock); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2644 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2645 | |
| 2646 | /* Repeat until we've completed scanning the whole list */ |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2647 | mm_slot = ksm_scan.mm_slot; |
| 2648 | if (mm_slot != &ksm_mm_head) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2649 | goto next_mm; |
| 2650 | |
Stefan Roesch | 4e5fa4f | 2023-12-18 15:10:51 -0800 | [diff] [blame] | 2651 | advisor_stop_scan(); |
| 2652 | |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 2653 | trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2654 | ksm_scan.seqnr++; |
| 2655 | return NULL; |
| 2656 | } |
| 2657 | |
| 2658 | /** |
| 2659 | * ksm_do_scan - the ksm scanner main worker function. |
Mike Rapoport | b7701a5 | 2018-02-06 15:42:13 -0800 | [diff] [blame] | 2660 | * @scan_npages: number of pages we want to scan before we return. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2661 | */ |
| 2662 | static void ksm_do_scan(unsigned int scan_npages) |
| 2663 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2664 | struct ksm_rmap_item *rmap_item; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 2665 | struct page *page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2666 | |
Chengming Zhou | 730cdc2 | 2024-05-28 13:15:21 +0800 | [diff] [blame] | 2667 | while (scan_npages-- && likely(!freezing(current))) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2668 | cond_resched(); |
| 2669 | rmap_item = scan_get_next_rmap_item(&page); |
| 2670 | if (!rmap_item) |
| 2671 | return; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 2672 | cmp_and_merge_page(page, rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2673 | put_page(page); |
Chengming Zhou | 730cdc2 | 2024-05-28 13:15:21 +0800 | [diff] [blame] | 2674 | ksm_pages_scanned++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2675 | } |
| 2676 | } |
| 2677 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2678 | static int ksmd_should_run(void) |
| 2679 | { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2680 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2681 | } |
| 2682 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2683 | static int ksm_scan_thread(void *nothing) |
| 2684 | { |
Kirill Tkhai | fcf9a0e | 2018-12-28 00:38:40 -0800 | [diff] [blame] | 2685 | unsigned int sleep_ms; |
| 2686 | |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2687 | set_freezable(); |
Izik Eidus | 339aa62 | 2009-09-21 17:02:07 -0700 | [diff] [blame] | 2688 | set_user_nice(current, 5); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2689 | |
| 2690 | while (!kthread_should_stop()) { |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2691 | mutex_lock(&ksm_thread_mutex); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 2692 | wait_while_offlining(); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2693 | if (ksmd_should_run()) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2694 | ksm_do_scan(ksm_thread_pages_to_scan); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2695 | mutex_unlock(&ksm_thread_mutex); |
| 2696 | |
| 2697 | if (ksmd_should_run()) { |
Kirill Tkhai | fcf9a0e | 2018-12-28 00:38:40 -0800 | [diff] [blame] | 2698 | sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); |
Kevin Hao | f55afd9 | 2023-12-13 17:09:06 +0800 | [diff] [blame] | 2699 | wait_event_freezable_timeout(ksm_iter_wait, |
Kirill Tkhai | fcf9a0e | 2018-12-28 00:38:40 -0800 | [diff] [blame] | 2700 | sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), |
| 2701 | msecs_to_jiffies(sleep_ms)); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2702 | } else { |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2703 | wait_event_freezable(ksm_thread_wait, |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2704 | ksmd_should_run() || kthread_should_stop()); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2705 | } |
| 2706 | } |
| 2707 | return 0; |
| 2708 | } |
| 2709 | |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2710 | static void __ksm_add_vma(struct vm_area_struct *vma) |
| 2711 | { |
| 2712 | unsigned long vm_flags = vma->vm_flags; |
| 2713 | |
| 2714 | if (vm_flags & VM_MERGEABLE) |
| 2715 | return; |
| 2716 | |
| 2717 | if (vma_ksm_compatible(vma)) |
| 2718 | vm_flags_set(vma, VM_MERGEABLE); |
| 2719 | } |
| 2720 | |
David Hildenbrand | 24139c0 | 2023-04-22 22:54:18 +0200 | [diff] [blame] | 2721 | static int __ksm_del_vma(struct vm_area_struct *vma) |
| 2722 | { |
| 2723 | int err; |
| 2724 | |
| 2725 | if (!(vma->vm_flags & VM_MERGEABLE)) |
| 2726 | return 0; |
| 2727 | |
| 2728 | if (vma->anon_vma) { |
Suren Baghdasaryan | 49b0638 | 2023-08-04 08:27:19 -0700 | [diff] [blame] | 2729 | err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); |
David Hildenbrand | 24139c0 | 2023-04-22 22:54:18 +0200 | [diff] [blame] | 2730 | if (err) |
| 2731 | return err; |
| 2732 | } |
| 2733 | |
| 2734 | vm_flags_clear(vma, VM_MERGEABLE); |
| 2735 | return 0; |
| 2736 | } |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2737 | /** |
| 2738 | * ksm_add_vma - Mark vma as mergeable if compatible |
| 2739 | * |
| 2740 | * @vma: Pointer to vma |
| 2741 | */ |
| 2742 | void ksm_add_vma(struct vm_area_struct *vma) |
| 2743 | { |
| 2744 | struct mm_struct *mm = vma->vm_mm; |
| 2745 | |
| 2746 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
| 2747 | __ksm_add_vma(vma); |
| 2748 | } |
| 2749 | |
| 2750 | static void ksm_add_vmas(struct mm_struct *mm) |
| 2751 | { |
| 2752 | struct vm_area_struct *vma; |
| 2753 | |
| 2754 | VMA_ITERATOR(vmi, mm, 0); |
| 2755 | for_each_vma(vmi, vma) |
| 2756 | __ksm_add_vma(vma); |
| 2757 | } |
| 2758 | |
David Hildenbrand | 24139c0 | 2023-04-22 22:54:18 +0200 | [diff] [blame] | 2759 | static int ksm_del_vmas(struct mm_struct *mm) |
| 2760 | { |
| 2761 | struct vm_area_struct *vma; |
| 2762 | int err; |
| 2763 | |
| 2764 | VMA_ITERATOR(vmi, mm, 0); |
| 2765 | for_each_vma(vmi, vma) { |
| 2766 | err = __ksm_del_vma(vma); |
| 2767 | if (err) |
| 2768 | return err; |
| 2769 | } |
| 2770 | return 0; |
| 2771 | } |
| 2772 | |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2773 | /** |
| 2774 | * ksm_enable_merge_any - Add mm to the KSM mm list and enable merging on all
| 2775 | * compatible VMAs
| 2776 | * |
| 2777 | * @mm: Pointer to mm |
| 2778 | * |
| 2779 | * Returns 0 on success, otherwise error code |
| 2780 | */ |
| 2781 | int ksm_enable_merge_any(struct mm_struct *mm) |
| 2782 | { |
| 2783 | int err; |
| 2784 | |
| 2785 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
| 2786 | return 0; |
| 2787 | |
| 2788 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
| 2789 | err = __ksm_enter(mm); |
| 2790 | if (err) |
| 2791 | return err; |
| 2792 | } |
| 2793 | |
| 2794 | set_bit(MMF_VM_MERGE_ANY, &mm->flags); |
| 2795 | ksm_add_vmas(mm); |
| 2796 | |
| 2797 | return 0; |
| 2798 | } |
| 2799 | |
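/*
 * Illustrative sketch, not part of this file: process-wide merging as set up
 * by ksm_enable_merge_any() above is normally reached from userspace through
 * prctl(PR_SET_MEMORY_MERGE, 1), which the prctl handler calls with the mmap
 * write lock held (and which may require CAP_SYS_RESOURCE). A minimal
 * example; the fallback #define is an assumption, see linux/prctl.h for the
 * authoritative value:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	#ifndef PR_SET_MEMORY_MERGE
 *	#define PR_SET_MEMORY_MERGE	67
 *	#endif
 *
 *	int main(void)
 *	{
 *		// Opt every compatible VMA, current and future, into KSM.
 *		if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
 *			perror("PR_SET_MEMORY_MERGE");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
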
David Hildenbrand | 24139c0 | 2023-04-22 22:54:18 +0200 | [diff] [blame] | 2800 | /** |
| 2801 | * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, |
| 2802 | * previously enabled via ksm_enable_merge_any(). |
| 2803 | * |
| 2804 | * Disabling merging implies unmerging any merged pages, like setting |
| 2805 | * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and |
| 2806 | * merging on all compatible VMAs remains enabled.
| 2807 | * |
| 2808 | * @mm: Pointer to mm |
| 2809 | * |
| 2810 | * Returns 0 on success, otherwise error code |
| 2811 | */ |
| 2812 | int ksm_disable_merge_any(struct mm_struct *mm) |
| 2813 | { |
| 2814 | int err; |
| 2815 | |
| 2816 | if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
| 2817 | return 0; |
| 2818 | |
| 2819 | err = ksm_del_vmas(mm); |
| 2820 | if (err) { |
| 2821 | ksm_add_vmas(mm); |
| 2822 | return err; |
| 2823 | } |
| 2824 | |
| 2825 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
| 2826 | return 0; |
| 2827 | } |
| 2828 | |
David Hildenbrand | 2c281f5 | 2023-04-22 23:01:56 +0200 | [diff] [blame] | 2829 | int ksm_disable(struct mm_struct *mm) |
| 2830 | { |
| 2831 | mmap_assert_write_locked(mm); |
| 2832 | |
| 2833 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) |
| 2834 | return 0; |
| 2835 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
| 2836 | return ksm_disable_merge_any(mm); |
| 2837 | return ksm_del_vmas(mm); |
| 2838 | } |
| 2839 | |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2840 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
| 2841 | unsigned long end, int advice, unsigned long *vm_flags) |
| 2842 | { |
| 2843 | struct mm_struct *mm = vma->vm_mm; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 2844 | int err; |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2845 | |
| 2846 | switch (advice) { |
| 2847 | case MADV_MERGEABLE: |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2848 | if (vma->vm_flags & VM_MERGEABLE) |
Dave Jiang | e1fb4a0 | 2018-08-17 15:43:40 -0700 | [diff] [blame] | 2849 | return 0; |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2850 | if (!vma_ksm_compatible(vma)) |
Shawn Anastasio | 1256448 | 2020-08-21 13:55:56 -0500 | [diff] [blame] | 2851 | return 0; |
Konstantin Khlebnikov | cc2383e | 2012-10-08 16:28:37 -0700 | [diff] [blame] | 2852 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 2853 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
| 2854 | err = __ksm_enter(mm); |
| 2855 | if (err) |
| 2856 | return err; |
| 2857 | } |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2858 | |
| 2859 | *vm_flags |= VM_MERGEABLE; |
| 2860 | break; |
| 2861 | |
| 2862 | case MADV_UNMERGEABLE: |
| 2863 | if (!(*vm_flags & VM_MERGEABLE)) |
| 2864 | return 0; /* just ignore the advice */ |
| 2865 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 2866 | if (vma->anon_vma) { |
Suren Baghdasaryan | 49b0638 | 2023-08-04 08:27:19 -0700 | [diff] [blame] | 2867 | err = unmerge_ksm_pages(vma, start, end, true); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 2868 | if (err) |
| 2869 | return err; |
| 2870 | } |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2871 | |
| 2872 | *vm_flags &= ~VM_MERGEABLE; |
| 2873 | break; |
| 2874 | } |
| 2875 | |
| 2876 | return 0; |
| 2877 | } |
Bharata B Rao | 33cf170 | 2019-11-25 08:36:25 +0530 | [diff] [blame] | 2878 | EXPORT_SYMBOL_GPL(ksm_madvise); |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2879 | |
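/*
 * Illustrative sketch, not part of this file: the per-VMA opt-in handled by
 * ksm_madvise() above is driven from userspace with madvise(2). A minimal
 * example that marks an anonymous mapping mergeable and later unmerges it
 * (error handling trimmed; MADV_MERGEABLE needs CONFIG_KSM and only takes
 * effect once ksmd is running):
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64UL << 20;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		// Ask ksmd to scan this range for identical pages.
 *		if (madvise(buf, len, MADV_MERGEABLE))
 *			return 1;
 *
 *		// ... fill buf with duplicate-heavy data, let ksmd merge ...
 *
 *		// Break COW on merged pages and drop the range from scanning.
 *		if (madvise(buf, len, MADV_UNMERGEABLE))
 *			return 1;
 *		return 0;
 *	}
 */
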
| 2880 | int __ksm_enter(struct mm_struct *mm) |
| 2881 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2882 | struct ksm_mm_slot *mm_slot; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2883 | struct mm_slot *slot; |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2884 | int needs_wakeup; |
| 2885 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2886 | mm_slot = mm_slot_alloc(mm_slot_cache); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2887 | if (!mm_slot) |
| 2888 | return -ENOMEM; |
| 2889 | |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2890 | slot = &mm_slot->slot; |
| 2891 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2892 | /* Check ksm_run too? Would need tighter locking */ |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2893 | needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2894 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2895 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2896 | mm_slot_insert(mm_slots_hash, mm, slot); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2897 | /* |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2898 | * When KSM_RUN_MERGE (or KSM_RUN_STOP) is set,
| 2899 | * insert just behind the scanning cursor, to let the area settle |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2900 | * down a little; when fork is followed by immediate exec, we don't |
| 2901 | * want ksmd to waste time setting up and tearing down an rmap_list. |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2902 | * |
| 2903 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its |
| 2904 | * scanning cursor, otherwise KSM pages in newly forked mms will be |
| 2905 | * missed: then we might as well insert at the end of the list. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2906 | */ |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2907 | if (ksm_run & KSM_RUN_UNMERGE) |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2908 | list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2909 | else |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2910 | list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2911 | spin_unlock(&ksm_mmlist_lock); |
| 2912 | |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2913 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
Vegard Nossum | f1f1007 | 2017-02-27 14:30:07 -0800 | [diff] [blame] | 2914 | mmgrab(mm); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 2915 | |
| 2916 | if (needs_wakeup) |
| 2917 | wake_up_interruptible(&ksm_thread_wait); |
| 2918 | |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 2919 | trace_ksm_enter(mm); |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2920 | return 0; |
| 2921 | } |
| 2922 | |
Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 2923 | void __ksm_exit(struct mm_struct *mm) |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2924 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 2925 | struct ksm_mm_slot *mm_slot; |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2926 | struct mm_slot *slot; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2927 | int easy_to_free = 0; |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2928 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2929 | /* |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2930 | * This process is exiting: if it's straightforward (as is the |
| 2931 | * case when ksmd was never running), free mm_slot immediately. |
| 2932 | * But if it's at the cursor or has rmap_items linked to it, use |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2933 | * mmap_lock to synchronize with any break_cows before pagetables |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2934 | * are freed, and leave the mm_slot on the list for ksmd to free. |
| 2935 | * Beware: ksm may already have noticed it exiting and freed the slot. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2936 | */ |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2937 | |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2938 | spin_lock(&ksm_mmlist_lock); |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2939 | slot = mm_slot_lookup(mm_slots_hash, mm); |
| 2940 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2941 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 2942 | if (!mm_slot->rmap_list) { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2943 | hash_del(&slot->hash); |
| 2944 | list_del(&slot->mm_node); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2945 | easy_to_free = 1; |
| 2946 | } else { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2947 | list_move(&slot->mm_node, |
| 2948 | &ksm_scan.mm_slot->slot.mm_node); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2949 | } |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2950 | } |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 2951 | spin_unlock(&ksm_mmlist_lock); |
| 2952 | |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2953 | if (easy_to_free) { |
Qi Zheng | 58730ab | 2022-08-31 11:19:51 +0800 | [diff] [blame] | 2954 | mm_slot_free(mm_slot_cache, mm_slot); |
Stefan Roesch | d7597f5 | 2023-04-17 22:13:40 -0700 | [diff] [blame] | 2955 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2956 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
| 2957 | mmdrop(mm); |
| 2958 | } else if (mm_slot) { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2959 | mmap_write_lock(mm); |
| 2960 | mmap_write_unlock(mm); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2961 | } |
Stefan Roesch | 739100c | 2023-02-10 13:46:45 -0800 | [diff] [blame] | 2962 | |
| 2963 | trace_ksm_exit(mm); |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 2964 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2965 | |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2966 | struct folio *ksm_might_need_to_copy(struct folio *folio, |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2967 | struct vm_area_struct *vma, unsigned long addr) |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2968 | { |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2969 | struct page *page = folio_page(folio, 0); |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 2970 | struct anon_vma *anon_vma = folio_anon_vma(folio); |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2971 | struct folio *new_folio; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2972 | |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2973 | if (folio_test_large(folio)) |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2974 | return folio; |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2975 | |
| 2976 | if (folio_test_ksm(folio)) { |
| 2977 | if (folio_stable_node(folio) && |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2978 | !(ksm_run & KSM_RUN_UNMERGE)) |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2979 | return folio; /* no need to copy it */ |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2980 | } else if (!anon_vma) { |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2981 | return folio; /* no need to copy it */ |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2982 | } else if (folio->index == linear_page_index(vma, addr) && |
Nanyong Sun | e1c63e1 | 2022-01-14 14:08:59 -0800 | [diff] [blame] | 2983 | anon_vma->root == vma->anon_vma->root) { |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2984 | return folio; /* still no need to copy it */ |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2985 | } |
Miaohe Lin | f985fc3 | 2023-07-27 19:56:40 +0800 | [diff] [blame] | 2986 | if (PageHWPoison(page)) |
| 2987 | return ERR_PTR(-EHWPOISON); |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2988 | if (!folio_test_uptodate(folio)) |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2989 | return folio; /* let do_swap_page report the error */ |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2990 | |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2991 | new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); |
| 2992 | if (new_folio && |
| 2993 | mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { |
| 2994 | folio_put(new_folio); |
| 2995 | new_folio = NULL; |
Hugh Dickins | 62fdb16 | 2020-09-18 21:20:03 -0700 | [diff] [blame] | 2996 | } |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 2997 | if (new_folio) { |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 2998 | if (copy_mc_user_highpage(folio_page(new_folio, 0), page, |
| 2999 | addr, vma)) { |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 3000 | folio_put(new_folio); |
Kefeng Wang | 6b97059 | 2022-12-09 15:28:01 +0800 | [diff] [blame] | 3001 | return ERR_PTR(-EHWPOISON); |
| 3002 | } |
Kefeng Wang | 1486fb5 | 2023-11-18 10:32:28 +0800 | [diff] [blame] | 3003 | folio_set_dirty(new_folio); |
| 3004 | __folio_mark_uptodate(new_folio); |
| 3005 | __folio_set_locked(new_folio); |
Yang Yang | 4d45c3a | 2022-03-22 14:46:33 -0700 | [diff] [blame] | 3006 | #ifdef CONFIG_SWAP |
| 3007 | count_vm_event(KSM_SWPIN_COPY); |
| 3008 | #endif |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 3009 | } |
| 3010 | |
Matthew Wilcox (Oracle) | 96db66d | 2023-12-11 16:22:06 +0000 | [diff] [blame] | 3011 | return new_folio; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 3012 | } |
| 3013 | |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 3014 | void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3015 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3016 | struct ksm_stable_node *stable_node; |
| 3017 | struct ksm_rmap_item *rmap_item; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3018 | int search_new_forks = 0; |
| 3019 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3020 | VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 3021 | |
| 3022 | /* |
| 3023 | * Rely on the page lock to protect against concurrent modifications |
| 3024 | * to that page's node of the stable tree. |
| 3025 | */ |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3026 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3027 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3028 | stable_node = folio_stable_node(folio); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3029 | if (!stable_node) |
Minchan Kim | 1df631a | 2017-05-03 14:54:23 -0700 | [diff] [blame] | 3030 | return; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3031 | again: |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 3032 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3033 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 3034 | struct anon_vma_chain *vmac; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3035 | struct vm_area_struct *vma; |
| 3036 | |
Andrea Arcangeli | ad12695 | 2015-11-05 18:49:07 -0800 | [diff] [blame] | 3037 | cond_resched(); |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 3038 | if (!anon_vma_trylock_read(anon_vma)) { |
| 3039 | if (rwc->try_lock) { |
| 3040 | rwc->contended = true; |
| 3041 | return; |
| 3042 | } |
| 3043 | anon_vma_lock_read(anon_vma); |
| 3044 | } |
Michel Lespinasse | bf181b9 | 2012-10-08 16:31:39 -0700 | [diff] [blame] | 3045 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
| 3046 | 0, ULONG_MAX) { |
Jia He | 1105a2f | 2018-06-14 15:26:14 -0700 | [diff] [blame] | 3047 | unsigned long addr; |
| 3048 | |
Andrea Arcangeli | ad12695 | 2015-11-05 18:49:07 -0800 | [diff] [blame] | 3049 | cond_resched(); |
Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 3050 | vma = vmac->vma; |
Jia He | 1105a2f | 2018-06-14 15:26:14 -0700 | [diff] [blame] | 3051 | |
| 3052 | /* Ignore the stable/unstable/seqnr flags */
Miaohe Lin | cd7fae2 | 2021-05-04 18:37:42 -0700 | [diff] [blame] | 3053 | addr = rmap_item->address & PAGE_MASK; |
Jia He | 1105a2f | 2018-06-14 15:26:14 -0700 | [diff] [blame] | 3054 | |
| 3055 | if (addr < vma->vm_start || addr >= vma->vm_end) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3056 | continue; |
| 3057 | /* |
| 3058 | * Initially we examine only the vma which covers this |
| 3059 | * rmap_item; but later, if there is still work to do, |
| 3060 | * we examine covering vmas in other mms: in case they |
| 3061 | * were forked from the original since ksmd passed. |
| 3062 | */ |
| 3063 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
| 3064 | continue; |
| 3065 | |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 3066 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
| 3067 | continue; |
| 3068 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3069 | if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { |
Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 3070 | anon_vma_unlock_read(anon_vma); |
Minchan Kim | 1df631a | 2017-05-03 14:54:23 -0700 | [diff] [blame] | 3071 | return; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3072 | } |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3073 | if (rwc->done && rwc->done(folio)) { |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 3074 | anon_vma_unlock_read(anon_vma); |
Minchan Kim | 1df631a | 2017-05-03 14:54:23 -0700 | [diff] [blame] | 3075 | return; |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 3076 | } |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3077 | } |
Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 3078 | anon_vma_unlock_read(anon_vma); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3079 | } |
| 3080 | if (!search_new_forks++) |
| 3081 | goto again; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3082 | } |
| 3083 | |
Longlong Xia | 4248d00 | 2023-04-14 10:17:41 +0800 | [diff] [blame] | 3084 | #ifdef CONFIG_MEMORY_FAILURE |
| 3085 | /* |
| 3086 | * Collect processes when a memory error hits a KSM page.
| 3087 | */ |
Matthew Wilcox (Oracle) | b650e1d | 2024-04-12 20:35:08 +0100 | [diff] [blame] | 3088 | void collect_procs_ksm(struct folio *folio, struct page *page, |
| 3089 | struct list_head *to_kill, int force_early) |
Longlong Xia | 4248d00 | 2023-04-14 10:17:41 +0800 | [diff] [blame] | 3090 | { |
| 3091 | struct ksm_stable_node *stable_node; |
| 3092 | struct ksm_rmap_item *rmap_item; |
Longlong Xia | 4248d00 | 2023-04-14 10:17:41 +0800 | [diff] [blame] | 3093 | struct vm_area_struct *vma; |
| 3094 | struct task_struct *tsk; |
| 3095 | |
| 3096 | stable_node = folio_stable_node(folio); |
| 3097 | if (!stable_node) |
| 3098 | return; |
| 3099 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
| 3100 | struct anon_vma *av = rmap_item->anon_vma; |
| 3101 | |
| 3102 | anon_vma_lock_read(av); |
Tong Tiangen | d256d1c | 2023-08-28 10:25:27 +0800 | [diff] [blame] | 3103 | rcu_read_lock(); |
Longlong Xia | 4248d00 | 2023-04-14 10:17:41 +0800 | [diff] [blame] | 3104 | for_each_process(tsk) { |
| 3105 | struct anon_vma_chain *vmac; |
| 3106 | unsigned long addr; |
| 3107 | struct task_struct *t = |
| 3108 | task_early_kill(tsk, force_early); |
| 3109 | if (!t) |
| 3110 | continue; |
| 3111 | anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, |
| 3112 | ULONG_MAX) |
| 3113 | { |
| 3114 | vma = vmac->vma; |
| 3115 | if (vma->vm_mm == t->mm) { |
| 3116 | addr = rmap_item->address & PAGE_MASK; |
| 3117 | add_to_kill_ksm(t, page, vma, to_kill, |
| 3118 | addr); |
| 3119 | } |
| 3120 | } |
| 3121 | } |
Tong Tiangen | d256d1c | 2023-08-28 10:25:27 +0800 | [diff] [blame] | 3122 | rcu_read_unlock(); |
Longlong Xia | 4248d00 | 2023-04-14 10:17:41 +0800 | [diff] [blame] | 3123 | anon_vma_unlock_read(av); |
| 3124 | } |
| 3125 | } |
| 3126 | #endif |
| 3127 | |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 3128 | #ifdef CONFIG_MIGRATION |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3129 | void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3130 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3131 | struct ksm_stable_node *stable_node; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3132 | |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3133 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
| 3134 | VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); |
| 3135 | VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3136 | |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3137 | stable_node = folio_stable_node(folio); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3138 | if (stable_node) { |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3139 | VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); |
| 3140 | stable_node->kpfn = folio_pfn(newfolio); |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 3141 | /* |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3142 | * newfolio->mapping was set in advance; now we need smp_wmb() |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 3143 | * to make sure that the new stable_node->kpfn is visible |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 3144 | * to ksm_get_folio() before it can see that folio->mapping |
Matthew Wilcox (Oracle) | 1913834 | 2021-05-07 15:26:29 -0400 | [diff] [blame] | 3145 | * has gone stale (or that folio_test_swapcache has been cleared). |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 3146 | */ |
| 3147 | smp_wmb(); |
Alex Shi (tencent) | b8b0ff2 | 2024-04-11 14:17:04 +0800 | [diff] [blame] | 3148 | folio_set_stable_node(folio, NULL); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 3149 | } |
| 3150 | } |
| 3151 | #endif /* CONFIG_MIGRATION */ |
| 3152 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3153 | #ifdef CONFIG_MEMORY_HOTREMOVE |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3154 | static void wait_while_offlining(void) |
| 3155 | { |
| 3156 | while (ksm_run & KSM_RUN_OFFLINE) { |
| 3157 | mutex_unlock(&ksm_thread_mutex); |
| 3158 | wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), |
NeilBrown | 7431620 | 2014-07-07 15:16:04 +1000 | [diff] [blame] | 3159 | TASK_UNINTERRUPTIBLE); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3160 | mutex_lock(&ksm_thread_mutex); |
| 3161 | } |
| 3162 | } |
| 3163 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3164 | static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3165 | unsigned long start_pfn, |
| 3166 | unsigned long end_pfn) |
| 3167 | { |
| 3168 | if (stable_node->kpfn >= start_pfn && |
| 3169 | stable_node->kpfn < end_pfn) { |
| 3170 | /* |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 3171 | * Don't ksm_get_folio, page has already gone: |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3172 | * which is why we keep kpfn instead of page* |
| 3173 | */ |
| 3174 | remove_node_from_stable_tree(stable_node); |
| 3175 | return true; |
| 3176 | } |
| 3177 | return false; |
| 3178 | } |
| 3179 | |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3180 | static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3181 | unsigned long start_pfn, |
| 3182 | unsigned long end_pfn, |
| 3183 | struct rb_root *root) |
| 3184 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3185 | struct ksm_stable_node *dup; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3186 | struct hlist_node *hlist_safe; |
| 3187 | |
| 3188 | if (!is_stable_node_chain(stable_node)) { |
| 3189 | VM_BUG_ON(is_stable_node_dup(stable_node)); |
| 3190 | return stable_node_dup_remove_range(stable_node, start_pfn, |
| 3191 | end_pfn); |
| 3192 | } |
| 3193 | |
| 3194 | hlist_for_each_entry_safe(dup, hlist_safe, |
| 3195 | &stable_node->hlist, hlist_dup) { |
| 3196 | VM_BUG_ON(!is_stable_node_dup(dup)); |
| 3197 | stable_node_dup_remove_range(dup, start_pfn, end_pfn); |
| 3198 | } |
| 3199 | if (hlist_empty(&stable_node->hlist)) { |
| 3200 | free_stable_node_chain(stable_node, root); |
| 3201 | return true; /* notify caller that tree was rebalanced */ |
| 3202 | } else |
| 3203 | return false; |
| 3204 | } |
| 3205 | |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3206 | static void ksm_check_stable_tree(unsigned long start_pfn, |
| 3207 | unsigned long end_pfn) |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3208 | { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3209 | struct ksm_stable_node *stable_node, *next; |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3210 | struct rb_node *node; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3211 | int nid; |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3212 | |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 3213 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
| 3214 | node = rb_first(root_stable_tree + nid); |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3215 | while (node) { |
Qi Zheng | 21fbd59 | 2022-08-31 11:19:48 +0800 | [diff] [blame] | 3216 | stable_node = rb_entry(node, struct ksm_stable_node, node); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3217 | if (stable_node_chain_remove_range(stable_node, |
| 3218 | start_pfn, end_pfn, |
| 3219 | root_stable_tree + |
| 3220 | nid)) |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 3221 | node = rb_first(root_stable_tree + nid); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3222 | else |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3223 | node = rb_next(node); |
| 3224 | cond_resched(); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3225 | } |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3226 | } |
Geliang Tang | 0364041 | 2016-01-14 15:20:54 -0800 | [diff] [blame] | 3227 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 3228 | if (stable_node->kpfn >= start_pfn && |
| 3229 | stable_node->kpfn < end_pfn) |
| 3230 | remove_node_from_stable_tree(stable_node); |
| 3231 | cond_resched(); |
| 3232 | } |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3233 | } |
| 3234 | |
| 3235 | static int ksm_memory_callback(struct notifier_block *self, |
| 3236 | unsigned long action, void *arg) |
| 3237 | { |
| 3238 | struct memory_notify *mn = arg; |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3239 | |
| 3240 | switch (action) { |
| 3241 | case MEM_GOING_OFFLINE: |
| 3242 | /* |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3243 | * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() |
| 3244 | * and remove_all_stable_nodes() while memory is going offline: |
| 3245 | * it is unsafe for them to touch the stable tree at this time. |
| 3246 | * But unmerge_ksm_pages(), rmap lookups and other entry points |
| 3247 | * which do not need the ksm_thread_mutex are all safe. |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3248 | */ |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3249 | mutex_lock(&ksm_thread_mutex); |
| 3250 | ksm_run |= KSM_RUN_OFFLINE; |
| 3251 | mutex_unlock(&ksm_thread_mutex); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3252 | break; |
| 3253 | |
| 3254 | case MEM_OFFLINE: |
| 3255 | /* |
| 3256 | * Most of the work is done by page migration; but there might |
| 3257 | * be a few stable_nodes left over, still pointing to struct |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3258 | * pages which have been offlined: prune those from the tree, |
Alex Shi (tencent) | 79899cc | 2024-04-11 14:17:09 +0800 | [diff] [blame] | 3259 | * otherwise ksm_get_folio() might later try to access a |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3260 | * non-existent struct page. |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3261 | */ |
Hugh Dickins | ee0ea59 | 2013-02-22 16:35:05 -0800 | [diff] [blame] | 3262 | ksm_check_stable_tree(mn->start_pfn, |
| 3263 | mn->start_pfn + mn->nr_pages); |
Joe Perches | e4a9bc5 | 2020-04-06 20:08:39 -0700 | [diff] [blame] | 3264 | fallthrough; |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3265 | case MEM_CANCEL_OFFLINE: |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3266 | mutex_lock(&ksm_thread_mutex); |
| 3267 | ksm_run &= ~KSM_RUN_OFFLINE; |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3268 | mutex_unlock(&ksm_thread_mutex); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3269 | |
| 3270 | smp_mb(); /* wake_up_bit advises this */ |
| 3271 | wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3272 | break; |
| 3273 | } |
| 3274 | return NOTIFY_OK; |
| 3275 | } |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3276 | #else |
| 3277 | static void wait_while_offlining(void) |
| 3278 | { |
| 3279 | } |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3280 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
| 3281 | |
Stefan Roesch | d21077f | 2023-04-17 22:13:41 -0700 | [diff] [blame] | 3282 | #ifdef CONFIG_PROC_FS |
| 3283 | long ksm_process_profit(struct mm_struct *mm) |
| 3284 | { |
Chengming Zhou | c2dc78b | 2024-05-28 13:15:22 +0800 | [diff] [blame] | 3285 | return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - |
Stefan Roesch | d21077f | 2023-04-17 22:13:41 -0700 | [diff] [blame] | 3286 | mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); |
| 3287 | } |
| 3288 | #endif /* CONFIG_PROC_FS */ |
| 3289 | |
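/*
 * Worked example for ksm_process_profit() above, with illustrative numbers
 * and assumed sizes (4 KiB pages, sizeof(struct ksm_rmap_item) on the order
 * of 64 bytes on 64-bit): a process with 10000 merged pages, 500 KSM-placed
 * zero pages and 12000 rmap_items yields roughly
 *
 *	(10000 + 500) * 4096 - 12000 * 64  ~=  43.0 MB - 0.77 MB  ~=  42 MB
 *
 * i.e. the metadata cost only matters when very few of the scanned pages
 * actually merge, in which case the profit can go negative.
 */
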
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 3290 | #ifdef CONFIG_SYSFS |
| 3291 | /* |
| 3292 | * This all compiles without CONFIG_SYSFS, but is a waste of space. |
| 3293 | */ |
| 3294 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3295 | #define KSM_ATTR_RO(_name) \ |
| 3296 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
| 3297 | #define KSM_ATTR(_name) \ |
Miaohe Lin | 1bad2e5 | 2022-03-22 14:46:35 -0700 | [diff] [blame] | 3298 | static struct kobj_attribute _name##_attr = __ATTR_RW(_name) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3299 | |
| 3300 | static ssize_t sleep_millisecs_show(struct kobject *kobj, |
| 3301 | struct kobj_attribute *attr, char *buf) |
| 3302 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3303 | return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3304 | } |
| 3305 | |
| 3306 | static ssize_t sleep_millisecs_store(struct kobject *kobj, |
| 3307 | struct kobj_attribute *attr, |
| 3308 | const char *buf, size_t count) |
| 3309 | { |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3310 | unsigned int msecs; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3311 | int err; |
| 3312 | |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3313 | err = kstrtouint(buf, 10, &msecs); |
| 3314 | if (err) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3315 | return -EINVAL; |
| 3316 | |
| 3317 | ksm_thread_sleep_millisecs = msecs; |
Kirill Tkhai | fcf9a0e | 2018-12-28 00:38:40 -0800 | [diff] [blame] | 3318 | wake_up_interruptible(&ksm_iter_wait); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3319 | |
| 3320 | return count; |
| 3321 | } |
| 3322 | KSM_ATTR(sleep_millisecs); |
| 3323 | |
| 3324 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
| 3325 | struct kobj_attribute *attr, char *buf) |
| 3326 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3327 | return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3328 | } |
| 3329 | |
| 3330 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
| 3331 | struct kobj_attribute *attr, |
| 3332 | const char *buf, size_t count) |
| 3333 | { |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3334 | unsigned int nr_pages; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3335 | int err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3336 | |
Stefan Roesch | 4e5fa4f | 2023-12-18 15:10:51 -0800 | [diff] [blame] | 3337 | if (ksm_advisor != KSM_ADVISOR_NONE) |
| 3338 | return -EINVAL; |
| 3339 | |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3340 | err = kstrtouint(buf, 10, &nr_pages); |
| 3341 | if (err) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3342 | return -EINVAL; |
| 3343 | |
| 3344 | ksm_thread_pages_to_scan = nr_pages; |
| 3345 | |
| 3346 | return count; |
| 3347 | } |
| 3348 | KSM_ATTR(pages_to_scan); |
| 3349 | |
| 3350 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, |
| 3351 | char *buf) |
| 3352 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3353 | return sysfs_emit(buf, "%lu\n", ksm_run); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3354 | } |
| 3355 | |
| 3356 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, |
| 3357 | const char *buf, size_t count) |
| 3358 | { |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3359 | unsigned int flags; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3360 | int err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3361 | |
Alexey Dobriyan | dfefd22 | 2020-12-14 19:15:03 -0800 | [diff] [blame] | 3362 | err = kstrtouint(buf, 10, &flags); |
| 3363 | if (err) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3364 | return -EINVAL; |
| 3365 | if (flags > KSM_RUN_UNMERGE) |
| 3366 | return -EINVAL; |
| 3367 | |
| 3368 | /* |
| 3369 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. |
| 3370 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, |
Hugh Dickins | d0f209f | 2009-12-14 17:59:34 -0800 | [diff] [blame] | 3371 | * breaking COW to free the pages_shared (but leaves mm_slots |
| 3372 | * on the list for when ksmd may be set running again). |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3373 | */ |
| 3374 | |
| 3375 | mutex_lock(&ksm_thread_mutex); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3376 | wait_while_offlining(); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3377 | if (ksm_run != flags) { |
| 3378 | ksm_run = flags; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 3379 | if (flags & KSM_RUN_UNMERGE) { |
David Rientjes | e1e12d2 | 2012-12-11 16:02:56 -0800 | [diff] [blame] | 3380 | set_current_oom_origin(); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 3381 | err = unmerge_and_remove_all_rmap_items(); |
David Rientjes | e1e12d2 | 2012-12-11 16:02:56 -0800 | [diff] [blame] | 3382 | clear_current_oom_origin(); |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 3383 | if (err) { |
| 3384 | ksm_run = KSM_RUN_STOP; |
| 3385 | count = err; |
| 3386 | } |
| 3387 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3388 | } |
| 3389 | mutex_unlock(&ksm_thread_mutex); |
| 3390 | |
| 3391 | if (flags & KSM_RUN_MERGE) |
| 3392 | wake_up_interruptible(&ksm_thread_wait); |
| 3393 | |
| 3394 | return count; |
| 3395 | } |
| 3396 | KSM_ATTR(run); |
| 3397 | |
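/*
 * Illustrative sketch, not part of this file: run, pages_to_scan and
 * sleep_millisecs above are exposed as plain text files under
 * /sys/kernel/mm/ksm/. A minimal userspace example that sets a scan rate and
 * starts ksmd (ksm_write() is a hypothetical helper; writing these files
 * normally requires root, and pages_to_scan is rejected with EINVAL while
 * the scan-time advisor is active):
 *
 *	#include <stdio.h>
 *
 *	static int ksm_write(const char *name, const char *val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	int main(void)
 *	{
 *		ksm_write("pages_to_scan", "100");
 *		ksm_write("sleep_millisecs", "20");
 *		return ksm_write("run", "1");	// 1 == KSM_RUN_MERGE
 *	}
 */
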
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3398 | #ifdef CONFIG_NUMA |
| 3399 | static ssize_t merge_across_nodes_show(struct kobject *kobj, |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3400 | struct kobj_attribute *attr, char *buf) |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3401 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3402 | return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3403 | } |
| 3404 | |
| 3405 | static ssize_t merge_across_nodes_store(struct kobject *kobj, |
| 3406 | struct kobj_attribute *attr, |
| 3407 | const char *buf, size_t count) |
| 3408 | { |
| 3409 | int err; |
| 3410 | unsigned long knob; |
| 3411 | |
| 3412 | err = kstrtoul(buf, 10, &knob); |
| 3413 | if (err) |
| 3414 | return err; |
| 3415 | if (knob > 1) |
| 3416 | return -EINVAL; |
| 3417 | |
| 3418 | mutex_lock(&ksm_thread_mutex); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3419 | wait_while_offlining(); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3420 | if (ksm_merge_across_nodes != knob) { |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 3421 | if (ksm_pages_shared || remove_all_stable_nodes()) |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3422 | err = -EBUSY; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 3423 | else if (root_stable_tree == one_stable_tree) { |
| 3424 | struct rb_root *buf; |
| 3425 | /* |
| 3426 | * This is the first time that we switch away from the |
| 3427 | * default of merging across nodes: must now allocate |
| 3428 | * a buffer to hold as many roots as may be needed. |
| 3429 | * Allocate stable and unstable together: |
 | 3430 | * with MAXSMP (NODES_SHIFT 10) this uses 16kB. |
| 3431 | */ |
Joe Perches | bafe1e1 | 2013-11-12 15:07:10 -0800 | [diff] [blame] | 3432 | buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), |
| 3433 | GFP_KERNEL); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 3434 | /* kcalloc() zero-fills, and an all-zero rb_root is a valid empty RB_ROOT */ |
| 3435 | if (!buf) |
| 3436 | err = -ENOMEM; |
| 3437 | else { |
| 3438 | root_stable_tree = buf; |
| 3439 | root_unstable_tree = buf + nr_node_ids; |
 | 3440 | /* Stable trees start empty, but the existing unstable tree carries over */ |
| 3441 | root_unstable_tree[0] = one_unstable_tree[0]; |
| 3442 | } |
| 3443 | } |
| 3444 | if (!err) { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3445 | ksm_merge_across_nodes = knob; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 3446 | ksm_nr_node_ids = knob ? 1 : nr_node_ids; |
| 3447 | } |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3448 | } |
| 3449 | mutex_unlock(&ksm_thread_mutex); |
| 3450 | |
| 3451 | return err ? err : count; |
| 3452 | } |
| 3453 | KSM_ATTR(merge_across_nodes); |
| 3454 | #endif |
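/*
 * Sketch of switching the NUMA merging policy; the store above only
 * succeeds while nothing is currently merged (otherwise it returns
 * -EBUSY), so unmerge first:
 *
 *	echo 2 > /sys/kernel/mm/ksm/run
 *	echo 0 > /sys/kernel/mm/ksm/merge_across_nodes	# merge per-node only
 */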
| 3455 | |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 3456 | static ssize_t use_zero_pages_show(struct kobject *kobj, |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3457 | struct kobj_attribute *attr, char *buf) |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 3458 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3459 | return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 3460 | } |
| 3461 | static ssize_t use_zero_pages_store(struct kobject *kobj, |
| 3462 | struct kobj_attribute *attr, |
| 3463 | const char *buf, size_t count) |
| 3464 | { |
| 3465 | int err; |
| 3466 | bool value; |
| 3467 | |
| 3468 | err = kstrtobool(buf, &value); |
| 3469 | if (err) |
| 3470 | return -EINVAL; |
| 3471 | |
| 3472 | ksm_use_zero_pages = value; |
| 3473 | |
| 3474 | return count; |
| 3475 | } |
| 3476 | KSM_ATTR(use_zero_pages); |
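/*
 * Illustrative: with use_zero_pages enabled, empty (zero-filled)
 * candidate pages are mapped to the kernel zero page rather than being
 * merged with each other. kstrtobool() accepts e.g. 1/0 or y/n:
 *
 *	echo 1 > /sys/kernel/mm/ksm/use_zero_pages
 */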
| 3477 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3478 | static ssize_t max_page_sharing_show(struct kobject *kobj, |
| 3479 | struct kobj_attribute *attr, char *buf) |
| 3480 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3481 | return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3482 | } |
| 3483 | |
| 3484 | static ssize_t max_page_sharing_store(struct kobject *kobj, |
| 3485 | struct kobj_attribute *attr, |
| 3486 | const char *buf, size_t count) |
| 3487 | { |
| 3488 | int err; |
| 3489 | int knob; |
| 3490 | |
| 3491 | err = kstrtoint(buf, 10, &knob); |
| 3492 | if (err) |
| 3493 | return err; |
| 3494 | /* |
| 3495 | * When a KSM page is created it is shared by 2 mappings. This |
| 3496 | * being a signed comparison, it implicitly verifies it's not |
| 3497 | * negative. |
| 3498 | */ |
| 3499 | if (knob < 2) |
| 3500 | return -EINVAL; |
| 3501 | |
| 3502 | if (READ_ONCE(ksm_max_page_sharing) == knob) |
| 3503 | return count; |
| 3504 | |
| 3505 | mutex_lock(&ksm_thread_mutex); |
| 3506 | wait_while_offlining(); |
| 3507 | if (ksm_max_page_sharing != knob) { |
| 3508 | if (ksm_pages_shared || remove_all_stable_nodes()) |
| 3509 | err = -EBUSY; |
| 3510 | else |
| 3511 | ksm_max_page_sharing = knob; |
| 3512 | } |
| 3513 | mutex_unlock(&ksm_thread_mutex); |
| 3514 | |
| 3515 | return err ? err : count; |
| 3516 | } |
| 3517 | KSM_ATTR(max_page_sharing); |
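/*
 * Illustrative: max_page_sharing caps how many mappings may share one
 * stable KSM page (minimum 2). As with merge_across_nodes, it can only
 * be changed while nothing is currently merged:
 *
 *	echo 2 > /sys/kernel/mm/ksm/run
 *	echo 512 > /sys/kernel/mm/ksm/max_page_sharing
 */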
| 3518 | |
Stefan Roesch | b348b5f | 2023-08-11 12:36:55 -0700 | [diff] [blame] | 3519 | static ssize_t pages_scanned_show(struct kobject *kobj, |
| 3520 | struct kobj_attribute *attr, char *buf) |
| 3521 | { |
| 3522 | return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); |
| 3523 | } |
| 3524 | KSM_ATTR_RO(pages_scanned); |
| 3525 | |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 3526 | static ssize_t pages_shared_show(struct kobject *kobj, |
| 3527 | struct kobj_attribute *attr, char *buf) |
| 3528 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3529 | return sysfs_emit(buf, "%lu\n", ksm_pages_shared); |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 3530 | } |
| 3531 | KSM_ATTR_RO(pages_shared); |
| 3532 | |
| 3533 | static ssize_t pages_sharing_show(struct kobject *kobj, |
| 3534 | struct kobj_attribute *attr, char *buf) |
| 3535 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3536 | return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 3537 | } |
| 3538 | KSM_ATTR_RO(pages_sharing); |
| 3539 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3540 | static ssize_t pages_unshared_show(struct kobject *kobj, |
| 3541 | struct kobj_attribute *attr, char *buf) |
| 3542 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3543 | return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3544 | } |
| 3545 | KSM_ATTR_RO(pages_unshared); |
| 3546 | |
| 3547 | static ssize_t pages_volatile_show(struct kobject *kobj, |
| 3548 | struct kobj_attribute *attr, char *buf) |
| 3549 | { |
| 3550 | long ksm_pages_volatile; |
| 3551 | |
| 3552 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared |
| 3553 | - ksm_pages_sharing - ksm_pages_unshared; |
| 3554 | /* |
| 3555 | * It was not worth any locking to calculate that statistic, |
| 3556 | * but it might therefore sometimes be negative: conceal that. |
| 3557 | */ |
| 3558 | if (ksm_pages_volatile < 0) |
| 3559 | ksm_pages_volatile = 0; |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3560 | return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3561 | } |
| 3562 | KSM_ATTR_RO(pages_volatile); |
| 3563 | |
Stefan Roesch | e5a6899 | 2023-09-25 21:09:37 -0700 | [diff] [blame] | 3564 | static ssize_t pages_skipped_show(struct kobject *kobj, |
| 3565 | struct kobj_attribute *attr, char *buf) |
| 3566 | { |
| 3567 | return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); |
| 3568 | } |
| 3569 | KSM_ATTR_RO(pages_skipped); |
| 3570 | |
xu xin | e294206 | 2023-06-13 11:09:34 +0800 | [diff] [blame] | 3571 | static ssize_t ksm_zero_pages_show(struct kobject *kobj, |
| 3572 | struct kobj_attribute *attr, char *buf) |
| 3573 | { |
Chengming Zhou | c2dc78b | 2024-05-28 13:15:22 +0800 | [diff] [blame] | 3574 | return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); |
xu xin | e294206 | 2023-06-13 11:09:34 +0800 | [diff] [blame] | 3575 | } |
| 3576 | KSM_ATTR_RO(ksm_zero_pages); |
| 3577 | |
Stefan Roesch | d21077f | 2023-04-17 22:13:41 -0700 | [diff] [blame] | 3578 | static ssize_t general_profit_show(struct kobject *kobj, |
| 3579 | struct kobj_attribute *attr, char *buf) |
| 3580 | { |
| 3581 | long general_profit; |
| 3582 | |
Chengming Zhou | c2dc78b | 2024-05-28 13:15:22 +0800 | [diff] [blame] | 3583 | general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - |
Stefan Roesch | d21077f | 2023-04-17 22:13:41 -0700 | [diff] [blame] | 3584 | ksm_rmap_items * sizeof(struct ksm_rmap_item); |
| 3585 | |
| 3586 | return sysfs_emit(buf, "%ld\n", general_profit); |
| 3587 | } |
| 3588 | KSM_ATTR_RO(general_profit); |
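/*
 * Worked example with made-up numbers: on a 4kB-page system with
 * ksm_pages_sharing == 10000, ksm_zero_pages == 0 and 15000 rmap_items
 * of (an assumed) 64 bytes each:
 *
 *	10000 * 4096 - 15000 * 64 = 40960000 - 960000 = 40000000
 *
 * i.e. roughly 40 MB of general profit.
 */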
| 3589 | |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3590 | static ssize_t stable_node_dups_show(struct kobject *kobj, |
| 3591 | struct kobj_attribute *attr, char *buf) |
| 3592 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3593 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3594 | } |
| 3595 | KSM_ATTR_RO(stable_node_dups); |
| 3596 | |
| 3597 | static ssize_t stable_node_chains_show(struct kobject *kobj, |
| 3598 | struct kobj_attribute *attr, char *buf) |
| 3599 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3600 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3601 | } |
| 3602 | KSM_ATTR_RO(stable_node_chains); |
| 3603 | |
| 3604 | static ssize_t |
| 3605 | stable_node_chains_prune_millisecs_show(struct kobject *kobj, |
| 3606 | struct kobj_attribute *attr, |
| 3607 | char *buf) |
| 3608 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3609 | return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3610 | } |
| 3611 | |
| 3612 | static ssize_t |
| 3613 | stable_node_chains_prune_millisecs_store(struct kobject *kobj, |
| 3614 | struct kobj_attribute *attr, |
| 3615 | const char *buf, size_t count) |
| 3616 | { |
Zhansaya Bagdauletkyzy | 584ff0d | 2021-09-02 15:00:51 -0700 | [diff] [blame] | 3617 | unsigned int msecs; |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3618 | int err; |
| 3619 | |
Zhansaya Bagdauletkyzy | 584ff0d | 2021-09-02 15:00:51 -0700 | [diff] [blame] | 3620 | err = kstrtouint(buf, 10, &msecs); |
| 3621 | if (err) |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3622 | return -EINVAL; |
| 3623 | |
| 3624 | ksm_stable_node_chains_prune_millisecs = msecs; |
| 3625 | |
| 3626 | return count; |
| 3627 | } |
| 3628 | KSM_ATTR(stable_node_chains_prune_millisecs); |
| 3629 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3630 | static ssize_t full_scans_show(struct kobject *kobj, |
| 3631 | struct kobj_attribute *attr, char *buf) |
| 3632 | { |
Joe Perches | ae7a927 | 2020-12-14 19:14:42 -0800 | [diff] [blame] | 3633 | return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3634 | } |
| 3635 | KSM_ATTR_RO(full_scans); |
| 3636 | |
Stefan Roesch | 5e924ff | 2023-09-25 21:09:36 -0700 | [diff] [blame] | 3637 | static ssize_t smart_scan_show(struct kobject *kobj, |
| 3638 | struct kobj_attribute *attr, char *buf) |
| 3639 | { |
| 3640 | return sysfs_emit(buf, "%u\n", ksm_smart_scan); |
| 3641 | } |
| 3642 | |
| 3643 | static ssize_t smart_scan_store(struct kobject *kobj, |
| 3644 | struct kobj_attribute *attr, |
| 3645 | const char *buf, size_t count) |
| 3646 | { |
| 3647 | int err; |
| 3648 | bool value; |
| 3649 | |
| 3650 | err = kstrtobool(buf, &value); |
| 3651 | if (err) |
| 3652 | return -EINVAL; |
| 3653 | |
| 3654 | ksm_smart_scan = value; |
| 3655 | return count; |
| 3656 | } |
| 3657 | KSM_ATTR(smart_scan); |
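/*
 * Illustrative: when smart_scan is enabled, ksmd skips pages that have
 * repeatedly failed to merge (accounted in pages_skipped), trading some
 * merge opportunities for less CPU. To disable the optimisation:
 *
 *	echo 0 > /sys/kernel/mm/ksm/smart_scan
 */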
| 3658 | |
Stefan Roesch | 66790e9 | 2023-12-18 15:10:52 -0800 | [diff] [blame] | 3659 | static ssize_t advisor_mode_show(struct kobject *kobj, |
| 3660 | struct kobj_attribute *attr, char *buf) |
| 3661 | { |
| 3662 | const char *output; |
| 3663 | |
| 3664 | if (ksm_advisor == KSM_ADVISOR_NONE) |
| 3665 | output = "[none] scan-time"; |
| 3666 | else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) |
| 3667 | output = "none [scan-time]"; |
| 3668 | |
| 3669 | return sysfs_emit(buf, "%s\n", output); |
| 3670 | } |
| 3671 | |
| 3672 | static ssize_t advisor_mode_store(struct kobject *kobj, |
| 3673 | struct kobj_attribute *attr, const char *buf, |
| 3674 | size_t count) |
| 3675 | { |
| 3676 | enum ksm_advisor_type curr_advisor = ksm_advisor; |
| 3677 | |
| 3678 | if (sysfs_streq("scan-time", buf)) |
| 3679 | ksm_advisor = KSM_ADVISOR_SCAN_TIME; |
| 3680 | else if (sysfs_streq("none", buf)) |
| 3681 | ksm_advisor = KSM_ADVISOR_NONE; |
| 3682 | else |
| 3683 | return -EINVAL; |
| 3684 | |
| 3685 | /* Set advisor default values */ |
| 3686 | if (curr_advisor != ksm_advisor) |
| 3687 | set_advisor_defaults(); |
| 3688 | |
| 3689 | return count; |
| 3690 | } |
| 3691 | KSM_ATTR(advisor_mode); |
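/*
 * Illustrative: the scan-time advisor auto-tunes pages_to_scan so that
 * one full scan takes roughly advisor_target_scan_time, within the
 * advisor_min/max_pages_to_scan and advisor_max_cpu bounds:
 *
 *	echo scan-time > /sys/kernel/mm/ksm/advisor_mode
 *	echo none      > /sys/kernel/mm/ksm/advisor_mode	# back to manual tuning
 */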
| 3692 | |
| 3693 | static ssize_t advisor_max_cpu_show(struct kobject *kobj, |
| 3694 | struct kobj_attribute *attr, char *buf) |
| 3695 | { |
| 3696 | return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu); |
| 3697 | } |
| 3698 | |
| 3699 | static ssize_t advisor_max_cpu_store(struct kobject *kobj, |
| 3700 | struct kobj_attribute *attr, |
| 3701 | const char *buf, size_t count) |
| 3702 | { |
| 3703 | int err; |
| 3704 | unsigned long value; |
| 3705 | |
| 3706 | err = kstrtoul(buf, 10, &value); |
| 3707 | if (err) |
| 3708 | return -EINVAL; |
| 3709 | |
| 3710 | ksm_advisor_max_cpu = value; |
| 3711 | return count; |
| 3712 | } |
| 3713 | KSM_ATTR(advisor_max_cpu); |
| 3714 | |
| 3715 | static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj, |
| 3716 | struct kobj_attribute *attr, char *buf) |
| 3717 | { |
| 3718 | return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan); |
| 3719 | } |
| 3720 | |
| 3721 | static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj, |
| 3722 | struct kobj_attribute *attr, |
| 3723 | const char *buf, size_t count) |
| 3724 | { |
| 3725 | int err; |
| 3726 | unsigned long value; |
| 3727 | |
| 3728 | err = kstrtoul(buf, 10, &value); |
| 3729 | if (err) |
| 3730 | return -EINVAL; |
| 3731 | |
| 3732 | ksm_advisor_min_pages_to_scan = value; |
| 3733 | return count; |
| 3734 | } |
| 3735 | KSM_ATTR(advisor_min_pages_to_scan); |
| 3736 | |
| 3737 | static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj, |
| 3738 | struct kobj_attribute *attr, char *buf) |
| 3739 | { |
| 3740 | return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan); |
| 3741 | } |
| 3742 | |
| 3743 | static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj, |
| 3744 | struct kobj_attribute *attr, |
| 3745 | const char *buf, size_t count) |
| 3746 | { |
| 3747 | int err; |
| 3748 | unsigned long value; |
| 3749 | |
| 3750 | err = kstrtoul(buf, 10, &value); |
| 3751 | if (err) |
| 3752 | return -EINVAL; |
| 3753 | |
| 3754 | ksm_advisor_max_pages_to_scan = value; |
| 3755 | return count; |
| 3756 | } |
| 3757 | KSM_ATTR(advisor_max_pages_to_scan); |
| 3758 | |
| 3759 | static ssize_t advisor_target_scan_time_show(struct kobject *kobj, |
| 3760 | struct kobj_attribute *attr, char *buf) |
| 3761 | { |
| 3762 | return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time); |
| 3763 | } |
| 3764 | |
| 3765 | static ssize_t advisor_target_scan_time_store(struct kobject *kobj, |
| 3766 | struct kobj_attribute *attr, |
| 3767 | const char *buf, size_t count) |
| 3768 | { |
| 3769 | int err; |
| 3770 | unsigned long value; |
| 3771 | |
| 3772 | err = kstrtoul(buf, 10, &value); |
| 3773 | if (err) |
| 3774 | return -EINVAL; |
| 3775 | if (value < 1) |
| 3776 | return -EINVAL; |
| 3777 | |
| 3778 | ksm_advisor_target_scan_time = value; |
| 3779 | return count; |
| 3780 | } |
| 3781 | KSM_ATTR(advisor_target_scan_time); |
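/*
 * Illustrative advisor tuning (only consulted while advisor_mode is
 * scan-time); the numbers below are arbitrary examples, not
 * recommendations:
 *
 *	echo 100 > /sys/kernel/mm/ksm/advisor_target_scan_time
 *	echo 50  > /sys/kernel/mm/ksm/advisor_max_cpu
 */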
| 3782 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3783 | static struct attribute *ksm_attrs[] = { |
| 3784 | &sleep_millisecs_attr.attr, |
| 3785 | &pages_to_scan_attr.attr, |
| 3786 | &run_attr.attr, |
Stefan Roesch | b348b5f | 2023-08-11 12:36:55 -0700 | [diff] [blame] | 3787 | &pages_scanned_attr.attr, |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 3788 | &pages_shared_attr.attr, |
| 3789 | &pages_sharing_attr.attr, |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3790 | &pages_unshared_attr.attr, |
| 3791 | &pages_volatile_attr.attr, |
Stefan Roesch | e5a6899 | 2023-09-25 21:09:37 -0700 | [diff] [blame] | 3792 | &pages_skipped_attr.attr, |
xu xin | e294206 | 2023-06-13 11:09:34 +0800 | [diff] [blame] | 3793 | &ksm_zero_pages_attr.attr, |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 3794 | &full_scans_attr.attr, |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 3795 | #ifdef CONFIG_NUMA |
| 3796 | &merge_across_nodes_attr.attr, |
| 3797 | #endif |
Andrea Arcangeli | 2c653d0 | 2017-07-06 15:36:55 -0700 | [diff] [blame] | 3798 | &max_page_sharing_attr.attr, |
| 3799 | &stable_node_chains_attr.attr, |
| 3800 | &stable_node_dups_attr.attr, |
| 3801 | &stable_node_chains_prune_millisecs_attr.attr, |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 3802 | &use_zero_pages_attr.attr, |
Stefan Roesch | d21077f | 2023-04-17 22:13:41 -0700 | [diff] [blame] | 3803 | &general_profit_attr.attr, |
Stefan Roesch | 5e924ff | 2023-09-25 21:09:36 -0700 | [diff] [blame] | 3804 | &smart_scan_attr.attr, |
Stefan Roesch | 66790e9 | 2023-12-18 15:10:52 -0800 | [diff] [blame] | 3805 | &advisor_mode_attr.attr, |
| 3806 | &advisor_max_cpu_attr.attr, |
| 3807 | &advisor_min_pages_to_scan_attr.attr, |
| 3808 | &advisor_max_pages_to_scan_attr.attr, |
| 3809 | &advisor_target_scan_time_attr.attr, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3810 | NULL, |
| 3811 | }; |
| 3812 | |
Arvind Yadav | f907c26 | 2017-09-06 16:21:53 -0700 | [diff] [blame] | 3813 | static const struct attribute_group ksm_attr_group = { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3814 | .attrs = ksm_attrs, |
| 3815 | .name = "ksm", |
| 3816 | }; |
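/*
 * ksm_init() registers this group on mm_kobj, so the attributes above
 * show up as files under /sys/kernel/mm/ksm/.
 */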
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 3817 | #endif /* CONFIG_SYSFS */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3818 | |
| 3819 | static int __init ksm_init(void) |
| 3820 | { |
| 3821 | struct task_struct *ksm_thread; |
| 3822 | int err; |
| 3823 | |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 3824 | /* The correct value depends on page size and endianness */ |
| 3825 | zero_checksum = calc_checksum(ZERO_PAGE(0)); |
| 3826 | /* Default to false for backwards compatibility */ |
| 3827 | ksm_use_zero_pages = false; |
| 3828 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3829 | err = ksm_slab_init(); |
| 3830 | if (err) |
| 3831 | goto out; |
| 3832 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3833 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
| 3834 | if (IS_ERR(ksm_thread)) { |
Paul McQuade | 25acde3 | 2014-10-09 15:29:09 -0700 | [diff] [blame] | 3835 | pr_err("ksm: creating kthread failed\n"); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3836 | err = PTR_ERR(ksm_thread); |
Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 3837 | goto out_free; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3838 | } |
| 3839 | |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 3840 | #ifdef CONFIG_SYSFS |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3841 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
| 3842 | if (err) { |
Paul McQuade | 25acde3 | 2014-10-09 15:29:09 -0700 | [diff] [blame] | 3843 | pr_err("ksm: register sysfs failed\n"); |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 3844 | kthread_stop(ksm_thread); |
Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 3845 | goto out_free; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3846 | } |
Hugh Dickins | c73602a | 2009-10-07 16:32:22 -0700 | [diff] [blame] | 3847 | #else |
| 3848 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ |
| 3849 | |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 3850 | #endif /* CONFIG_SYSFS */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3851 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3852 | #ifdef CONFIG_MEMORY_HOTREMOVE |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 3853 | /* The exact notifier priority (KSM_CALLBACK_PRI) has no particular significance */ |
Liu Shixin | 1eeaa4f | 2022-09-23 11:33:47 +0800 | [diff] [blame] | 3854 | hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 3855 | #endif |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3856 | return 0; |
| 3857 | |
Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 3858 | out_free: |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 3859 | ksm_slab_free(); |
| 3860 | out: |
| 3861 | return err; |
| 3862 | } |
Paul Gortmaker | a64fb3c | 2014-01-23 15:53:30 -0800 | [diff] [blame] | 3863 | subsys_initcall(ksm_init); |
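/*
 * Userspace opt-in sketch (not part of this file, shown only for
 * illustration): an application marks an anonymous range as mergeable,
 * and ksmd, once set running, will scan and merge duplicate pages in it.
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 2 * 1024 * 1024;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_MERGEABLE);
 */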