// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2018 Intel Corporation. All rights reserved.

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include "internal.h"
#include "shuffle.h"

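/*
 * Shuffling is gated behind a static key so that, when it is disabled,
 * the allocator's shuffle checks (see the wrappers in shuffle.h) patch
 * down to a no-op branch.
 */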
DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);

static bool shuffle_param;

static __meminit int shuffle_param_set(const char *val,
		const struct kernel_param *kp)
{
	if (param_set_bool(val, kp))
		return -EINVAL;
	if (*(bool *)kp->arg)
		static_branch_enable(&page_alloc_shuffle_key);
	return 0;
}

static const struct kernel_param_ops shuffle_param_ops = {
	.set = shuffle_param_set,
	.get = param_get_bool,
};
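/*
 * With 0400 permissions the current value is readable (by root) via
 * sysfs but not writable at runtime; shuffling is typically enabled at
 * boot, e.g. with "page_alloc.shuffle=1" on the kernel command line.
 */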
module_param_cb(shuffle, &shuffle_param_ops, &shuffle_param, 0400);

/*
 * For two pages to be swapped in the shuffle, they must be free (on a
 * 'free_area' lru), have the same order, and have the same migratetype.
 */
static struct page * __meminit shuffle_valid_page(struct zone *zone,
						  unsigned long pfn, int order)
{
	struct page *page = pfn_to_online_page(pfn);

	/*
	 * Given we're dealing with randomly selected pfns in a zone we
	 * need to ask questions like...
	 */

	/* ...is the page managed by the buddy? */
	if (!page)
		return NULL;

	/* ...is the page assigned to the same zone? */
	if (page_zone(page) != zone)
		return NULL;

	/* ...is the page free and currently on a free_area list? */
	if (!PageBuddy(page))
		return NULL;

	/*
	 * ...is the page on the same list as the page we will
	 * shuffle it with?
	 */
	if (buddy_order(page) != order)
		return NULL;

	return page;
}

/*
 * Fisher-Yates shuffle the freelist: iterate through an array, pfns in
 * this case, and randomly swap each entry with another in the span
 * end_pfn - start_pfn.
 *
 * To keep the implementation simple it does not attempt to correct for
 * sources of bias in the distribution, like modulo bias or pseudo-random
 * number generator bias. I.e. the expectation is that this shuffling
 * raises the bar for attacks that exploit the predictability of page
 * allocations, but it need not be a perfect shuffle.
 */
#define SHUFFLE_RETRY 10
void __meminit __shuffle_zone(struct zone *z)
{
	unsigned long i, flags;
	unsigned long start_pfn = z->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(z);
	const int order = SHUFFLE_ORDER;
	const int order_pages = 1 << order;

	spin_lock_irqsave(&z->lock, flags);
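	/*
	 * Walk the zone in SHUFFLE_ORDER-sized strides; aligning the start
	 * keeps every candidate pfn on an order boundary.
	 */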
	start_pfn = ALIGN(start_pfn, order_pages);
	for (i = start_pfn; i < end_pfn; i += order_pages) {
		unsigned long j;
		int migratetype, retry;
		struct page *page_i, *page_j;

		/*
		 * We expect page_i, in the sub-range of a zone being added
		 * (@start_pfn to @end_pfn), to be more likely valid than
		 * page_j, randomly selected from the span @zone_start_pfn to
		 * @zone_start_pfn + @spanned_pages.
		 */
		page_i = shuffle_valid_page(z, i, order);
		if (!page_i)
			continue;

		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
			/*
			 * Pick a random order-aligned page in the zone span as
			 * a swap target. If the selected pfn is a hole, retry
			 * up to SHUFFLE_RETRY attempts to find a random valid
			 * pfn in the zone.
			 */
			j = z->zone_start_pfn +
				ALIGN_DOWN(get_random_long() % z->spanned_pages,
						order_pages);
			page_j = shuffle_valid_page(z, j, order);
			if (page_j && page_j != page_i)
				break;
		}
		if (retry >= SHUFFLE_RETRY) {
			pr_debug("%s: failed to swap %#lx\n", __func__, i);
			continue;
		}

		/*
		 * Each migratetype corresponds to its own list; make sure the
		 * types match, otherwise we're moving pages to lists where
		 * they do not belong.
		 */
		migratetype = get_pageblock_migratetype(page_i);
		if (get_pageblock_migratetype(page_j) != migratetype) {
			pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
			continue;
		}

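		/* Swap the two free pages' positions on the free list. */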
		list_swap(&page_i->lru, &page_j->lru);

		pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);

		/* take it easy on the zone lock */
		if ((i % (100 * order_pages)) == 0) {
			spin_unlock_irqrestore(&z->lock, flags);
			cond_resched();
			spin_lock_irqsave(&z->lock, flags);
		}
	}
	spin_unlock_irqrestore(&z->lock, flags);
}

/*
 * __shuffle_free_memory - reduce the predictability of the page allocator
 * @pgdat: node page data
 */
void __meminit __shuffle_free_memory(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		shuffle_zone(z);
}

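/*
 * shuffle_pick_tail - randomly choose head vs. tail freelist placement
 *
 * When shuffling is enabled, the buddy free path consults this helper to
 * decide whether a freed page is queued at the head or the tail of its
 * freelist, roughly (a sketch of the caller in page_alloc.c, not a
 * verbatim quote):
 *
 *	if (is_shuffle_order(order))
 *		to_tail = shuffle_pick_tail();
 *
 * Returns true for tail placement.
 */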
bool shuffle_pick_tail(void)
{
	static u64 rand;
	static u8 rand_bits;
	bool ret;

	/*
	 * The lack of locking is deliberate. If 2 threads race to
	 * update the rand state it just adds to the entropy.
	 */
	if (rand_bits == 0) {
		rand_bits = 64;
		rand = get_random_u64();
	}

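	/*
	 * Consume one bit per decision, so a single get_random_u64() call
	 * is amortized across 64 invocations.
	 */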
	ret = rand & 1;

	rand_bits--;
	rand >>= 1;

	return ret;
}