// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"
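
/*
 * Overview (a summary of the code below, with an illustrative usage
 * sketch; the object type and field names in the sketch are made up):
 *
 * A list_lru keeps one list per NUMA node (lru->node[nid]), each guarded
 * by its own spinlock. When the lru is memcg aware, there is additionally
 * one list per (memcg, node) pair; these per-memcg parts live in lru->xa,
 * indexed by the memcg's kmemcg_id, and are protected by the same
 * per-node locks.
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init(&my_lru);		// wrapper from <linux/list_lru.h>
 *	...
 *	if (list_lru_add(&my_lru, &obj->lru))	// obj->lru: a list_head in the object
 *		...;
 *	...
 *	list_lru_destroy(&my_lru);
 */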

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

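/*
 * Look up the per-memcg list for @idx on @nid. Returns NULL if the lru is
 * memcg aware but the list for @idx has not been allocated yet; falls back
 * to the node-global list when the lru is not memcg aware or @idx < 0.
 */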
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!list_lru_memcg_aware(lru))
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

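/*
 * Add @item to the lru of the node its memory lives on (and, for kmem
 * objects on a memcg-aware lru, to that memcg's list). Returns true if
 * @item was empty and has been added, false if it was already on a list.
 * The per-node lock keeps the list and the nr_items counts consistent
 * for concurrent walkers.
 */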
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

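/*
 * The counterpart of list_lru_add(): returns true if @item was on a list
 * and has been unlinked, false if it was already off-list.
 */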
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

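/*
 * The two helpers below are intended to be called from a list_lru_walk_cb
 * callback, with the lru lock already held by the walker: they unlink (or
 * move) the item and adjust the per-list count, while the per-node count
 * is adjusted by the walker itself when the callback returns LRU_REMOVED
 * or LRU_REMOVED_RETRY.
 */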
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

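/*
 * Walk one (node, memcg) list. The caller must hold the node's lru lock;
 * the isolate callback may drop it, which it signals by returning
 * LRU_RETRY or LRU_REMOVED_RETRY, in which case the traversal restarts
 * from the head of the list.
 */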
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if
		 * we get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}
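
/*
 * A minimal sketch of an isolate callback (illustrative only: struct
 * my_object, its lru member and my_object_unused() are made-up names):
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (!my_object_unused(obj))
 *			return LRU_ROTATE;	// keep it, move to list tail
 *
 *		list_lru_isolate(list, item);	// unlink under the lru lock
 *		return LRU_REMOVED;
 *	}
 *
 * which would be passed to, e.g., list_lru_walk_one(lru, nid, memcg,
 * my_isolate, NULL, &nr_to_walk).
 */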

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
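/*
 * Allocate one struct list_lru_memcg, which embeds a flexible array of
 * nr_node_ids per-node lists (hence the struct_size() sizing below).
 */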
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * __list_lru_walk_one() may still be walking one of the lists in
	 * this mlru, so we must free it with kvfree_rcu(). The walk runs
	 * under lru->node[nid]->lock, which can serve as an RCU read-side
	 * critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

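/*
 * Splice every entry of the @src_idx list on @nid into the corresponding
 * list of @dst_memcg, carrying the item counts (and the shrinker bit)
 * over with them.
 */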
static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change the kmemcg_id of this cgroup and all its descendants to
	 * the parent's id, and then move all entries from this cgroup's
	 * list_lrus to the ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this
	 * cgroup are guaranteed to remain empty. So we can safely free
	 * this cgroup's list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent's id also prevents
	 * memcg_list_lru_alloc() from allocating list lrus for this
	 * cgroup after the memcg_list_lru_free() call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

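/*
 * A negative kmemcg_id means there is nothing to allocate for this memcg,
 * so it is reported as already populated.
 */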
static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}

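/*
 * Make sure @memcg and all its ancestors have a list_lru_memcg allocated
 * for @lru: the mlrus are preallocated outside the xarray lock and then
 * published under it, retrying on -ENOMEM and on a concurrent reparenting
 * that changes a kmemcg_id underneath us.
 */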
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because a list_lru can be reparented to its parent cgroup's
	 * list_lru, we have to make sure that this cgroup and all its
	 * ancestors have their list_lru_memcg allocated.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock has been released, so this
				 * memcg may have been reparented before we
				 * retook it. Reload the memcg id; see the
				 * comments in memcg_reparent_list_lrus()
				 * for details.
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/* xas_nomem() is used here to free preallocated memory, not to allocate more. */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

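/*
 * Callers normally reach this through the list_lru_init*() wrappers in
 * <linux/list_lru.h> rather than calling it directly.
 */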
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);