// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_mru_cache.h"

/*
 * The MRU Cache data structure consists of a data store, an array of lists and
 * a lock to protect its internal state. At initialisation time, the client
 * supplies an element lifetime in milliseconds and a group count, as well as a
 * function pointer to call when deleting elements. A data structure for
 * queueing up work in the form of timed callbacks is also included.
 *
 * The group count controls how many lists are created, and thereby how finely
 * the elements are grouped in time. When reaping occurs, all the elements in
 * all the lists whose time has expired are deleted.
 *
 * To give an example of how this works in practice, consider a client that
 * initialises an MRU Cache with a lifetime of ten seconds and a group count of
 * five. Five internal lists will be created, each representing a two second
 * period in time. When the first element is added, time zero for the data
 * structure is initialised to the current time.
 *
 * All the elements added in the first two seconds are appended to the first
 * list. Elements added in the third second go into the second list, and so on.
 * If an element is accessed at any point, it is removed from its list and
 * inserted at the head of the current most-recently-used list.
 *
 * The reaper function will have nothing to do until at least twelve seconds
 * have elapsed since the first element was added. The reason for this is that
 * if it were called at t=11s, there could be elements in the first list that
 * have only been inactive for nine seconds, so it still does nothing. If it is
 * called anywhere between t=12 and t=14 seconds, it will delete all the
 * elements that remain in the first list. It's therefore possible for elements
 * to remain in the data store even after they've been inactive for up to
 * (t + t/g) seconds, where t is the inactive element lifetime and g is the
 * number of groups.
 *
 * The above example assumes that the reaper function gets called at least once
 * every (t/g) seconds. If it is called less frequently, unused elements will
 * accumulate in the reap list until the reaper function is eventually called.
 * The current implementation uses work queue callbacks to carefully time the
 * reaper function calls, so this should happen rarely, if at all.
 *
 * From a design perspective, the primary reason for the choice of a list array
 * representing discrete time intervals is that it's only practical to reap
 * expired elements in groups of some appreciable size. This automatically
 * introduces a granularity to element lifetimes, so there's no point storing an
 * individual timeout with each element that specifies a more precise reap time.
 * The bonus is a saving of sizeof(long) bytes of memory per element stored.
 *
 * The elements could have been stored in just one list, but an array of
 * counters or pointers would need to be maintained to allow them to be divided
 * up into discrete time groups. More critically, the process of touching or
 * removing an element would involve walking large portions of the entire list,
 * which would have a detrimental effect on performance. The additional memory
 * requirement for the array of list heads is minimal.
 *
 * When an element is touched or deleted, it needs to be removed from its
 * current list. Doubly linked lists are used to make the list maintenance
 * portion of these operations O(1). Since reaper timing can be imprecise,
 * inserts and lookups can occur when there are no free lists available. When
 * this happens, all the elements on the LRU list need to be migrated to the end
 * of the reap list. To keep the list maintenance portion of these operations
 * O(1) also, list tails need to be accessible without walking the entire list.
 * This is the reason why doubly linked list heads are used.
 */

/*
 * An MRU Cache is a dynamic data structure that stores its elements in a way
 * that allows efficient lookups, but also groups them into discrete time
 * intervals based on insertion time. This allows elements to be efficiently
 * and automatically reaped after a fixed period of inactivity.
 *
 * When a client data pointer is stored in the MRU Cache it needs to be added to
 * both the data store and to one of the lists. It must also be possible to
 * access each of these entries via the other, i.e. to:
 *
 *    a) Walk a list, removing the corresponding data store entry for each item.
 *    b) Look up a data store entry, then access its list entry directly.
 *
 * To achieve both of these goals, each entry must contain both a list entry and
 * a key, in addition to the user's data pointer. Note that it's not a good
 * idea to have the client embed one of these structures at the top of their own
 * data structure, because inserting the same item more than once would most
 * likely result in a loop in one of the lists. That's a sure-fire recipe for
 * an infinite loop in the code.
 */
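
/*
 * A minimal usage sketch of the API implemented below. The caller-side
 * wrapper "struct example_item", its payload field, and the specific
 * lifetime/group values are hypothetical and for illustration only; the
 * calling pattern (caller-provided element, free callback taking the
 * private data pointer and the element) follows the code in this file.
 *
 *	struct example_item {
 *		struct xfs_mru_cache_elem	mru;
 *		void				*payload;
 *	};
 *
 *	static void
 *	example_free(
 *		void				*data,
 *		struct xfs_mru_cache_elem	*elem)
 *	{
 *		kfree(container_of(elem, struct example_item, mru));
 *	}
 *
 *	// Ten second lifetime split into five groups gives the two second
 *	// reap granularity used in the worked example above.
 *	error = xfs_mru_cache_create(&mru, NULL, 10000, 5, example_free);
 *	...
 *	item = kzalloc(sizeof(*item), GFP_NOFS);
 *	if (item)
 *		error = xfs_mru_cache_insert(mru, key, &item->mru);
 *	...
 *	xfs_mru_cache_destroy(mru);	// reaps and frees anything left
 */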
struct xfs_mru_cache {
	struct radix_tree_root	store;     /* Core storage data structure.  */
	struct list_head	*lists;    /* Array of lists, one per grp.  */
	struct list_head	reap_list; /* Elements overdue for reaping. */
	spinlock_t		lock;      /* Lock to protect this struct.  */
	unsigned int		grp_count; /* Number of discrete groups.    */
	unsigned int		grp_time;  /* Time period spanned by grps.  */
	unsigned int		lru_grp;   /* Group containing time zero.   */
	unsigned long		time_zero; /* Time first element was added. */
	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
	struct delayed_work	work;      /* Workqueue data for reaping.   */
	unsigned int		queued;    /* work has been queued */
	void			*data;
};

static struct workqueue_struct	*xfs_mru_reap_wq;

/*
 * When inserting, destroying or reaping, it's first necessary to update the
 * lists relative to a particular time. In the case of destroying, that time
 * will be well in the future to ensure that all items are moved to the reap
 * list. In all other cases though, the time will be the current time.
 *
 * This function enters a loop, moving the contents of the LRU list to the reap
 * list again and again until either a) the lists are all empty, or b) time zero
 * has been advanced sufficiently to be within the immediate element lifetime.
 *
 * Case a) above is detected by counting how many groups are migrated and
 * stopping when they've all been moved. Case b) is detected by monitoring the
 * time_zero field, which is updated as each group is migrated.
 *
 * The return value is the earliest time that more migration could be needed, or
 * zero if there's no need to schedule more work because the lists are empty.
 */
STATIC unsigned long
_xfs_mru_cache_migrate(
	struct xfs_mru_cache	*mru,
	unsigned long		now)
{
	unsigned int		grp;
	unsigned int		migrated = 0;
	struct list_head	*lru_list;

	/* Nothing to do if the data store is empty. */
	if (!mru->time_zero)
		return 0;

	/* While time zero is older than the time spanned by all the lists. */
	while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {

		/*
		 * If the LRU list isn't empty, migrate its elements to the tail
		 * of the reap list.
		 */
		lru_list = mru->lists + mru->lru_grp;
		if (!list_empty(lru_list))
			list_splice_init(lru_list, mru->reap_list.prev);

		/*
		 * Advance the LRU group number, freeing the old LRU list to
		 * become the new MRU list; advance time zero accordingly.
		 */
		mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
		mru->time_zero += mru->grp_time;

		/*
		 * If reaping is so far behind that all the elements on all the
		 * lists have been migrated to the reap list, it's now empty.
		 */
		if (++migrated == mru->grp_count) {
			mru->lru_grp = 0;
			mru->time_zero = 0;
			return 0;
		}
	}

	/* Find the first non-empty list from the LRU end. */
	for (grp = 0; grp < mru->grp_count; grp++) {

		/* Check the grp'th list from the LRU end. */
		lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
		if (!list_empty(lru_list))
			return mru->time_zero +
				(mru->grp_count + grp) * mru->grp_time;
	}

	/* All the lists must be empty. */
	mru->lru_grp = 0;
	mru->time_zero = 0;
	return 0;
}

/*
 * When inserting or doing a lookup, an element needs to be inserted into the
 * MRU list. The lists must be migrated first to ensure that they're
 * up-to-date, otherwise the new element could be given a shorter lifetime in
 * the cache than it should.
 */
STATIC void
_xfs_mru_cache_list_insert(
	struct xfs_mru_cache		*mru,
	struct xfs_mru_cache_elem	*elem)
{
	unsigned int	grp = 0;
	unsigned long	now = jiffies;

	/*
	 * If the data store is empty, initialise time zero, leave grp set to
	 * zero and start the work queue timer if necessary. Otherwise, set grp
	 * to the number of group times that have elapsed since time zero.
	 */
	if (!_xfs_mru_cache_migrate(mru, now)) {
		mru->time_zero = now;
		if (!mru->queued) {
			mru->queued = 1;
			queue_delayed_work(xfs_mru_reap_wq, &mru->work,
					mru->grp_count * mru->grp_time);
		}
	} else {
		grp = (now - mru->time_zero) / mru->grp_time;
		grp = (mru->lru_grp + grp) % mru->grp_count;
	}

	/* Insert the element at the tail of the corresponding list. */
	list_add_tail(&elem->list_node, mru->lists + grp);
}

/*
 * When destroying or reaping, all the elements that were migrated to the reap
 * list need to be deleted. For each element this involves removing it from the
 * data store, removing it from the reap list, calling the client's free
 * function and deleting the element from the element cache.
 *
 * We get called holding the mru->lock, which we drop and then reacquire.
 * Sparse needs special help with this to tell it we know what we are doing.
 */
STATIC void
_xfs_mru_cache_clear_reap_list(
	struct xfs_mru_cache	*mru)
		__releases(mru->lock) __acquires(mru->lock)
{
	struct xfs_mru_cache_elem *elem, *next;
	struct list_head	tmp;

	INIT_LIST_HEAD(&tmp);
	list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {

		/* Remove the element from the data store. */
		radix_tree_delete(&mru->store, elem->key);

		/*
		 * Move it to a temporary list so it can be freed without
		 * needing to hold the lock.
		 */
		list_move(&elem->list_node, &tmp);
	}
	spin_unlock(&mru->lock);

	list_for_each_entry_safe(elem, next, &tmp, list_node) {
		list_del_init(&elem->list_node);
		mru->free_func(mru->data, elem);
	}

	spin_lock(&mru->lock);
}

/*
 * We fire the reap timer every group expiry interval so we always have a
 * reaper ready to run. This makes shutdown and flushing of the reaper easy to
 * do. Hence we need to keep track of when the next reap must occur so we can
 * determine at each interval whether there is anything we need to do.
 */
STATIC void
_xfs_mru_cache_reap(
	struct work_struct	*work)
{
	struct xfs_mru_cache	*mru =
		container_of(work, struct xfs_mru_cache, work.work);
	unsigned long		now, next;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return;

	spin_lock(&mru->lock);
	next = _xfs_mru_cache_migrate(mru, jiffies);
	_xfs_mru_cache_clear_reap_list(mru);

	mru->queued = next;
	if (mru->queued > 0) {
		now = jiffies;
		if (next <= now)
			next = 0;
		else
			next -= now;
		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
	}

	spin_unlock(&mru->lock);
}

int
xfs_mru_cache_init(void)
{
	xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 1);
	if (!xfs_mru_reap_wq)
		return -ENOMEM;
	return 0;
}

void
xfs_mru_cache_uninit(void)
{
	destroy_workqueue(xfs_mru_reap_wq);
}

/*
 * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create()
 * with the address of the pointer, a lifetime value in milliseconds, a group
 * count and a free function to use when deleting elements. This function
 * returns 0 if the initialisation was successful.
 */
int
xfs_mru_cache_create(
	struct xfs_mru_cache	**mrup,
	void			*data,
	unsigned int		lifetime_ms,
	unsigned int		grp_count,
	xfs_mru_cache_free_func_t free_func)
{
	struct xfs_mru_cache	*mru = NULL;
	int			err = 0, grp;
	unsigned int		grp_time;

	if (mrup)
		*mrup = NULL;

	if (!mrup || !grp_count || !lifetime_ms || !free_func)
		return -EINVAL;

	if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
		return -EINVAL;

	if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
		return -ENOMEM;

	/* An extra list is needed to avoid reaping up to a grp_time early. */
	mru->grp_count = grp_count + 1;
	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);

	if (!mru->lists) {
		err = -ENOMEM;
		goto exit;
	}

	for (grp = 0; grp < mru->grp_count; grp++)
		INIT_LIST_HEAD(mru->lists + grp);

	/*
	 * We use GFP_KERNEL radix tree preload and do inserts under a
	 * spinlock so GFP_ATOMIC is appropriate for the radix tree itself.
	 */
	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
	INIT_LIST_HEAD(&mru->reap_list);
	spin_lock_init(&mru->lock);
	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);

	mru->grp_time = grp_time;
	mru->free_func = free_func;
	mru->data = data;
	*mrup = mru;

exit:
	if (err && mru && mru->lists)
		kmem_free(mru->lists);
	if (err && mru)
		kmem_free(mru);

	return err;
}

/*
 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
 * free functions as they're deleted. When this function returns, the caller is
 * guaranteed that all the free functions for all the elements have finished
 * executing and the reaper is not running.
 */
static void
xfs_mru_cache_flush(
	struct xfs_mru_cache	*mru)
{
	if (!mru || !mru->lists)
		return;

	spin_lock(&mru->lock);
	if (mru->queued) {
		spin_unlock(&mru->lock);
		cancel_delayed_work_sync(&mru->work);
		spin_lock(&mru->lock);
	}

	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
	_xfs_mru_cache_clear_reap_list(mru);

	spin_unlock(&mru->lock);
}

void
xfs_mru_cache_destroy(
	struct xfs_mru_cache	*mru)
{
	if (!mru || !mru->lists)
		return;

	xfs_mru_cache_flush(mru);

	kmem_free(mru->lists);
	kmem_free(mru);
}

/*
 * To insert an element, call xfs_mru_cache_insert() with the data store, the
 * element's key and the element itself. This function returns 0 on success,
 * or a negative errno (such as -ENOMEM) if the element could not be added to
 * the data store.
 */
int
xfs_mru_cache_insert(
	struct xfs_mru_cache		*mru,
	unsigned long			key,
	struct xfs_mru_cache_elem	*elem)
{
	int				error;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return -EINVAL;

	if (radix_tree_preload(GFP_NOFS))
		return -ENOMEM;

	INIT_LIST_HEAD(&elem->list_node);
	elem->key = key;

	spin_lock(&mru->lock);
	error = radix_tree_insert(&mru->store, key, elem);
	radix_tree_preload_end();
	if (!error)
		_xfs_mru_cache_list_insert(mru, elem);
	spin_unlock(&mru->lock);

	return error;
}

/*
 * To remove an element without calling the free function, call
 * xfs_mru_cache_remove() with the data store and the element's key. On
 * success the removed element is returned, otherwise this function returns a
 * NULL pointer.
 */
struct xfs_mru_cache_elem *
xfs_mru_cache_remove(
	struct xfs_mru_cache	*mru,
	unsigned long		key)
{
	struct xfs_mru_cache_elem *elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	spin_lock(&mru->lock);
	elem = radix_tree_delete(&mru->store, key);
	if (elem)
		list_del(&elem->list_node);
	spin_unlock(&mru->lock);

	return elem;
}

/*
 * To remove an element and call the free function, call xfs_mru_cache_delete()
 * with the data store and the element's key.
 */
void
xfs_mru_cache_delete(
	struct xfs_mru_cache	*mru,
	unsigned long		key)
{
	struct xfs_mru_cache_elem *elem;

	elem = xfs_mru_cache_remove(mru, key);
	if (elem)
		mru->free_func(mru->data, elem);
}

/*
 * To look up an element using its key, call xfs_mru_cache_lookup() with the
 * data store and the element's key. If found, the element will be moved to the
 * head of the MRU list to indicate that it's been touched.
 *
 * The internal data structures are protected by a spinlock that is STILL HELD
 * when this function returns. Call xfs_mru_cache_done() to release it. Note
 * that it is not safe to call any function that might sleep in the interim.
 *
 * The implementation could have used reference counting to avoid this
 * restriction, but since most clients simply want to get, set or test a member
 * of the returned data structure, the extra per-element memory isn't warranted.
 *
 * If the element isn't found, this function returns NULL and the spinlock is
 * released. xfs_mru_cache_done() should NOT be called when this occurs.
 *
 * Because sparse isn't smart enough to know about conditional lock return
 * status, we need to help it get it right by annotating the path that does
 * not release the lock.
 */
struct xfs_mru_cache_elem *
xfs_mru_cache_lookup(
	struct xfs_mru_cache	*mru,
	unsigned long		key)
{
	struct xfs_mru_cache_elem *elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	spin_lock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (elem) {
		list_del(&elem->list_node);
		_xfs_mru_cache_list_insert(mru, elem);
		__release(mru_lock); /* help sparse not be stupid */
	} else
		spin_unlock(&mru->lock);

	return elem;
}
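
/*
 * A minimal sketch of the lookup/done pattern described above, reusing the
 * hypothetical "example_item" wrapper from the earlier usage sketch. The
 * returned element is only stable while mru->lock is held, so it must be
 * used before xfs_mru_cache_done() and without sleeping:
 *
 *	elem = xfs_mru_cache_lookup(mru, key);
 *	if (elem) {
 *		item = container_of(elem, struct example_item, mru);
 *		item->payload = new_payload;	// non-sleeping access only
 *		xfs_mru_cache_done(mru);
 *	}
 */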

/*
 * To release the internal data structure spinlock after having performed an
 * xfs_mru_cache_lookup(), call xfs_mru_cache_done() with the data store
 * pointer.
 */
void
xfs_mru_cache_done(
	struct xfs_mru_cache	*mru)
		__releases(mru->lock)
{
	spin_unlock(&mru->lock);
}