// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'. It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on.
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray.
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_next_offset(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
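
/*
 * Illustrative sketch (not part of the original file): a typical advanced-API
 * read. The xarray "my_xa" and the index are hypothetical; xas_retry()
 * restarts the walk if the loaded entry is a retry marker left behind by a
 * concurrent modification.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */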

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function; it is called for you
 * by xas_nomem().
 */
void xas_destroy(struct xa_state *xas)
{
	struct xa_node *next, *node = xas->xa_alloc;

	while (node) {
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
		next = rcu_dereference_raw(node->parent);
		radix_tree_node_rcu_free(&node->rcu_head);
		xas->xa_alloc = node = next;
	}
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
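
/*
 * Illustrative sketch (not part of the original file): the canonical retry
 * loop that xas_nomem() is designed for. "my_xa" and "item" are hypothetical.
 * On -ENOMEM the lock is dropped, a node is allocated with GFP_KERNEL, and
 * the store is retried.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */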

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if the node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL. If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas. Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range. This is for the benefit of users who have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			if (node->shift >= shift)
				break;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early. rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
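
/*
 * Illustrative sketch (not part of the original file): storing with the
 * advanced API and setting a search mark within the same locked region.
 * "my_xa", "item" and the choice of XA_MARK_0 are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		if (!xas_error(&xas))
 *			xas_set_mark(&xas, XA_MARK_0);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */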

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries. Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries. Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas. If we're tracking
 * free entries with a mark, we need to set it on all entries. All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
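
/*
 * Illustrative sketch (not part of the original file): marking an existing
 * entry. xas_set_mark() requires the xa_state to have been walked to the
 * entry first, which xas_load() does; "my_xa" is hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	xas_lock(&xas);
 *	if (xas_load(&xas))
 *		xas_set_mark(&xas, XA_MARK_0);
 *	xas_unlock(&xas);
 */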

#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
	unsigned int marks = 0;
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (node_get_mark(node, offset, mark))
			marks |= 1 << (__force unsigned int)mark;
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}

	return marks;
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
			struct xa_node *child, unsigned int marks)
{
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (marks & (1 << (__force unsigned int)mark)) {
			node_set_mark(node, offset, mark);
			if (child)
				node_mark_all(child, mark);
		}
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}

/**
 * xas_split_alloc() - Allocate memory for splitting an entry.
 * @xas: XArray operation state.
 * @entry: New entry which will be stored in the array.
 * @order: Current entry order.
 * @gfp: Memory allocation flags.
 *
 * This function should be called before calling xas_split().
 * If necessary, it will allocate new nodes (and fill them with @entry)
 * to prepare for the upcoming split of an entry of @order size into
 * entries of the order stored in the @xas.
 *
 * Context: May sleep if @gfp flags permit.
 */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
		gfp_t gfp)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int mask = xas->xa_sibs;

	/* XXX: no support for splitting really large entries yet */
	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
		goto nomem;
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;

	do {
		unsigned int i;
		void *sibling = NULL;
		struct xa_node *node;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node)
			goto nomem;
		node->array = xas->xa;
		for (i = 0; i < XA_CHUNK_SIZE; i++) {
			if ((i & mask) == 0) {
				RCU_INIT_POINTER(node->slots[i], entry);
				sibling = xa_mk_sibling(i);
			} else {
				RCU_INIT_POINTER(node->slots[i], sibling);
			}
		}
		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
		xas->xa_alloc = node;
	} while (sibs-- > 0);

	return;
nomem:
	xas_destroy(xas);
	xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);

/**
 * xas_split() - Split a multi-index entry into smaller entries.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: Current entry order.
 *
 * The size of the new entries is set in @xas. The value in @entry is
 * copied to all the replacement entries.
 *
 * Context: Any context. The caller should hold the xa_lock.
 */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	marks = node_get_marks(node, xas->xa_offset);

	offset = xas->xa_offset + sibs;
	do {
		if (xas->xa_shift < node->shift) {
			struct xa_node *child = xas->xa_alloc;

			xas->xa_alloc = rcu_dereference_raw(child->parent);
			child->shift = node->shift - XA_CHUNK_SHIFT;
			child->offset = offset;
			child->count = XA_CHUNK_SIZE;
			child->nr_values = xa_is_value(entry) ?
					XA_CHUNK_SIZE : 0;
			RCU_INIT_POINTER(child->parent, node);
			node_set_marks(node, offset, child, marks);
			rcu_assign_pointer(node->slots[offset],
					xa_mk_node(child));
			if (xa_is_value(curr))
				values--;
			xas_update(xas, child);
		} else {
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		}
	} while (offset-- > xas->xa_offset);

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif
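
/*
 * Illustrative sketch (not part of the original file): splitting an order-9
 * entry into order-0 entries, in the style of the page cache. The xarray,
 * index and entry are hypothetical, and error checking is elided;
 * XA_STATE_ORDER() sets the new (smaller) order in the xa_state.
 *
 *	XA_STATE_ORDER(xas, &my_xa, index, 0);
 *
 *	xas_split_alloc(&xas, entry, 9, GFP_KERNEL);
 *	xas_lock(&xas);
 *	xas_split(&xas, entry, 9);
 *	xas_unlock(&xas);
 */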

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration. If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	xas->xa_node = XAS_RESTART;
	if (node) {
		unsigned long offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
		if (xas->xa_index == 0)
			xas->xa_node = XAS_BOUNDS;
	} else {
		xas->xa_index++;
	}
}
EXPORT_SYMBOL_GPL(xas_pause);
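
/*
 * Illustrative sketch (not part of the original file): a locked iteration
 * that periodically yields the CPU. process() and "my_xa" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */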

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array. This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
		return NULL;
	if (xas->xa_index > max)
		return set_bounds(xas);

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas->xa_node == XAS_RESTART) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_next_offset(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_next_offset(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
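
/*
 * Illustrative sketch (not part of the original file): using xas_find()
 * directly; it is also the engine behind the xas_for_each() iterator.
 * Subsequent calls continue from the current position. "my_xa", "start"
 * and "max" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, start);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = xas_find(&xas, max); entry; entry = xas_find(&xas, max)) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		...
 *	}
 *	rcu_read_unlock();
 */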

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array. This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
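
/*
 * Illustrative sketch (not part of the original file): walking only the
 * marked entries via xas_for_each_marked(), which is built on
 * xas_find_marked(). "my_xa" is hypothetical.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 *		...
 *	rcu_read_unlock();
 */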

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context. Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);
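
/*
 * Illustrative sketch (not part of the original file): the normal API needs
 * no locking by the caller; xa_load() takes the RCU lock itself, and reserved
 * (zero) entries read back as NULL. "my_xa" is hypothetical.
 *
 *	void *entry = xa_load(&my_xa, index);
 *
 *	if (entry)
 *		...
 */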

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_erase(xa, index);
	xa_unlock(xa);

	return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock(xa);

	return curr;
}
EXPORT_SYMBOL(xa_store);
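
/*
 * Illustrative sketch (not part of the original file): checking a store for
 * failure. The returned old entry doubles as the error cookie, so it should
 * be tested with xa_is_err()/xa_err(); "my_xa" and "item" are hypothetical.
 *
 *	void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */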

/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
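
/*
 * Illustrative sketch (not part of the original file): the unlocked wrapper
 * xa_cmpxchg() returns the entry that was actually found, so a successful
 * exchange is detected by comparing against the expected old value.
 * "my_xa" and "item" are hypothetical.
 *
 *	void *curr = xa_cmpxchg(&my_xa, index, NULL, item, GFP_KERNEL);
 *
 *	if (xa_is_err(curr))
 *		err = xa_err(curr);
 *	else if (!curr)
 *		...			(item was stored)
 *	else
 *		...			(another entry was already present)
 */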
1626
1627/**
Matthew Wilcoxb0606fe2019-01-02 13:57:03 -05001628 * __xa_insert() - Store this entry in the XArray if no entry is present.
1629 * @xa: XArray.
1630 * @index: Index into array.
1631 * @entry: New entry.
1632 * @gfp: Memory allocation flags.
1633 *
1634 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1635 * if no entry is present. Inserting will fail if a reserved entry is
1636 * present, even though loading from this index will return NULL.
1637 *
1638 * Context: Any context. Expects xa_lock to be held on entry. May
1639 * release and reacquire xa_lock if @gfp flags permit.
Matthew Wilcoxfd9dc932019-02-06 13:07:11 -05001640 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
Matthew Wilcoxb0606fe2019-01-02 13:57:03 -05001641 * -ENOMEM if memory could not be allocated.
1642 */
1643int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1644{
1645 XA_STATE(xas, xa, index);
1646 void *curr;
1647
1648 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1649 return -EINVAL;
1650 if (!entry)
1651 entry = XA_ZERO_ENTRY;
1652
1653 do {
1654 curr = xas_load(&xas);
1655 if (!curr) {
1656 xas_store(&xas, entry);
1657 if (xa_track_free(xa))
1658 xas_clear_mark(&xas, XA_FREE_MARK);
1659 } else {
Matthew Wilcoxfd9dc932019-02-06 13:07:11 -05001660 xas_set_err(&xas, -EBUSY);
Matthew Wilcoxb0606fe2019-01-02 13:57:03 -05001661 }
1662 } while (__xas_nomem(&xas, gfp));
1663
1664 return xas_error(&xas);
1665}
1666EXPORT_SYMBOL(__xa_insert);
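/*
 * Example (illustrative sketch): the unlocked wrapper xa_insert() is the
 * usual entry point for "add unless already present".  Names are
 * hypothetical.
 *
 *	int my_register(struct xarray *xa, unsigned long id,
 *			struct my_object *obj)
 *	{
 *		int err = xa_insert(xa, id, obj, GFP_KERNEL);
 *
 *		if (err == -EBUSY)
 *			pr_warn("id %lu already in use\n", id);
 *		return err;
 *	}
 */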
1667
Matthew Wilcox0e9446c2018-08-15 14:13:29 -04001668#ifdef CONFIG_XARRAY_MULTI
1669static void xas_set_range(struct xa_state *xas, unsigned long first,
1670 unsigned long last)
1671{
1672 unsigned int shift = 0;
1673 unsigned long sibs = last - first;
1674 unsigned int offset = XA_CHUNK_MASK;
1675
1676 xas_set(xas, first);
1677
1678 while ((first & XA_CHUNK_MASK) == 0) {
1679 if (sibs < XA_CHUNK_MASK)
1680 break;
1681 if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1682 break;
1683 shift += XA_CHUNK_SHIFT;
1684 if (offset == XA_CHUNK_MASK)
1685 offset = sibs & XA_CHUNK_MASK;
1686 sibs >>= XA_CHUNK_SHIFT;
1687 first >>= XA_CHUNK_SHIFT;
1688 }
1689
1690 offset = first & XA_CHUNK_MASK;
1691 if (offset + sibs > XA_CHUNK_MASK)
1692 sibs = XA_CHUNK_MASK - offset;
1693 if ((((first + sibs + 1) << shift) - 1) > last)
1694 sibs -= 1;
1695
1696 xas->xa_shift = shift;
1697 xas->xa_sibs = sibs;
1698}
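/*
 * A worked example, assuming XA_CHUNK_SHIFT == 6 (64 slots per node):
 * for first = 0 and last = 127, the loop body runs exactly once, leaving
 * shift = 6 and sibs = 1 - one canonical slot plus one sibling one level
 * above the leaves, together covering 2 << 6 = 128 indices.
 */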
1699
1700/**
1701 * xa_store_range() - Store this entry at a range of indices in the XArray.
1702 * @xa: XArray.
1703 * @first: First index to affect.
1704 * @last: Last index to affect.
1705 * @entry: New entry.
1706 * @gfp: Memory allocation flags.
1707 *
 1708 * After this function returns, loads from any index between @first and @last,
 1709 * inclusive, will return @entry.
Matthew Wilcox (Oracle)8fc75642020-10-15 20:05:16 -07001710 * Storing into an existing multi-index entry updates the entry of every index.
Matthew Wilcox0e9446c2018-08-15 14:13:29 -04001711 * The marks associated with these indices are unaffected unless @entry is %NULL.
1712 *
1713 * Context: Process context. Takes and releases the xa_lock. May sleep
1714 * if the @gfp flags permit.
1715 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1716 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1717 */
1718void *xa_store_range(struct xarray *xa, unsigned long first,
1719 unsigned long last, void *entry, gfp_t gfp)
1720{
1721 XA_STATE(xas, xa, 0);
1722
1723 if (WARN_ON_ONCE(xa_is_internal(entry)))
1724 return XA_ERROR(-EINVAL);
1725 if (last < first)
1726 return XA_ERROR(-EINVAL);
1727
1728 do {
1729 xas_lock(&xas);
1730 if (entry) {
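			/*
			 * Compute the order of the largest naturally-aligned
			 * block that ends at @last.  If @last is ULONG_MAX,
			 * @last + 1 wraps to 0 and the entry must cover the
			 * entire array.
			 */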
Matthew Wilcox44a4a662018-11-05 10:53:09 -05001731 unsigned int order = BITS_PER_LONG;
1732 if (last + 1)
1733 order = __ffs(last + 1);
Matthew Wilcox0e9446c2018-08-15 14:13:29 -04001734 xas_set_order(&xas, last, order);
Matthew Wilcox76b4e522018-12-28 23:20:44 -05001735 xas_create(&xas, true);
Matthew Wilcox0e9446c2018-08-15 14:13:29 -04001736 if (xas_error(&xas))
1737 goto unlock;
1738 }
1739 do {
1740 xas_set_range(&xas, first, last);
1741 xas_store(&xas, entry);
1742 if (xas_error(&xas))
1743 goto unlock;
1744 first += xas_size(&xas);
1745 } while (first <= last);
1746unlock:
1747 xas_unlock(&xas);
1748 } while (xas_nomem(&xas, gfp));
1749
1750 return xas_result(&xas, NULL);
1751}
1752EXPORT_SYMBOL(xa_store_range);
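/*
 * Example (illustrative sketch): mapping four consecutive page-sized
 * indices to one object.  The function and structure names are
 * hypothetical; xa_err() turns the returned entry into 0 or a negative
 * errno.
 *
 *	int my_map(struct xarray *xa, pgoff_t first, struct my_object *obj)
 *	{
 *		return xa_err(xa_store_range(xa, first, first + 3, obj,
 *					GFP_KERNEL));
 *	}
 */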
Matthew Wilcox (Oracle)57417ce2020-10-15 20:05:13 -07001753
1754/**
1755 * xa_get_order() - Get the order of an entry.
1756 * @xa: XArray.
1757 * @index: Index of the entry.
 1758 *
 * Context: Any context.  Takes and releases the RCU lock.
 1759 * Return: A number between 0 and 63 indicating the order of the entry.
1760 */
1761int xa_get_order(struct xarray *xa, unsigned long index)
1762{
1763 XA_STATE(xas, xa, index);
1764 void *entry;
1765 int order = 0;
1766
1767 rcu_read_lock();
1768 entry = xas_load(&xas);
1769
1770 if (!entry)
1771 goto unlock;
1772
1773 if (!xas.xa_node)
1774 goto unlock;
1775
1776 for (;;) {
1777 unsigned int slot = xas.xa_offset + (1 << order);
1778
1779 if (slot >= XA_CHUNK_SIZE)
1780 break;
1781 if (!xa_is_sibling(xas.xa_node->slots[slot]))
1782 break;
1783 order++;
1784 }
1785
1786 order += xas.xa_node->shift;
1787unlock:
1788 rcu_read_unlock();
1789
1790 return order;
1791}
1792EXPORT_SYMBOL(xa_get_order);
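/*
 * Example (illustrative sketch, assuming XA_CHUNK_SHIFT == 6 and
 * CONFIG_XARRAY_MULTI): after storing one entry over the naturally
 * aligned range 0-127, xa_get_order() reports order 7 for any index
 * inside the range.
 *
 *	xa_store_range(xa, 0, 127, obj, GFP_KERNEL);
 *	order = xa_get_order(xa, 5);	(order is now 7: 128 indices)
 */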
Matthew Wilcox0e9446c2018-08-15 14:13:29 -04001793#endif /* CONFIG_XARRAY_MULTI */
1794
Matthew Wilcox9f14d4f2018-10-01 14:54:59 -04001795/**
Matthew Wilcox371c7522018-07-04 10:50:12 -04001796 * __xa_alloc() - Find somewhere to store this entry in the XArray.
1797 * @xa: XArray.
1798 * @id: Pointer to ID.
Matthew Wilcox371c7522018-07-04 10:50:12 -04001799 * @entry: New entry.
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001800 * @limit: Range for allocated ID.
1801 * @gfp: Memory allocation flags.
1802 *
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001803 * Finds an empty entry in @xa between @limit.min and @limit.max,
1804 * stores the index into the @id pointer, then stores the entry at
1805 * that index. A concurrent lookup will not see an uninitialised @id.
Matthew Wilcox371c7522018-07-04 10:50:12 -04001806 *
1807 * Context: Any context. Expects xa_lock to be held on entry. May
1808 * release and reacquire xa_lock if @gfp flags permit.
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001809 * Return: 0 on success, -ENOMEM if memory could not be allocated or
1810 * -EBUSY if there are no free entries in @limit.
Matthew Wilcox371c7522018-07-04 10:50:12 -04001811 */
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001812int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1813 struct xa_limit limit, gfp_t gfp)
Matthew Wilcox371c7522018-07-04 10:50:12 -04001814{
1815 XA_STATE(xas, xa, 0);
Matthew Wilcox371c7522018-07-04 10:50:12 -04001816
Matthew Wilcox76b4e522018-12-28 23:20:44 -05001817 if (WARN_ON_ONCE(xa_is_advanced(entry)))
Matthew Wilcox371c7522018-07-04 10:50:12 -04001818 return -EINVAL;
1819 if (WARN_ON_ONCE(!xa_track_free(xa)))
1820 return -EINVAL;
1821
1822 if (!entry)
1823 entry = XA_ZERO_ENTRY;
1824
1825 do {
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001826 xas.xa_index = limit.min;
1827 xas_find_marked(&xas, limit.max, XA_FREE_MARK);
Matthew Wilcox371c7522018-07-04 10:50:12 -04001828 if (xas.xa_node == XAS_RESTART)
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001829 xas_set_err(&xas, -EBUSY);
1830 else
1831 *id = xas.xa_index;
Matthew Wilcox371c7522018-07-04 10:50:12 -04001832 xas_store(&xas, entry);
1833 xas_clear_mark(&xas, XA_FREE_MARK);
1834 } while (__xas_nomem(&xas, gfp));
1835
Matthew Wilcoxa3e4d3f2018-12-31 10:41:01 -05001836 return xas_error(&xas);
Matthew Wilcox371c7522018-07-04 10:50:12 -04001837}
1838EXPORT_SYMBOL(__xa_alloc);
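/*
 * Example (illustrative sketch): the unlocked wrapper xa_alloc() is the
 * usual entry point.  The array must have been defined with
 * DEFINE_XARRAY_ALLOC() (or initialised with XA_FLAGS_ALLOC); the
 * structure and function names are hypothetical.
 *
 *	static DEFINE_XARRAY_ALLOC(my_ids);
 *
 *	int my_object_register(struct my_object *obj)
 *	{
 *		return xa_alloc(&my_ids, &obj->id, obj, xa_limit_31b,
 *				GFP_KERNEL);
 *	}
 */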
1839
1840/**
Matthew Wilcox2fa044e2018-11-06 14:13:35 -05001841 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1842 * @xa: XArray.
1843 * @id: Pointer to ID.
1844 * @entry: New entry.
1845 * @limit: Range of allocated ID.
1846 * @next: Pointer to next ID to allocate.
1847 * @gfp: Memory allocation flags.
1848 *
1849 * Finds an empty entry in @xa between @limit.min and @limit.max,
1850 * stores the index into the @id pointer, then stores the entry at
1851 * that index. A concurrent lookup will not see an uninitialised @id.
1852 * The search for an empty entry will start at @next and will wrap
1853 * around if necessary.
1854 *
1855 * Context: Any context. Expects xa_lock to be held on entry. May
1856 * release and reacquire xa_lock if @gfp flags permit.
1857 * Return: 0 if the allocation succeeded without wrapping. 1 if the
1858 * allocation succeeded after wrapping, -ENOMEM if memory could not be
1859 * allocated or -EBUSY if there are no free entries in @limit.
1860 */
1861int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1862 struct xa_limit limit, u32 *next, gfp_t gfp)
1863{
1864 u32 min = limit.min;
1865 int ret;
1866
1867 limit.min = max(min, *next);
1868 ret = __xa_alloc(xa, id, entry, limit, gfp);
1869 if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1870 xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1871 ret = 1;
1872 }
1873
1874 if (ret < 0 && limit.min > min) {
1875 limit.min = min;
1876 ret = __xa_alloc(xa, id, entry, limit, gfp);
1877 if (ret == 0)
1878 ret = 1;
1879 }
1880
1881 if (ret >= 0) {
1882 *next = *id + 1;
1883 if (*next == 0)
1884 xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1885 }
1886 return ret;
1887}
1888EXPORT_SYMBOL(__xa_alloc_cyclic);
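/*
 * Example (illustrative sketch): the unlocked wrapper xa_alloc_cyclic()
 * is the usual entry point.  Keeping @next in a static variable gives
 * IDs that are not immediately reused after being freed; names are
 * hypothetical.
 *
 *	static DEFINE_XARRAY_ALLOC(my_ids);
 *	static u32 my_next;
 *
 *	int my_object_register(struct my_object *obj)
 *	{
 *		int err = xa_alloc_cyclic(&my_ids, &obj->id, obj,
 *				xa_limit_31b, &my_next, GFP_KERNEL);
 *
 *		return err < 0 ? err : 0;
 *	}
 */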
1889
1890/**
Matthew Wilcox9b89a032017-11-10 09:34:31 -05001891 * __xa_set_mark() - Set this mark on this entry while locked.
1892 * @xa: XArray.
1893 * @index: Index of entry.
1894 * @mark: Mark number.
1895 *
Matthew Wilcox804dfaf2018-11-05 16:37:15 -05001896 * Attempting to set a mark on a %NULL entry does not succeed.
Matthew Wilcox9b89a032017-11-10 09:34:31 -05001897 *
1898 * Context: Any context. Expects xa_lock to be held on entry.
1899 */
1900void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1901{
1902 XA_STATE(xas, xa, index);
1903 void *entry = xas_load(&xas);
1904
1905 if (entry)
1906 xas_set_mark(&xas, mark);
1907}
Matthew Wilcox9ee5a3b2018-11-01 22:52:06 -04001908EXPORT_SYMBOL(__xa_set_mark);
Matthew Wilcox9b89a032017-11-10 09:34:31 -05001909
1910/**
1911 * __xa_clear_mark() - Clear this mark on this entry while locked.
1912 * @xa: XArray.
1913 * @index: Index of entry.
1914 * @mark: Mark number.
1915 *
1916 * Context: Any context. Expects xa_lock to be held on entry.
1917 */
1918void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1919{
1920 XA_STATE(xas, xa, index);
1921 void *entry = xas_load(&xas);
1922
1923 if (entry)
1924 xas_clear_mark(&xas, mark);
1925}
Matthew Wilcox9ee5a3b2018-11-01 22:52:06 -04001926EXPORT_SYMBOL(__xa_clear_mark);
Matthew Wilcox9b89a032017-11-10 09:34:31 -05001927
1928/**
1929 * xa_get_mark() - Inquire whether this mark is set on this entry.
1930 * @xa: XArray.
1931 * @index: Index of entry.
1932 * @mark: Mark number.
1933 *
1934 * This function uses the RCU read lock, so the result may be out of date
1935 * by the time it returns. If you need the result to be stable, use a lock.
1936 *
1937 * Context: Any context. Takes and releases the RCU lock.
1938 * Return: True if the entry at @index has this mark set, false if it doesn't.
1939 */
1940bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1941{
1942 XA_STATE(xas, xa, index);
1943 void *entry;
1944
1945 rcu_read_lock();
1946 entry = xas_start(&xas);
1947 while (xas_get_mark(&xas, mark)) {
1948 if (!xa_is_node(entry))
1949 goto found;
1950 entry = xas_descend(&xas, xa_to_node(entry));
1951 }
1952 rcu_read_unlock();
1953 return false;
1954 found:
1955 rcu_read_unlock();
1956 return true;
1957}
1958EXPORT_SYMBOL(xa_get_mark);
1959
1960/**
1961 * xa_set_mark() - Set this mark on this entry.
1962 * @xa: XArray.
1963 * @index: Index of entry.
1964 * @mark: Mark number.
1965 *
Matthew Wilcox804dfaf2018-11-05 16:37:15 -05001966 * Attempting to set a mark on a %NULL entry does not succeed.
Matthew Wilcox9b89a032017-11-10 09:34:31 -05001967 *
1968 * Context: Process context. Takes and releases the xa_lock.
1969 */
1970void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1971{
1972 xa_lock(xa);
1973 __xa_set_mark(xa, index, mark);
1974 xa_unlock(xa);
1975}
1976EXPORT_SYMBOL(xa_set_mark);
1977
1978/**
1979 * xa_clear_mark() - Clear this mark on this entry.
1980 * @xa: XArray.
1981 * @index: Index of entry.
1982 * @mark: Mark number.
1983 *
1984 * Clearing a mark always succeeds.
1985 *
1986 * Context: Process context. Takes and releases the xa_lock.
1987 */
1988void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1989{
1990 xa_lock(xa);
1991 __xa_clear_mark(xa, index, mark);
1992 xa_unlock(xa);
1993}
1994EXPORT_SYMBOL(xa_clear_mark);
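/*
 * Example (illustrative sketch): marks can tag a subset of entries for a
 * later pass, such as objects that need writeback.  The choice of
 * XA_MARK_0 and the names below are hypothetical.
 *
 *	xa_set_mark(xa, index, XA_MARK_0);	(when an object is dirtied)
 *
 *	unsigned long i;
 *	struct my_object *obj;
 *
 *	xa_for_each_marked(xa, i, obj, XA_MARK_0) {
 *		my_writeback(obj);
 *		xa_clear_mark(xa, i, XA_MARK_0);
 *	}
 */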
1995
Matthew Wilcoxb803b422017-11-14 08:30:11 -05001996/**
1997 * xa_find() - Search the XArray for an entry.
1998 * @xa: XArray.
1999 * @indexp: Pointer to an index.
2000 * @max: Maximum index to search to.
2001 * @filter: Selection criterion.
2002 *
 2003 * Finds the entry in @xa which matches the @filter and has the lowest
2004 * index that is at least @indexp and no more than @max.
2005 * If an entry is found, @indexp is updated to be the index of the entry.
2006 * This function is protected by the RCU read lock, so it may not find
2007 * entries which are being simultaneously added. It will not return an
2008 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2009 *
2010 * Context: Any context. Takes and releases the RCU lock.
2011 * Return: The entry, if found, otherwise %NULL.
2012 */
2013void *xa_find(struct xarray *xa, unsigned long *indexp,
2014 unsigned long max, xa_mark_t filter)
2015{
2016 XA_STATE(xas, xa, *indexp);
2017 void *entry;
2018
2019 rcu_read_lock();
2020 do {
2021 if ((__force unsigned int)filter < XA_MAX_MARKS)
2022 entry = xas_find_marked(&xas, max, filter);
2023 else
2024 entry = xas_find(&xas, max);
2025 } while (xas_retry(&xas, entry));
2026 rcu_read_unlock();
2027
2028 if (entry)
2029 *indexp = xas.xa_index;
2030 return entry;
2031}
2032EXPORT_SYMBOL(xa_find);
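/*
 * Example (illustrative sketch): finding the first present entry at or
 * above a starting index.  Names are hypothetical.
 *
 *	unsigned long index = start;
 *	struct my_object *obj;
 *
 *	obj = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
 *	if (obj)
 *		pr_info("first object at index %lu\n", index);
 */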
2033
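/*
 * Return true if the entry at the current position begins before
 * xas->xa_index, i.e. the iteration stopped in the middle of a
 * multi-index entry whose first index has already been returned.
 */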
Matthew Wilcox (Oracle)19c30f42020-01-17 22:00:41 -05002034static bool xas_sibling(struct xa_state *xas)
2035{
2036 struct xa_node *node = xas->xa_node;
2037 unsigned long mask;
2038
Matthew Wilcox (Oracle)d8e93e32020-02-27 07:37:40 -05002039 if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
Matthew Wilcox (Oracle)19c30f42020-01-17 22:00:41 -05002040 return false;
2041 mask = (XA_CHUNK_SIZE << node->shift) - 1;
Matthew Wilcox (Oracle)bd40b172020-01-31 05:07:55 -05002042 return (xas->xa_index & mask) >
2043 ((unsigned long)xas->xa_offset << node->shift);
Matthew Wilcox (Oracle)19c30f42020-01-17 22:00:41 -05002044}
2045
Matthew Wilcoxb803b422017-11-14 08:30:11 -05002046/**
2047 * xa_find_after() - Search the XArray for a present entry.
2048 * @xa: XArray.
2049 * @indexp: Pointer to an index.
2050 * @max: Maximum index to search to.
2051 * @filter: Selection criterion.
2052 *
2053 * Finds the entry in @xa which matches the @filter and has the lowest
2054 * index that is above @indexp and no more than @max.
2055 * If an entry is found, @indexp is updated to be the index of the entry.
2056 * This function is protected by the RCU read lock, so it may miss entries
2057 * which are being simultaneously added. It will not return an
2058 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2059 *
2060 * Context: Any context. Takes and releases the RCU lock.
 2061 * Return: The entry, if found, otherwise %NULL.
2062 */
2063void *xa_find_after(struct xarray *xa, unsigned long *indexp,
2064 unsigned long max, xa_mark_t filter)
2065{
2066 XA_STATE(xas, xa, *indexp + 1);
2067 void *entry;
2068
Matthew Wilcox (Oracle)430f24f2020-01-17 17:45:12 -05002069 if (xas.xa_index == 0)
2070 return NULL;
2071
Matthew Wilcoxb803b422017-11-14 08:30:11 -05002072 rcu_read_lock();
2073 for (;;) {
2074 if ((__force unsigned int)filter < XA_MAX_MARKS)
2075 entry = xas_find_marked(&xas, max, filter);
2076 else
2077 entry = xas_find(&xas, max);
Matthew Wilcox (Oracle)c44aa5e2020-01-17 22:13:21 -05002078
2079 if (xas_invalid(&xas))
Matthew Wilcox8229706e2018-11-01 16:55:19 -04002080 break;
Matthew Wilcox (Oracle)19c30f42020-01-17 22:00:41 -05002081 if (xas_sibling(&xas))
2082 continue;
Matthew Wilcoxb803b422017-11-14 08:30:11 -05002083 if (!xas_retry(&xas, entry))
2084 break;
2085 }
2086 rcu_read_unlock();
2087
2088 if (entry)
2089 *indexp = xas.xa_index;
2090 return entry;
2091}
2092EXPORT_SYMBOL(xa_find_after);
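/*
 * Example (illustrative sketch): xa_find() and xa_find_after() are the
 * building blocks of the xa_for_each() family; an equivalent open-coded
 * walk looks like this ('process' is a hypothetical callback).
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
 *	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
 *		process(index, entry);
 */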
2093
Matthew Wilcox80a0a1a2017-11-14 16:42:22 -05002094static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2095 unsigned long max, unsigned int n)
2096{
2097 void *entry;
2098 unsigned int i = 0;
2099
2100 rcu_read_lock();
2101 xas_for_each(xas, entry, max) {
2102 if (xas_retry(xas, entry))
2103 continue;
2104 dst[i++] = entry;
2105 if (i == n)
2106 break;
2107 }
2108 rcu_read_unlock();
2109
2110 return i;
2111}
2112
2113static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2114 unsigned long max, unsigned int n, xa_mark_t mark)
2115{
2116 void *entry;
2117 unsigned int i = 0;
2118
2119 rcu_read_lock();
2120 xas_for_each_marked(xas, entry, max, mark) {
2121 if (xas_retry(xas, entry))
2122 continue;
2123 dst[i++] = entry;
2124 if (i == n)
2125 break;
2126 }
2127 rcu_read_unlock();
2128
2129 return i;
2130}
2131
2132/**
2133 * xa_extract() - Copy selected entries from the XArray into a normal array.
2134 * @xa: The source XArray to copy from.
2135 * @dst: The buffer to copy entries into.
2136 * @start: The first index in the XArray eligible to be selected.
2137 * @max: The last index in the XArray eligible to be selected.
2138 * @n: The maximum number of entries to copy.
2139 * @filter: Selection criterion.
2140 *
2141 * Copies up to @n entries that match @filter from the XArray. The
2142 * copied entries will have indices between @start and @max, inclusive.
2143 *
2144 * The @filter may be an XArray mark value, in which case entries which are
2145 * marked with that mark will be copied. It may also be %XA_PRESENT, in
Matthew Wilcox804dfaf2018-11-05 16:37:15 -05002146 * which case all entries which are not %NULL will be copied.
Matthew Wilcox80a0a1a2017-11-14 16:42:22 -05002147 *
2148 * The entries returned may not represent a snapshot of the XArray at a
2149 * moment in time. For example, if another thread stores to index 5, then
2150 * index 10, calling xa_extract() may return the old contents of index 5
2151 * and the new contents of index 10. Indices not modified while this
2152 * function is running will not be skipped.
2153 *
2154 * If you need stronger guarantees, holding the xa_lock across calls to this
2155 * function will prevent concurrent modification.
2156 *
2157 * Context: Any context. Takes and releases the RCU lock.
2158 * Return: The number of entries copied.
2159 */
2160unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
2161 unsigned long max, unsigned int n, xa_mark_t filter)
2162{
2163 XA_STATE(xas, xa, start);
2164
2165 if (!n)
2166 return 0;
2167
2168 if ((__force unsigned int)filter < XA_MAX_MARKS)
2169 return xas_extract_marked(&xas, dst, max, n, filter);
2170 return xas_extract_present(&xas, dst, max, n);
2171}
2172EXPORT_SYMBOL(xa_extract);
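/*
 * Example (illustrative sketch): copying up to 16 present entries from
 * the first 4096 indices into an on-stack batch.  Names are hypothetical.
 *
 *	void *batch[16];
 *	unsigned int count;
 *
 *	count = xa_extract(xa, batch, 0, 4095, ARRAY_SIZE(batch),
 *			XA_PRESENT);
 */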
2173
Matthew Wilcox687149f2017-11-17 08:16:34 -05002174/**
Matthew Wilcox (Oracle)f82cd2f2020-08-18 09:05:56 -04002175 * xa_delete_node() - Private interface for workingset code.
2176 * @node: Node to be removed from the tree.
2177 * @update: Function to call to update ancestor nodes.
2178 *
2179 * Context: xa_lock must be held on entry and will not be released.
2180 */
2181void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2182{
2183 struct xa_state xas = {
2184 .xa = node->array,
2185 .xa_index = (unsigned long)node->offset <<
2186 (node->shift + XA_CHUNK_SHIFT),
2187 .xa_shift = node->shift + XA_CHUNK_SHIFT,
2188 .xa_offset = node->offset,
2189 .xa_node = xa_parent_locked(node->array, node),
2190 .xa_update = update,
2191 };
2192
2193 xas_store(&xas, NULL);
2194}
2195EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
2196
2197/**
Matthew Wilcox687149f2017-11-17 08:16:34 -05002198 * xa_destroy() - Free all internal data structures.
2199 * @xa: XArray.
2200 *
2201 * After calling this function, the XArray is empty and has freed all memory
2202 * allocated for its internal data structures. You are responsible for
2203 * freeing the objects referenced by the XArray.
2204 *
2205 * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
2206 */
2207void xa_destroy(struct xarray *xa)
2208{
2209 XA_STATE(xas, xa, 0);
2210 unsigned long flags;
2211 void *entry;
2212
2213 xas.xa_node = NULL;
2214 xas_lock_irqsave(&xas, flags);
2215 entry = xa_head_locked(xa);
2216 RCU_INIT_POINTER(xa->xa_head, NULL);
2217 xas_init_marks(&xas);
Matthew Wilcox3ccaf572018-10-26 14:43:22 -04002218 if (xa_zero_busy(xa))
2219 xa_mark_clear(xa, XA_FREE_MARK);
Matthew Wilcox687149f2017-11-17 08:16:34 -05002220 /* lockdep checks we're still holding the lock in xas_free_nodes() */
2221 if (xa_is_node(entry))
2222 xas_free_nodes(&xas, xa_to_node(entry));
2223 xas_unlock_irqrestore(&xas, flags);
2224}
2225EXPORT_SYMBOL(xa_destroy);
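/*
 * Example (illustrative sketch): because xa_destroy() does not free the
 * objects the array points to, a caller typically erases and frees each
 * entry first.  'my_cache' and the object type are hypothetical.
 *
 *	unsigned long index;
 *	struct my_object *obj;
 *
 *	xa_for_each(&my_cache, index, obj)
 *		kfree(xa_erase(&my_cache, index));
 *	xa_destroy(&my_cache);
 */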
2226
Matthew Wilcoxad3d6c72017-11-07 14:57:46 -05002227#ifdef XA_DEBUG
2228void xa_dump_node(const struct xa_node *node)
2229{
2230 unsigned i, j;
2231
2232 if (!node)
2233 return;
2234 if ((unsigned long)node & 3) {
2235 pr_cont("node %px\n", node);
2236 return;
2237 }
2238
2239 pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2240 "array %px list %px %px marks",
2241 node, node->parent ? "offset" : "max", node->offset,
2242 node->parent, node->shift, node->count, node->nr_values,
2243 node->array, node->private_list.prev, node->private_list.next);
2244 for (i = 0; i < XA_MAX_MARKS; i++)
2245 for (j = 0; j < XA_MARK_LONGS; j++)
2246 pr_cont(" %lx", node->marks[i][j]);
2247 pr_cont("\n");
2248}
2249
2250void xa_dump_index(unsigned long index, unsigned int shift)
2251{
2252 if (!shift)
2253 pr_info("%lu: ", index);
2254 else if (shift >= BITS_PER_LONG)
2255 pr_info("0-%lu: ", ~0UL);
2256 else
2257 pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2258}
2259
2260void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2261{
2262 if (!entry)
2263 return;
2264
2265 xa_dump_index(index, shift);
2266
2267 if (xa_is_node(entry)) {
2268 if (shift == 0) {
2269 pr_cont("%px\n", entry);
2270 } else {
2271 unsigned long i;
2272 struct xa_node *node = xa_to_node(entry);
2273 xa_dump_node(node);
2274 for (i = 0; i < XA_CHUNK_SIZE; i++)
2275 xa_dump_entry(node->slots[i],
2276 index + (i << node->shift), node->shift);
2277 }
2278 } else if (xa_is_value(entry))
2279 pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2280 xa_to_value(entry), entry);
2281 else if (!xa_is_internal(entry))
2282 pr_cont("%px\n", entry);
2283 else if (xa_is_retry(entry))
2284 pr_cont("retry (%ld)\n", xa_to_internal(entry));
2285 else if (xa_is_sibling(entry))
2286 pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
Matthew Wilcox9f14d4f2018-10-01 14:54:59 -04002287 else if (xa_is_zero(entry))
2288 pr_cont("zero (%ld)\n", xa_to_internal(entry));
Matthew Wilcoxad3d6c72017-11-07 14:57:46 -05002289 else
2290 pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2291}
2292
2293void xa_dump(const struct xarray *xa)
2294{
2295 void *entry = xa->xa_head;
2296 unsigned int shift = 0;
2297
2298 pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
Matthew Wilcox9b89a032017-11-10 09:34:31 -05002299 xa->xa_flags, xa_marked(xa, XA_MARK_0),
2300 xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
Matthew Wilcoxad3d6c72017-11-07 14:57:46 -05002301 if (xa_is_node(entry))
2302 shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2303 xa_dump_entry(entry, 0, shift);
2304}
2305#endif