blob: 89acec742e0bfdb2b1aab30e31991655462f05d9 [file] [log] [blame]
/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */
14
15#define FSCACHE_DEBUG_LEVEL COOKIE
16#include <linux/module.h>
17#include <linux/slab.h>
18#include "internal.h"
19
/* Slab cache from which all fscache_cookie structures are allocated */
struct kmem_cache *fscache_cookie_jar;

/* Monotonic counter used to assign a debug ID to each new backing object */
static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object);
29
David Howells955d00912009-04-03 16:42:38 +010030/*
31 * initialise an cookie jar slab element prior to any use
32 */
33void fscache_cookie_init_once(void *_cookie)
34{
35 struct fscache_cookie *cookie = _cookie;
36
37 memset(cookie, 0, sizeof(*cookie));
38 spin_lock_init(&cookie->lock);
David Howells1bccf512009-11-19 18:11:25 +000039 spin_lock_init(&cookie->stores_lock);
David Howells955d00912009-04-03 16:42:38 +010040 INIT_HLIST_HEAD(&cookie->backing_objects);
41}
42
/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	void *netfs_data,
	bool enable)
{
	struct fscache_cookie *cookie;

	BUG_ON(!def);

	_enter("{%s},{%s},%p,%u",
	       parent ? (char *) parent->def->name : "<no-parent>",
	       def->name, netfs_data, enable);

	fscache_stat(&fscache_n_acquires);

	/* if there's no parent cookie, then we don't create one here either */
	if (!parent) {
		fscache_stat(&fscache_n_acquires_null);
		_leave(" [no parent]");
		return NULL;
	}

	/* validate the definition */
	BUG_ON(!def->get_key);
	BUG_ON(!def->name[0]);

	/* an index may only be a child of another index */
	BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
	       parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

	/* allocate and initialise a cookie */
	cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		_leave(" [ENOMEM]");
		return NULL;
	}

	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* We keep the active count elevated until relinquishment to prevent an
	 * attempt to wake up every time the object operations queue quiesces.
	 */
	atomic_set(&cookie->n_active, 1);

	/* the new cookie pins its parent: one usage ref plus a child count */
	atomic_inc(&parent->usage);
	atomic_inc(&parent->n_children);

	cookie->def = def;
	cookie->parent = parent;
	cookie->netfs_data = netfs_data;
	cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);

	/* radix tree insertion won't use the preallocation pool unless it's
	 * told it may not wait */
	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

	/* account the cookie by type */
	switch (cookie->def->type) {
	case FSCACHE_COOKIE_TYPE_INDEX:
		fscache_stat(&fscache_n_cookie_index);
		break;
	case FSCACHE_COOKIE_TYPE_DATAFILE:
		fscache_stat(&fscache_n_cookie_data);
		break;
	default:
		fscache_stat(&fscache_n_cookie_special);
		break;
	}

	if (enable) {
		/* if the object is an index then we need do nothing more here
		 * - we create indices on disk when we need them as an index
		 * may exist in multiple caches */
		if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
			if (fscache_acquire_non_index_cookie(cookie) == 0) {
				set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
			} else {
				/* no backing object available: undo the parent
				 * pinning and report NULL, not an error */
				atomic_dec(&parent->n_children);
				__fscache_cookie_put(cookie);
				fscache_stat(&fscache_n_acquires_nobufs);
				_leave(" = NULL");
				return NULL;
			}
		} else {
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
		}
	}

	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = %p", cookie);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
152
/*
 * Enable a cookie to permit it to accept new operations.
 */
void __fscache_enable_cookie(struct fscache_cookie *cookie,
			     bool (*can_enable)(void *data),
			     void *data)
{
	_enter("%p", cookie);

	/* serialise against concurrent enable/disable of this cookie */
	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 TASK_UNINTERRUPTIBLE);

	/* nothing to do if it's already enabled */
	if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock;

	if (can_enable && !can_enable(data)) {
		/* The netfs decided it didn't want to enable after all */
	} else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
		/* Wait for outstanding disablement to complete */
		__fscache_wait_on_invalidate(cookie);

		if (fscache_acquire_non_index_cookie(cookie) == 0)
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	} else {
		/* indices need no backing object to be enabled */
		set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	}

out_unlock:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
}
EXPORT_SYMBOL(__fscache_enable_cookie);
185
/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 * - returns 0 on success (including "no caches present"), -ENOMEDIUM if no
 *   cache will take the object, or -ENOBUFS if lookup failed
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	uint64_t i_size;
	int ret;

	_enter("");

	/* assume unavailable until lookup proves otherwise */
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

	/* now we need to see whether the backing objects for this cookie yet
	 * exist, if not there'll be nothing to search */
	down_read(&fscache_addremove_sem);

	if (list_empty(&fscache_cache_list)) {
		up_read(&fscache_addremove_sem);
		_leave(" = 0 [no caches]");
		return 0;
	}

	/* select a cache in which to store the object */
	cache = fscache_select_cache_for_object(cookie->parent);
	if (!cache) {
		up_read(&fscache_addremove_sem);
		fscache_stat(&fscache_n_acquires_no_cache);
		_leave(" = -ENOMEDIUM [no cache]");
		return -ENOMEDIUM;
	}

	_debug("cache %s", cache->tag->name);

	set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);

	/* ask the cache to allocate objects for this cookie and its parent
	 * chain */
	ret = fscache_alloc_object(cache, cookie);
	if (ret < 0) {
		up_read(&fscache_addremove_sem);
		_leave(" = %d", ret);
		return ret;
	}

	/* pass on how big the object we're caching is supposed to be */
	cookie->def->get_attr(cookie->netfs_data, &i_size);

	spin_lock(&cookie->lock);
	if (hlist_empty(&cookie->backing_objects)) {
		spin_unlock(&cookie->lock);
		goto unavailable;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	fscache_set_store_limit(object, i_size);

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

	spin_unlock(&cookie->lock);

	/* we may be required to wait for lookup to complete at this point */
	if (!fscache_defer_lookup) {
		_debug("non-deferred lookup %p", &cookie->flags);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			    TASK_UNINTERRUPTIBLE);
		_debug("complete");
		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
			goto unavailable;
	}

	up_read(&fscache_addremove_sem);
	_leave(" = 0 [deferred]");
	return 0;

unavailable:
	up_read(&fscache_addremove_sem);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
273
/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 * - walks up the parent chain via recursion; an already-extant live object
 *   for this cache terminates the recursion early
 */
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	int ret;

	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);

	/* first check whether this cookie already has an object in this
	 * cache */
	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, &cookie->backing_objects,
			     cookie_link) {
		if (object->cache == cache)
			goto object_already_extant;
	}
	spin_unlock(&cookie->lock);

	/* ask the cache to allocate an object (we may end up with duplicate
	 * objects at this stage, but we sort that out later) */
	fscache_stat(&fscache_n_cop_alloc_object);
	object = cache->ops->alloc_object(cache, cookie);
	fscache_stat_d(&fscache_n_cop_alloc_object);
	if (IS_ERR(object)) {
		fscache_stat(&fscache_n_object_no_alloc);
		ret = PTR_ERR(object);
		goto error;
	}

	fscache_stat(&fscache_n_object_alloc);

	object->debug_id = atomic_inc_return(&fscache_object_debug_id);

	_debug("ALLOC OBJ%x: %s {%lx}",
	       object->debug_id, cookie->def->name, object->events);

	/* recurse to make sure the whole parent chain has objects too */
	ret = fscache_alloc_object(cache, cookie->parent);
	if (ret < 0)
		goto error_put;

	/* only attach if we managed to allocate all we needed, otherwise
	 * discard the object we just allocated and instead use the one
	 * attached to the cookie */
	if (fscache_attach_object(cookie, object) < 0) {
		fscache_stat(&fscache_n_cop_put_object);
		cache->ops->put_object(object);
		fscache_stat_d(&fscache_n_cop_put_object);
	}

	_leave(" = 0");
	return 0;

object_already_extant:
	/* reuse the existing object - but not if it's dying */
	ret = -ENOBUFS;
	if (fscache_object_is_dead(object)) {
		spin_unlock(&cookie->lock);
		goto error;
	}
	spin_unlock(&cookie->lock);
	_leave(" = 0 [found]");
	return 0;

error_put:
	fscache_stat(&fscache_n_cop_put_object);
	cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
error:
	_leave(" = %d", ret);
	return ret;
}
346
/*
 * attach a cache object to a cookie
 * - returns 0 on success, -EEXIST if an object for this cache is already
 *   attached, or -ENOBUFS if the existing/parent object is dying
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object)
{
	struct fscache_object *p;
	struct fscache_cache *cache = object->cache;
	int ret;

	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

	spin_lock(&cookie->lock);

	/* there may be multiple initial creations of this object, but we only
	 * want one */
	ret = -EEXIST;
	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p))
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
	}

	/* pin the parent object */
	/* nested lock class: parent lock taken inside child's lock */
	spin_lock_nested(&cookie->parent->lock, 1);
	hlist_for_each_entry(p, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p)) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
			}
			object->parent = p;
			spin_lock(&p->lock);
			p->n_children++;
			spin_unlock(&p->lock);
			break;
		}
	}
	spin_unlock(&cookie->parent->lock);

	/* attach to the cache's object list */
	if (list_empty(&object->cache_link)) {
		spin_lock(&cache->object_list_lock);
		list_add(&object->cache_link, &cache->object_list);
		spin_unlock(&cache->object_list_lock);
	}

	/* attach to the cookie */
	object->cookie = cookie;
	atomic_inc(&cookie->usage);
	hlist_add_head(&object->cookie_link, &cookie->backing_objects);

	fscache_objlist_add(object);
	ret = 0;

cant_attach_object:
	spin_unlock(&cookie->lock);
	_leave(" = %d", ret);
	return ret;
}
411
/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	_enter("{%s}", cookie->def->name);

	fscache_stat(&fscache_n_invalidates);

	/* Only permit invalidation of data files.  Invalidating an index will
	 * require the caller to release all its attachments to the tree rooted
	 * there, and if it's doing that, it may as well just retire the
	 * cookie.
	 */
	ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	/* We will be updating the cookie too. */
	BUG_ON(!cookie->def->get_aux);

	/* If there's an object, we tell the object state machine to handle the
	 * invalidation on our behalf, otherwise there's nothing to do.
	 */
	if (!hlist_empty(&cookie->backing_objects)) {
		spin_lock(&cookie->lock);

		/* re-check under the lock; only raise the event once (the
		 * INVALIDATING bit guards against double-invalidation) */
		if (fscache_cookie_enabled(cookie) &&
		    !hlist_empty(&cookie->backing_objects) &&
		    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
				      &cookie->flags)) {
			object = hlist_entry(cookie->backing_objects.first,
					     struct fscache_object,
					     cookie_link);
			if (fscache_object_is_live(object))
				fscache_raise_event(
					object, FSCACHE_OBJECT_EV_INVALIDATE);
		}

		spin_unlock(&cookie->lock);
	}

	_leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);
457
/*
 * Wait for object invalidation to complete.  Sleeps uninterruptibly until
 * the INVALIDATING flag is cleared by the object state machine.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
	_enter("%p", cookie);

	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
		    TASK_UNINTERRUPTIBLE);

	_leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
471
/*
 * update the index entries backing a cookie
 * - a NULL cookie is tolerated (counted and ignored)
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	fscache_stat(&fscache_n_updates);

	if (!cookie) {
		fscache_stat(&fscache_n_updates_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("{%s}", cookie->def->name);

	/* updating requires auxiliary data from the netfs */
	BUG_ON(!cookie->def->get_aux);

	spin_lock(&cookie->lock);

	if (fscache_cookie_enabled(cookie)) {
		/* update the index entry on disk in each cache backing this
		 * cookie.
		 */
		hlist_for_each_entry(object,
				     &cookie->backing_objects, cookie_link) {
			fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
		}
	}

	spin_unlock(&cookie->lock);
	_leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
507
/*
 * Disable a cookie to stop it from accepting new requests from the netfs.
 * - if invalidate is true, the backing objects are retired (data discarded)
 * - all children must have been relinquished first
 */
void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
{
	struct fscache_object *object;
	bool awaken = false;

	_enter("%p,%u", cookie, invalidate);

	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

	if (atomic_read(&cookie->n_children) != 0) {
		pr_err("Cookie '%s' still has children\n",
		       cookie->def->name);
		BUG();
	}

	/* serialise against concurrent enable/disable of this cookie */
	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 TASK_UNINTERRUPTIBLE);
	if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock_enable;

	/* If the cookie is being invalidated, wait for that to complete first
	 * so that we can reuse the flag.
	 */
	__fscache_wait_on_invalidate(cookie);

	/* Dispose of the backing objects */
	set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);

	spin_lock(&cookie->lock);
	if (!hlist_empty(&cookie->backing_objects)) {
		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
			if (invalidate)
				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}
	} else {
		/* no objects to kill - clear the flag we just set ourselves */
		if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
			awaken = true;
	}
	spin_unlock(&cookie->lock);
	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Wait for cessation of activity requiring access to the netfs (when
	 * n_active reaches 0).  This makes sure outstanding reads and writes
	 * have completed.
	 */
	if (!atomic_dec_and_test(&cookie->n_active))
		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);

	/* Reset the cookie state if it wasn't relinquished */
	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
		atomic_inc(&cookie->n_active);
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
	}

out_unlock_enable:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
	_leave("");
}
EXPORT_SYMBOL(__fscache_disable_cookie);
574
/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	if (!cookie) {
		fscache_stat(&fscache_n_relinquishes_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("%p{%s,%p,%d},%d",
	       cookie, cookie->def->name, cookie->netfs_data,
	       atomic_read(&cookie->n_active), retire);

	/* No further netfs-accessing operations on this cookie permitted */
	set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);

	__fscache_disable_cookie(cookie, retire);

	/* Clear pointers back to the netfs */
	cookie->netfs_data = NULL;
	cookie->def = NULL;
	/* disablement should have flushed all pending stores */
	BUG_ON(cookie->stores.rnode);

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
		ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
		atomic_dec(&cookie->parent->n_children);
	}

	/* Dispose of the netfs's link to the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie);

	_leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
620
621/*
David Howells955d00912009-04-03 16:42:38 +0100622 * destroy a cookie
623 */
624void __fscache_cookie_put(struct fscache_cookie *cookie)
625{
626 struct fscache_cookie *parent;
627
628 _enter("%p", cookie);
629
630 for (;;) {
631 _debug("FREE COOKIE %p", cookie);
632 parent = cookie->parent;
633 BUG_ON(!hlist_empty(&cookie->backing_objects));
634 kmem_cache_free(fscache_cookie_jar, cookie);
635
636 if (!parent)
637 break;
638
639 cookie = parent;
640 BUG_ON(atomic_read(&cookie->usage) <= 0);
641 if (!atomic_dec_and_test(&cookie->usage))
642 break;
643 }
644
645 _leave("");
646}
David Howellsda9803bc2013-08-21 17:29:38 -0400647
/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: it only serves no-index type
 * - returns 0 if consistent, -ESTALE if not, -ERESTARTSYS if interrupted
 *   while waiting for a deferred lookup, or -ENOMEM
 */
int __fscache_check_consistency(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,", cookie);

	ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	/* no backing object means nothing to be inconsistent with */
	if (hlist_empty(&cookie->backing_objects))
		return 0;

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(op, NULL, NULL);
	op->flags = FSCACHE_OP_MYTHREAD |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto inconsistent;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto inconsistent;

	op->debug_id = atomic_inc_return(&fscache_op_debug_id);

	/* pin the cookie for the duration of the operation */
	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, op) < 0)
		goto submit_failed;

	/* the work queue now carries its own ref on the object */
	spin_unlock(&cookie->lock);

	ret = fscache_wait_for_operation_activation(object, op,
						    NULL, NULL, NULL);
	if (ret == 0) {
		/* ask the cache to honour the operation */
		ret = object->cache->ops->check_consistency(op);
		fscache_op_complete(op, false);
	} else if (ret == -ENOBUFS) {
		ret = 0;
	}

	fscache_put_operation(op);
	_leave(" = %d", ret);
	return ret;

submit_failed:
	wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	kfree(op);
	_leave(" = -ESTALE");
	return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);