xref: /openbmc/linux/fs/fscache/cookie.c (revision 161f4089)
/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object);

/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
	struct fscache_cookie *cookie = _cookie;

	memset(cookie, 0, sizeof(*cookie));
	spin_lock_init(&cookie->lock);
	spin_lock_init(&cookie->stores_lock);
	INIT_HLIST_HEAD(&cookie->backing_objects);
}

/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	void *netfs_data,
	bool enable)
{
	struct fscache_cookie *cookie;

	BUG_ON(!def);

	_enter("{%s},{%s},%p,%u",
	       parent ? (char *) parent->def->name : "<no-parent>",
	       def->name, netfs_data, enable);

	fscache_stat(&fscache_n_acquires);

	/* if there's no parent cookie, then we don't create one here either */
	if (!parent) {
		fscache_stat(&fscache_n_acquires_null);
		_leave(" [no parent]");
		return NULL;
	}

	/* validate the definition */
	BUG_ON(!def->get_key);
	BUG_ON(!def->name[0]);

	BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
	       parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

	/* allocate and initialise a cookie */
	cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		_leave(" [ENOMEM]");
		return NULL;
	}

	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* We keep the active count elevated until relinquishment to prevent an
	 * attempt to wake up every time the object operations queue quiesces.
	 */
	atomic_set(&cookie->n_active, 1);

	atomic_inc(&parent->usage);
	atomic_inc(&parent->n_children);

	cookie->def		= def;
	cookie->parent		= parent;
	cookie->netfs_data	= netfs_data;
	cookie->flags		= (1 << FSCACHE_COOKIE_NO_DATA_YET);

	/* radix tree insertion won't use the preallocation pool unless it's
	 * told it may not wait */
	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

	switch (cookie->def->type) {
	case FSCACHE_COOKIE_TYPE_INDEX:
		fscache_stat(&fscache_n_cookie_index);
		break;
	case FSCACHE_COOKIE_TYPE_DATAFILE:
		fscache_stat(&fscache_n_cookie_data);
		break;
	default:
		fscache_stat(&fscache_n_cookie_special);
		break;
	}

	if (enable) {
		/* if the object is an index then we need do nothing more here
		 * - we create indices on disk when we need them as an index
		 * may exist in multiple caches */
		if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
			if (fscache_acquire_non_index_cookie(cookie) == 0) {
				set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
			} else {
				atomic_dec(&parent->n_children);
				__fscache_cookie_put(cookie);
				fscache_stat(&fscache_n_acquires_nobufs);
				_leave(" = NULL");
				return NULL;
			}
		} else {
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
		}
	}

	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = %p", cookie);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
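
/*
 * Illustrative sketch only (not part of this file's logic): a netfs would
 * normally reach the function above through the fscache_acquire_cookie()
 * wrapper in linux/fscache.h, roughly as below.  "my_netfs",
 * "my_inode_cookie_def" and "inode" are hypothetical netfs-side names; the
 * primary_index comes from the fscache_netfs struct the filesystem
 * registered:
 *
 *	inode->cookie = fscache_acquire_cookie(my_netfs.primary_index,
 *					       &my_inode_cookie_def,
 *					       inode, true);
 *	if (!inode->cookie)
 *		;	(caching is simply unavailable; the netfs carries on)
 */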

/*
 * Enable a cookie to permit it to accept new operations.
 */
void __fscache_enable_cookie(struct fscache_cookie *cookie,
			     bool (*can_enable)(void *data),
			     void *data)
{
	_enter("%p", cookie);

	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 fscache_wait_bit, TASK_UNINTERRUPTIBLE);

	if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock;

	if (can_enable && !can_enable(data)) {
		/* The netfs decided it didn't want to enable after all */
	} else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
		/* Wait for outstanding disablement to complete */
		__fscache_wait_on_invalidate(cookie);

		if (fscache_acquire_non_index_cookie(cookie) == 0)
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	} else {
		set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	}

out_unlock:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
}
EXPORT_SYMBOL(__fscache_enable_cookie);
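
/*
 * Minimal usage sketch, assuming the fscache_enable_cookie() wrapper and a
 * hypothetical netfs-side predicate that vetoes enablement while the inode
 * is still open for direct writing:
 *
 *	static bool my_can_enable(void *data)
 *	{
 *		struct inode *inode = data;
 *		return !my_inode_open_for_write(inode);
 *	}
 *
 *	fscache_enable_cookie(inode->cookie, my_can_enable, inode);
 */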

/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	uint64_t i_size;
	int ret;

	_enter("");

	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

	/* now we need to see whether the backing objects for this cookie yet
	 * exist; if not, there'll be nothing to search */
	down_read(&fscache_addremove_sem);

	if (list_empty(&fscache_cache_list)) {
		up_read(&fscache_addremove_sem);
		_leave(" = 0 [no caches]");
		return 0;
	}

	/* select a cache in which to store the object */
	cache = fscache_select_cache_for_object(cookie->parent);
	if (!cache) {
		up_read(&fscache_addremove_sem);
		fscache_stat(&fscache_n_acquires_no_cache);
		_leave(" = -ENOMEDIUM [no cache]");
		return -ENOMEDIUM;
	}

	_debug("cache %s", cache->tag->name);

	set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);

	/* ask the cache to allocate objects for this cookie and its parent
	 * chain */
	ret = fscache_alloc_object(cache, cookie);
	if (ret < 0) {
		up_read(&fscache_addremove_sem);
		_leave(" = %d", ret);
		return ret;
	}

	/* pass on how big the object we're caching is supposed to be */
	cookie->def->get_attr(cookie->netfs_data, &i_size);

	spin_lock(&cookie->lock);
	if (hlist_empty(&cookie->backing_objects)) {
		spin_unlock(&cookie->lock);
		goto unavailable;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	fscache_set_store_limit(object, i_size);

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

	spin_unlock(&cookie->lock);

	/* we may be required to wait for lookup to complete at this point */
	if (!fscache_defer_lookup) {
		_debug("non-deferred lookup %p", &cookie->flags);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("complete");
		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
			goto unavailable;
	}

	up_read(&fscache_addremove_sem);
	_leave(" = 0 [deferred]");
	return 0;

unavailable:
	up_read(&fscache_addremove_sem);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	int ret;

	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);

	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, &cookie->backing_objects,
			     cookie_link) {
		if (object->cache == cache)
			goto object_already_extant;
	}
	spin_unlock(&cookie->lock);

	/* ask the cache to allocate an object (we may end up with duplicate
	 * objects at this stage, but we sort that out later) */
	fscache_stat(&fscache_n_cop_alloc_object);
	object = cache->ops->alloc_object(cache, cookie);
	fscache_stat_d(&fscache_n_cop_alloc_object);
	if (IS_ERR(object)) {
		fscache_stat(&fscache_n_object_no_alloc);
		ret = PTR_ERR(object);
		goto error;
	}

	fscache_stat(&fscache_n_object_alloc);

	object->debug_id = atomic_inc_return(&fscache_object_debug_id);

	_debug("ALLOC OBJ%x: %s {%lx}",
	       object->debug_id, cookie->def->name, object->events);

	ret = fscache_alloc_object(cache, cookie->parent);
	if (ret < 0)
		goto error_put;

	/* only attach if we managed to allocate all we needed, otherwise
	 * discard the object we just allocated and instead use the one
	 * attached to the cookie */
	if (fscache_attach_object(cookie, object) < 0) {
		fscache_stat(&fscache_n_cop_put_object);
		cache->ops->put_object(object);
		fscache_stat_d(&fscache_n_cop_put_object);
	}

	_leave(" = 0");
	return 0;

object_already_extant:
	ret = -ENOBUFS;
	if (fscache_object_is_dead(object)) {
		spin_unlock(&cookie->lock);
		goto error;
	}
	spin_unlock(&cookie->lock);
	_leave(" = 0 [found]");
	return 0;

error_put:
	fscache_stat(&fscache_n_cop_put_object);
	cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object)
{
	struct fscache_object *p;
	struct fscache_cache *cache = object->cache;
	int ret;

	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

	spin_lock(&cookie->lock);

	/* there may be multiple initial creations of this object, but we only
	 * want one */
	ret = -EEXIST;
	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p))
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
	}

	/* pin the parent object */
	spin_lock_nested(&cookie->parent->lock, 1);
	hlist_for_each_entry(p, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p)) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
			}
			object->parent = p;
			spin_lock(&p->lock);
			p->n_children++;
			spin_unlock(&p->lock);
			break;
		}
	}
	spin_unlock(&cookie->parent->lock);

	/* attach to the cache's object list */
	if (list_empty(&object->cache_link)) {
		spin_lock(&cache->object_list_lock);
		list_add(&object->cache_link, &cache->object_list);
		spin_unlock(&cache->object_list_lock);
	}

	/* attach to the cookie */
	object->cookie = cookie;
	atomic_inc(&cookie->usage);
	hlist_add_head(&object->cookie_link, &cookie->backing_objects);

	fscache_objlist_add(object);
	ret = 0;

cant_attach_object:
	spin_unlock(&cookie->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	_enter("{%s}", cookie->def->name);

	fscache_stat(&fscache_n_invalidates);

	/* Only permit invalidation of data files.  Invalidating an index will
	 * require the caller to release all its attachments to the tree rooted
	 * there, and if it's doing that, it may as well just retire the
	 * cookie.
	 */
	ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	/* We will be updating the cookie too. */
	BUG_ON(!cookie->def->get_aux);

	/* If there's an object, we tell the object state machine to handle the
	 * invalidation on our behalf, otherwise there's nothing to do.
	 */
	if (!hlist_empty(&cookie->backing_objects)) {
		spin_lock(&cookie->lock);

		if (fscache_cookie_enabled(cookie) &&
		    !hlist_empty(&cookie->backing_objects) &&
		    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
				      &cookie->flags)) {
			object = hlist_entry(cookie->backing_objects.first,
					     struct fscache_object,
					     cookie_link);
			if (fscache_object_is_live(object))
				fscache_raise_event(
					object, FSCACHE_OBJECT_EV_INVALIDATE);
		}

		spin_unlock(&cookie->lock);
	}

	_leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
	_enter("%p", cookie);

	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
		    fscache_wait_bit_interruptible,
		    TASK_UNINTERRUPTIBLE);

	_leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
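
/*
 * Sketch of the usual pairing, assuming the fscache_invalidate() and
 * fscache_wait_on_invalidate() wrappers: when the netfs learns that the
 * server's copy of the data has changed, it kicks off invalidation and,
 * before issuing further cache reads or writes, waits for it to finish:
 *
 *	fscache_invalidate(inode->cookie);
 *	...
 *	fscache_wait_on_invalidate(inode->cookie);
 */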

/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	fscache_stat(&fscache_n_updates);

	if (!cookie) {
		fscache_stat(&fscache_n_updates_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("{%s}", cookie->def->name);

	BUG_ON(!cookie->def->get_aux);

	spin_lock(&cookie->lock);

	if (fscache_cookie_enabled(cookie)) {
		/* update the index entry on disk in each cache backing this
		 * cookie.
		 */
		hlist_for_each_entry(object,
				     &cookie->backing_objects, cookie_link) {
			fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
		}
	}

	spin_unlock(&cookie->lock);
	_leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
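
/*
 * Sketch, assuming the fscache_update_cookie() wrapper: after the netfs has
 * changed whatever its get_aux() op reports (size, mtime or similar
 * coherency data), it nudges the backing index entries to be rewritten:
 *
 *	fscache_update_cookie(inode->cookie);
 */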

/*
 * Disable a cookie to stop it from accepting new requests from the netfs.
 */
void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
{
	struct fscache_object *object;
	bool awaken = false;

	_enter("%p,%u", cookie, invalidate);

	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

	if (atomic_read(&cookie->n_children) != 0) {
		printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
		       cookie->def->name);
		BUG();
	}

	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock_enable;

	/* If the cookie is being invalidated, wait for that to complete first
	 * so that we can reuse the flag.
	 */
	__fscache_wait_on_invalidate(cookie);

	/* Dispose of the backing objects */
	set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);

	spin_lock(&cookie->lock);
	if (!hlist_empty(&cookie->backing_objects)) {
		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
			if (invalidate)
				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}
	} else {
		if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
			awaken = true;
	}
	spin_unlock(&cookie->lock);
	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Wait for cessation of activity requiring access to the netfs (when
	 * n_active reaches 0).  This makes sure outstanding reads and writes
	 * have completed.
	 */
	if (!atomic_dec_and_test(&cookie->n_active))
		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);

	/* Reset the cookie state if it wasn't relinquished */
	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
		atomic_inc(&cookie->n_active);
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
	}

out_unlock_enable:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
	_leave("");
}
EXPORT_SYMBOL(__fscache_disable_cookie);
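
/*
 * Sketch, assuming the fscache_disable_cookie() wrapper: a netfs might turn
 * caching off for a file it is about to write through directly, binning any
 * now-stale copy in the cache at the same time:
 *
 *	fscache_disable_cookie(inode->cookie, true);
 */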

/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	if (!cookie) {
		fscache_stat(&fscache_n_relinquishes_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("%p{%s,%p,%d},%d",
	       cookie, cookie->def->name, cookie->netfs_data,
	       atomic_read(&cookie->n_active), retire);

	/* No further netfs-accessing operations on this cookie permitted */
	set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);

	__fscache_disable_cookie(cookie, retire);

	/* Clear pointers back to the netfs */
	cookie->netfs_data	= NULL;
	cookie->def		= NULL;
	BUG_ON(cookie->stores.rnode);

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
		ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
		atomic_dec(&cookie->parent->n_children);
	}

	/* Dispose of the netfs's link to the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie);

	_leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
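
/*
 * Sketch, assuming the fscache_relinquish_cookie() wrapper: on final inode
 * eviction the netfs drops its cookie, retiring (discarding) the on-disk
 * object if the file itself was deleted.  "inode_was_deleted" is a
 * hypothetical netfs-side flag:
 *
 *	fscache_relinquish_cookie(inode->cookie, inode_was_deleted);
 *	inode->cookie = NULL;
 */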

/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
	struct fscache_cookie *parent;

	_enter("%p", cookie);

	for (;;) {
		_debug("FREE COOKIE %p", cookie);
		parent = cookie->parent;
		BUG_ON(!hlist_empty(&cookie->backing_objects));
		kmem_cache_free(fscache_cookie_jar, cookie);

		if (!parent)
			break;

		cookie = parent;
		BUG_ON(atomic_read(&cookie->usage) <= 0);
		if (!atomic_dec_and_test(&cookie->usage))
			break;
	}

	_leave("");
}

/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: this only serves non-index type cookies
 */
int __fscache_check_consistency(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,", cookie);

	ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	if (hlist_empty(&cookie->backing_objects))
		return 0;

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(op, NULL, NULL);
	op->flags = FSCACHE_OP_MYTHREAD |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto inconsistent;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto inconsistent;

	op->debug_id = atomic_inc_return(&fscache_op_debug_id);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, op) < 0)
		goto submit_failed;

	/* the work queue now carries its own ref on the object */
	spin_unlock(&cookie->lock);

	ret = fscache_wait_for_operation_activation(object, op,
						    NULL, NULL, NULL);
	if (ret == 0) {
		/* ask the cache to honour the operation */
		ret = object->cache->ops->check_consistency(op);
		fscache_op_complete(op, false);
	} else if (ret == -ENOBUFS) {
		ret = 0;
	}

	fscache_put_operation(op);
	_leave(" = %d", ret);
	return ret;

submit_failed:
	wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	kfree(op);
	_leave(" = -ESTALE");
	return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
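
/*
 * Sketch, assuming the fscache_check_consistency() wrapper: a netfs can ask
 * whether the cached object still matches its own view of the file (e.g. at
 * open time); one plausible reaction to -ESTALE is to invalidate the cached
 * copy:
 *
 *	if (fscache_check_consistency(inode->cookie) == -ESTALE)
 *		fscache_invalidate(inode->cookie);
 */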
724