xref: /openbmc/linux/fs/mbcache.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  * linux/fs/mbcache.c
3  * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
4  */
5 
6 /*
7  * Filesystem Meta Information Block Cache (mbcache)
8  *
9  * The mbcache caches blocks of block devices that need to be located
10  * by their device/block number, as well as by other criteria (such
11  * as the block's contents).
12  *
13  * There can only be one cache entry in a cache per device and block number.
14  * Additional indexes need not be unique in this sense. The number of
15  * additional indexes (=other criteria) can be hardwired at compile time
16  * or specified at cache create time.
17  *
18  * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
19  * in the cache. A valid entry is in the main hash tables of the cache,
20  * and may also be in the lru list. An invalid entry is not in any hashes
21  * or lists.
22  *
23  * A valid cache entry is only in the lru list if no handles refer to it.
24  * Invalid cache entries will be freed when the last handle to the cache
25  * entry is released. Entries that cannot be freed immediately are put
26  * back on the lru list.
27  */
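/*
 * A rough usage sketch of the interface implemented below (all variable
 * names here are illustrative only):
 *
 *	cache = mb_cache_create("example", NULL,
 *			sizeof(struct mb_cache_entry) +
 *			sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
 *			1, 6);
 *	ce = mb_cache_entry_alloc(cache);
 *	if (ce) {
 *		mb_cache_entry_insert(ce, bdev, block, keys);
 *		mb_cache_entry_release(ce);
 *	}
 *	...
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_release(ce);
 *	...
 *	mb_cache_destroy(cache);
 */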
28 
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 
32 #include <linux/hash.h>
33 #include <linux/fs.h>
34 #include <linux/mm.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
37 #include <linux/init.h>
38 #include <linux/mbcache.h>
39 
40 
41 #ifdef MB_CACHE_DEBUG
42 # define mb_debug(f...) do { \
43 		printk(KERN_DEBUG f); \
44 		printk("\n"); \
45 	} while (0)
46 #define mb_assert(c) do { if (!(c)) \
47 		printk(KERN_ERR "assertion " #c " failed\n"); \
48 	} while(0)
49 #else
50 # define mb_debug(f...) do { } while(0)
51 # define mb_assert(c) do { } while(0)
52 #endif
53 #define mb_error(f...) do { \
54 		printk(KERN_ERR f); \
55 		printk("\n"); \
56 	} while(0)
57 
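/*
 * e_used of an entry doubles as a reference count and a lock word: each
 * reader holds a single reference, while a writer holding exclusive access
 * adds MB_CACHE_WRITER on top of its reference (see mb_cache_entry_alloc(),
 * mb_cache_entry_get() and __mb_cache_entry_find() below).
 */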
58 #define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
59 
60 static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
61 
62 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
63 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
64 MODULE_LICENSE("GPL");
65 
66 EXPORT_SYMBOL(mb_cache_create);
67 EXPORT_SYMBOL(mb_cache_shrink);
68 EXPORT_SYMBOL(mb_cache_destroy);
69 EXPORT_SYMBOL(mb_cache_entry_alloc);
70 EXPORT_SYMBOL(mb_cache_entry_insert);
71 EXPORT_SYMBOL(mb_cache_entry_release);
72 EXPORT_SYMBOL(mb_cache_entry_free);
73 EXPORT_SYMBOL(mb_cache_entry_get);
74 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
75 EXPORT_SYMBOL(mb_cache_entry_find_first);
76 EXPORT_SYMBOL(mb_cache_entry_find_next);
77 #endif
78 
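/*
 * A cache object. c_indexes_hash[0] is a variable-sized tail holding one
 * pointer per additional index; each points to a separately allocated hash
 * table of bucket list heads (see mb_cache_create()).
 */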
79 struct mb_cache {
80 	struct list_head		c_cache_list;
81 	const char			*c_name;
82 	struct mb_cache_op		c_op;
83 	atomic_t			c_entry_count;
84 	int				c_bucket_bits;
85 #ifndef MB_CACHE_INDEXES_COUNT
86 	int				c_indexes_count;
87 #endif
88 	struct kmem_cache			*c_entry_cache;
89 	struct list_head		*c_block_hash;
90 	struct list_head		*c_indexes_hash[0];
91 };
92 
93 
94 /*
95  * Global data: list of all mbcaches, lru list, and a spinlock for
96  * accessing cache data structures on SMP machines. The lru list is
97  * global across all mbcaches.
98  */
99 
100 static LIST_HEAD(mb_cache_list);
101 static LIST_HEAD(mb_cache_lru_list);
102 static DEFINE_SPINLOCK(mb_cache_spinlock);
103 static struct shrinker *mb_shrinker;
104 
105 static inline int
106 mb_cache_indexes(struct mb_cache *cache)
107 {
108 #ifdef MB_CACHE_INDEXES_COUNT
109 	return MB_CACHE_INDEXES_COUNT;
110 #else
111 	return cache->c_indexes_count;
112 #endif
113 }
114 
115 /*
116  * The shrinker callback the mbcache registers to get shrunk dynamically.
117  */
118 
119 static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
120 
121 
122 static inline int
123 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
124 {
125 	return !list_empty(&ce->e_block_list);
126 }
127 
128 
129 static void
130 __mb_cache_entry_unhash(struct mb_cache_entry *ce)
131 {
132 	int n;
133 
134 	if (__mb_cache_entry_is_hashed(ce)) {
135 		list_del_init(&ce->e_block_list);
136 		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
137 			list_del(&ce->e_indexes[n].o_list);
138 	}
139 }
140 
141 
142 static void
143 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
144 {
145 	struct mb_cache *cache = ce->e_cache;
146 
147 	mb_assert(!(ce->e_used || ce->e_queued));
148 	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
149 		/* free failed -- put back on the lru list
150 		   for freeing later. */
151 		spin_lock(&mb_cache_spinlock);
152 		list_add(&ce->e_lru_list, &mb_cache_lru_list);
153 		spin_unlock(&mb_cache_spinlock);
154 	} else {
155 		kmem_cache_free(cache->c_entry_cache, ce);
156 		atomic_dec(&cache->c_entry_count);
157 	}
158 }
159 
160 
161 static void
162 __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
163 	__releases(mb_cache_spinlock)
164 {
165 	/* Wake up all processes queuing for this cache entry. */
166 	if (ce->e_queued)
167 		wake_up_all(&mb_cache_queue);
168 	if (ce->e_used >= MB_CACHE_WRITER)
169 		ce->e_used -= MB_CACHE_WRITER;
170 	ce->e_used--;
171 	if (!(ce->e_used || ce->e_queued)) {
172 		if (!__mb_cache_entry_is_hashed(ce))
173 			goto forget;
174 		mb_assert(list_empty(&ce->e_lru_list));
175 		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
176 	}
177 	spin_unlock(&mb_cache_spinlock);
178 	return;
179 forget:
180 	spin_unlock(&mb_cache_spinlock);
181 	__mb_cache_entry_forget(ce, GFP_KERNEL);
182 }
183 
184 
185 /*
186  * mb_cache_shrink_fn()  memory pressure callback
187  *
188  * This function is called by the kernel memory management when memory
189  * gets low.
190  *
191  * @nr_to_scan: Number of objects to scan
192  * @gfp_mask: allocation flags, passed on to the cache's free callback
193  *
194  * Returns the number of cached objects, scaled by sysctl_vfs_cache_pressure.
195  */
196 static int
197 mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
198 {
199 	LIST_HEAD(free_list);
200 	struct list_head *l, *ltmp;
201 	int count = 0;
202 
203 	spin_lock(&mb_cache_spinlock);
204 	list_for_each(l, &mb_cache_list) {
205 		struct mb_cache *cache =
206 			list_entry(l, struct mb_cache, c_cache_list);
207 		mb_debug("cache %s (%d)", cache->c_name,
208 			  atomic_read(&cache->c_entry_count));
209 		count += atomic_read(&cache->c_entry_count);
210 	}
211 	mb_debug("trying to free %d entries", nr_to_scan);
212 	if (nr_to_scan == 0) {
213 		spin_unlock(&mb_cache_spinlock);
214 		goto out;
215 	}
216 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
217 		struct mb_cache_entry *ce =
218 			list_entry(mb_cache_lru_list.next,
219 				   struct mb_cache_entry, e_lru_list);
220 		list_move_tail(&ce->e_lru_list, &free_list);
221 		__mb_cache_entry_unhash(ce);
222 	}
223 	spin_unlock(&mb_cache_spinlock);
224 	list_for_each_safe(l, ltmp, &free_list) {
225 		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
226 						   e_lru_list), gfp_mask);
227 	}
228 out:
229 	return (count / 100) * sysctl_vfs_cache_pressure;
230 }
231 
232 
233 /*
234  * mb_cache_create()  create a new cache
235  *
236  * All entries in one cache are the same size. Cache entries may come from
237  * multiple devices. The new cache is added to the global list of caches
238  * scanned by the registered shrinker under memory pressure. Returns NULL
239  * if no more memory was available.
240  *
241  * @name: name of the cache (informal)
242  * @cache_op: contains the callback called when freeing a cache entry
243  * @entry_size: The size of a cache entry, including
244  *              struct mb_cache_entry
245  * @indexes_count: number of additional indexes in the cache. Must equal
246  *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
247  *                 hardwired.
248  * @bucket_bits: log2(number of hash buckets)
249  */
250 struct mb_cache *
251 mb_cache_create(const char *name, struct mb_cache_op *cache_op,
252 		size_t entry_size, int indexes_count, int bucket_bits)
253 {
254 	int m=0, n, bucket_count = 1 << bucket_bits;
255 	struct mb_cache *cache = NULL;
256 
257 	if(entry_size < sizeof(struct mb_cache_entry) +
258 	   indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
259 		return NULL;
260 
261 	cache = kmalloc(sizeof(struct mb_cache) +
262 	                indexes_count * sizeof(struct list_head), GFP_KERNEL);
263 	if (!cache)
264 		goto fail;
265 	cache->c_name = name;
266 	cache->c_op.free = NULL;
267 	if (cache_op)
268 		cache->c_op.free = cache_op->free;
269 	atomic_set(&cache->c_entry_count, 0);
270 	cache->c_bucket_bits = bucket_bits;
271 #ifdef MB_CACHE_INDEXES_COUNT
272 	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
273 #else
274 	cache->c_indexes_count = indexes_count;
275 #endif
276 	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
277 	                              GFP_KERNEL);
278 	if (!cache->c_block_hash)
279 		goto fail;
280 	for (n=0; n<bucket_count; n++)
281 		INIT_LIST_HEAD(&cache->c_block_hash[n]);
282 	for (m=0; m<indexes_count; m++) {
283 		cache->c_indexes_hash[m] = kmalloc(bucket_count *
284 		                                 sizeof(struct list_head),
285 		                                 GFP_KERNEL);
286 		if (!cache->c_indexes_hash[m])
287 			goto fail;
288 		for (n=0; n<bucket_count; n++)
289 			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
290 	}
291 	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
292 		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
293 	if (!cache->c_entry_cache)
294 		goto fail;
295 
296 	spin_lock(&mb_cache_spinlock);
297 	list_add(&cache->c_cache_list, &mb_cache_list);
298 	spin_unlock(&mb_cache_spinlock);
299 	return cache;
300 
301 fail:
302 	if (cache) {
303 		while (--m >= 0)
304 			kfree(cache->c_indexes_hash[m]);
305 		kfree(cache->c_block_hash);
306 		kfree(cache);
307 	}
308 	return NULL;
309 }
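/*
 * A creation sketch in the spirit of a filesystem's extended attribute
 * code (the ex_* names are illustrative, not taken from this file):
 *
 *	static struct mb_cache *ex_xattr_cache;
 *
 *	static int __init ex_init_xattr(void)
 *	{
 *		ex_xattr_cache = mb_cache_create("ex_xattr", NULL,
 *			sizeof(struct mb_cache_entry) +
 *			sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
 *			1, 6);
 *		return ex_xattr_cache ? 0 : -ENOMEM;
 *	}
 */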
310 
311 
312 /*
313  * mb_cache_shrink()
314  *
315  * Removes all cache entries of a device from the cache. Entries that are
316  * currently in use cannot be freed and thus remain in the cache; all
317  * others are freed.
318  *
319  * @bdev: which device's cache entries to shrink
320  */
321 void
322 mb_cache_shrink(struct block_device *bdev)
323 {
324 	LIST_HEAD(free_list);
325 	struct list_head *l, *ltmp;
326 
327 	spin_lock(&mb_cache_spinlock);
328 	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
329 		struct mb_cache_entry *ce =
330 			list_entry(l, struct mb_cache_entry, e_lru_list);
331 		if (ce->e_bdev == bdev) {
332 			list_move_tail(&ce->e_lru_list, &free_list);
333 			__mb_cache_entry_unhash(ce);
334 		}
335 	}
336 	spin_unlock(&mb_cache_spinlock);
337 	list_for_each_safe(l, ltmp, &free_list) {
338 		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
339 						   e_lru_list), GFP_KERNEL);
340 	}
341 }
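/*
 * Filesystems typically call this when an instance goes away, e.g.
 * (illustrative only) mb_cache_shrink(sb->s_bdev) from their put_super
 * path, so that no stale entries for that device remain cached.
 */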
342 
343 
344 /*
345  * mb_cache_destroy()
346  *
347  * Shrinks the cache to its minimum possible size (hopefully 0 entries),
348  * and then destroys it. Any entries still in use at this point are
349  * reported as orphaned.
350  */
351 void
352 mb_cache_destroy(struct mb_cache *cache)
353 {
354 	LIST_HEAD(free_list);
355 	struct list_head *l, *ltmp;
356 	int n;
357 
358 	spin_lock(&mb_cache_spinlock);
359 	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
360 		struct mb_cache_entry *ce =
361 			list_entry(l, struct mb_cache_entry, e_lru_list);
362 		if (ce->e_cache == cache) {
363 			list_move_tail(&ce->e_lru_list, &free_list);
364 			__mb_cache_entry_unhash(ce);
365 		}
366 	}
367 	list_del(&cache->c_cache_list);
368 	spin_unlock(&mb_cache_spinlock);
369 
370 	list_for_each_safe(l, ltmp, &free_list) {
371 		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
372 						   e_lru_list), GFP_KERNEL);
373 	}
374 
375 	if (atomic_read(&cache->c_entry_count) > 0) {
376 		mb_error("cache %s: %d orphaned entries",
377 			  cache->c_name,
378 			  atomic_read(&cache->c_entry_count));
379 	}
380 
381 	kmem_cache_destroy(cache->c_entry_cache);
382 
383 	for (n=0; n < mb_cache_indexes(cache); n++)
384 		kfree(cache->c_indexes_hash[n]);
385 	kfree(cache->c_block_hash);
386 	kfree(cache);
387 }
388 
389 
390 /*
391  * mb_cache_entry_alloc()
392  *
393  * Allocates a new cache entry. The new entry will not be valid initially,
394  * and thus cannot be looked up yet. It should be filled with data, and
395  * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
396  * if no more memory was available.
397  */
398 struct mb_cache_entry *
399 mb_cache_entry_alloc(struct mb_cache *cache)
400 {
401 	struct mb_cache_entry *ce;
402 
403 	atomic_inc(&cache->c_entry_count);
404 	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
405 	if (ce) {
406 		INIT_LIST_HEAD(&ce->e_lru_list);
407 		INIT_LIST_HEAD(&ce->e_block_list);
408 		ce->e_cache = cache;
409 		ce->e_used = 1 + MB_CACHE_WRITER;
410 		ce->e_queued = 0;
411 	}
412 	return ce;
413 }
414 
415 
416 /*
417  * mb_cache_entry_insert()
418  *
419  * Inserts an entry that was allocated using mb_cache_entry_alloc() into
420  * the cache. After this, the cache entry can be looked up, but is not yet
421  * in the lru list as the caller still holds a handle to it. Returns 0 on
422  * success, or -EBUSY if a cache entry for that device + block exists
423  * already (this may happen after a failed lookup, but when another process
424  * has inserted the same cache entry in the meantime).
425  *
426  * @bdev: device the cache entry belongs to
427  * @block: block number
428  * @keys: array of additional keys. There must be indexes_count entries
429  *        in the array (as specified when creating the cache).
430  */
431 int
432 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
433 		      sector_t block, unsigned int keys[])
434 {
435 	struct mb_cache *cache = ce->e_cache;
436 	unsigned int bucket;
437 	struct list_head *l;
438 	int error = -EBUSY, n;
439 
440 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
441 			   cache->c_bucket_bits);
442 	spin_lock(&mb_cache_spinlock);
443 	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
444 		struct mb_cache_entry *ce =
445 			list_entry(l, struct mb_cache_entry, e_block_list);
446 		if (ce->e_bdev == bdev && ce->e_block == block)
447 			goto out;
448 	}
449 	__mb_cache_entry_unhash(ce);
450 	ce->e_bdev = bdev;
451 	ce->e_block = block;
452 	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
453 	for (n=0; n<mb_cache_indexes(cache); n++) {
454 		ce->e_indexes[n].o_key = keys[n];
455 		bucket = hash_long(keys[n], cache->c_bucket_bits);
456 		list_add(&ce->e_indexes[n].o_list,
457 			 &cache->c_indexes_hash[n][bucket]);
458 	}
459 	error = 0;
460 out:
461 	spin_unlock(&mb_cache_spinlock);
462 	return error;
463 }
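/*
 * A minimal insert sketch (variable names are illustrative; the keys array
 * must have one element per additional index, here a single one):
 *
 *	unsigned int keys[1] = { hash };
 *	struct mb_cache_entry *ce = mb_cache_entry_alloc(cache);
 *
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, keys) == -EBUSY)
 *			mb_cache_entry_free(ce);
 *		else
 *			mb_cache_entry_release(ce);
 *	}
 */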
464 
465 
466 /*
467  * mb_cache_entry_release()
468  *
469  * Release a handle to a cache entry. When the last handle to a cache entry
470  * is released it is either freed (if it is invalid) or otherwise inserted
471  * into the lru list.
472  */
473 void
474 mb_cache_entry_release(struct mb_cache_entry *ce)
475 {
476 	spin_lock(&mb_cache_spinlock);
477 	__mb_cache_entry_release_unlock(ce);
478 }
479 
480 
481 /*
482  * mb_cache_entry_free()
483  *
484  * Removes (unhashes) the cache entry and releases the handle; the now
485  * invalid entry is freed once its last handle has been released.
486  */
487 void
488 mb_cache_entry_free(struct mb_cache_entry *ce)
489 {
490 	spin_lock(&mb_cache_spinlock);
491 	mb_assert(list_empty(&ce->e_lru_list));
492 	__mb_cache_entry_unhash(ce);
493 	__mb_cache_entry_release_unlock(ce);
494 }
495 
496 
497 /*
498  * mb_cache_entry_get()
499  *
500  * Get a cache entry by device / block number. (There can only be one entry
501  * in the cache per device and block.) Returns NULL if no such cache entry
502  * exists. The returned cache entry is locked for exclusive access ("single
503  * writer").
504  */
505 struct mb_cache_entry *
506 mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
507 		   sector_t block)
508 {
509 	unsigned int bucket;
510 	struct list_head *l;
511 	struct mb_cache_entry *ce;
512 
513 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
514 			   cache->c_bucket_bits);
515 	spin_lock(&mb_cache_spinlock);
516 	list_for_each(l, &cache->c_block_hash[bucket]) {
517 		ce = list_entry(l, struct mb_cache_entry, e_block_list);
518 		if (ce->e_bdev == bdev && ce->e_block == block) {
519 			DEFINE_WAIT(wait);
520 
521 			if (!list_empty(&ce->e_lru_list))
522 				list_del_init(&ce->e_lru_list);
523 
524 			while (ce->e_used > 0) {
525 				ce->e_queued++;
526 				prepare_to_wait(&mb_cache_queue, &wait,
527 						TASK_UNINTERRUPTIBLE);
528 				spin_unlock(&mb_cache_spinlock);
529 				schedule();
530 				spin_lock(&mb_cache_spinlock);
531 				ce->e_queued--;
532 			}
533 			finish_wait(&mb_cache_queue, &wait);
534 			ce->e_used += 1 + MB_CACHE_WRITER;
535 
536 			if (!__mb_cache_entry_is_hashed(ce)) {
537 				__mb_cache_entry_release_unlock(ce);
538 				return NULL;
539 			}
540 			goto cleanup;
541 		}
542 	}
543 	ce = NULL;
544 
545 cleanup:
546 	spin_unlock(&mb_cache_spinlock);
547 	return ce;
548 }
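/*
 * A lookup-and-drop sketch (illustrative): when the disk block backing an
 * entry is about to be released, the entry can be taken out of the cache
 * like this:
 *
 *	struct mb_cache_entry *ce = mb_cache_entry_get(cache, bdev, block);
 *
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */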
549 
550 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
551 
552 static struct mb_cache_entry *
553 __mb_cache_entry_find(struct list_head *l, struct list_head *head,
554 		      int index, struct block_device *bdev, unsigned int key)
555 {
556 	while (l != head) {
557 		struct mb_cache_entry *ce =
558 			list_entry(l, struct mb_cache_entry,
559 			           e_indexes[index].o_list);
560 		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
561 			DEFINE_WAIT(wait);
562 
563 			if (!list_empty(&ce->e_lru_list))
564 				list_del_init(&ce->e_lru_list);
565 
566 			/* Incrementing e_used before waiting for the writer
567 			   to finish gives readers priority over writers. */
568 			ce->e_used++;
569 			while (ce->e_used >= MB_CACHE_WRITER) {
570 				ce->e_queued++;
571 				prepare_to_wait(&mb_cache_queue, &wait,
572 						TASK_UNINTERRUPTIBLE);
573 				spin_unlock(&mb_cache_spinlock);
574 				schedule();
575 				spin_lock(&mb_cache_spinlock);
576 				ce->e_queued--;
577 			}
578 			finish_wait(&mb_cache_queue, &wait);
579 
580 			if (!__mb_cache_entry_is_hashed(ce)) {
581 				__mb_cache_entry_release_unlock(ce);
582 				spin_lock(&mb_cache_spinlock);
583 				return ERR_PTR(-EAGAIN);
584 			}
585 			return ce;
586 		}
587 		l = l->next;
588 	}
589 	return NULL;
590 }
591 
592 
593 /*
594  * mb_cache_entry_find_first()
595  *
596  * Find the first cache entry on a given device with a certain key in
597  * an additional index. Additional matches can be found with
598  * mb_cache_entry_find_next(). Returns NULL if no match was found. The
599  * returned cache entry is locked for shared access ("multiple readers").
600  *
601  * @cache: the cache to search
602  * @index: the number of the additional index to search (0<=index<indexes_count)
603  * @bdev: the device the cache entry should belong to
604  * @key: the key in the index
605  */
606 struct mb_cache_entry *
607 mb_cache_entry_find_first(struct mb_cache *cache, int index,
608 			  struct block_device *bdev, unsigned int key)
609 {
610 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
611 	struct list_head *l;
612 	struct mb_cache_entry *ce;
613 
614 	mb_assert(index < mb_cache_indexes(cache));
615 	spin_lock(&mb_cache_spinlock);
616 	l = cache->c_indexes_hash[index][bucket].next;
617 	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
618 	                           index, bdev, key);
619 	spin_unlock(&mb_cache_spinlock);
620 	return ce;
621 }
622 
623 
624 /*
625  * mb_cache_entry_find_next()
626  *
627  * Find the next cache entry on a given device with a certain key in an
628  * additional index. Returns NULL if no match could be found. The previous
629  * entry is automatically released, so that mb_cache_entry_find_next() can
630  * be called like this:
631  *
632  * entry = mb_cache_entry_find_first();
633  * while (entry) {
634  * 	...
635  *	entry = mb_cache_entry_find_next(entry, ...);
636  * }
637  *
638  * @prev: The previous match
639  * @index: the number of the additional index to search (0<=index<indexes_count)
640  * @bdev: the device the cache entry should belong to
641  * @key: the key in the index
642  */
643 struct mb_cache_entry *
644 mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
645 			 struct block_device *bdev, unsigned int key)
646 {
647 	struct mb_cache *cache = prev->e_cache;
648 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
649 	struct list_head *l;
650 	struct mb_cache_entry *ce;
651 
652 	mb_assert(index < mb_cache_indexes(cache));
653 	spin_lock(&mb_cache_spinlock);
654 	l = prev->e_indexes[index].o_list.next;
655 	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
656 	                           index, bdev, key);
657 	__mb_cache_entry_release_unlock(prev);
658 	return ce;
659 }
660 
661 #endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
662 
663 static int __init init_mbcache(void)
664 {
665 	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
666 	return 0;
667 }
668 
669 static void __exit exit_mbcache(void)
670 {
671 	remove_shrinker(mb_shrinker);
672 }
673 
674 module_init(init_mbcache)
675 module_exit(exit_mbcache)
676 
677