Lines matching references to bc: struct dm_buffer_cache helpers in drivers/md/dm-bufio.c

/*
 * Each tree in bc->trees[] has its own lock. When the client runs in
 * no-sleep mode the lock is a reader/writer spinlock taken with the _bh
 * variants; otherwise it is an rw_semaphore.
 */
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
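
All four helpers pick their lock bucket through cache_index(), which is not among the matched lines. A minimal sketch of such a bucketing function, assuming num_locks is a power of two (hypothetical; the kernel's actual cache_index() may differ):

static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	/* hypothetical sketch, not the kernel's implementation */
	return block & (num_locks - 1);
}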

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;	/* every tree starts empty */
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	/* every buffer should have been removed before teardown */
	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}
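
A hypothetical setup/teardown pairing; assumes bc points at storage sized for eight buffer trees (allocation elided), and the lock count 8 is an arbitrary choice for this sketch:

cache_init(bc, 8, false);	/* sleepable locking: rwsems, not spinlocks */
/* ... cache_insert()/cache_get()/cache_put() traffic ... */
cache_destroy(bc);		/* warns if any tree still holds buffers */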

static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		/* take a hold while the tree lock pins the buffer */
		lru_reference(&b->lru);
		atomic_inc(&b->hold_count);
	}
	cache_read_unlock(bc, block);

	return b;
}

static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	r = atomic_dec_and_test(&b->hold_count);	/* true for the last holder */
	cache_read_unlock(bc, b->block);

	return r;
}
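
A hypothetical lookup pattern built on this pair (inspect_block is invented for the sketch; the payload access is elided):

static void inspect_block(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);	/* NULL if not cached */

	if (!b)
		return;

	/* ... read the buffer while the hold count pins it ... */

	if (cache_put(bc, b)) {
		/* hold count reached zero: b is now a candidate for eviction */
	}
}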

/*
 * Evict one buffer matching pred from the given lru. __evict_pred (via
 * the evict_wrapper) takes the matching tree's lock through the lock
 * history, so the rb_erase below runs under that lock.
 */
static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct dm_buffer *b;
	struct lock_history lh;

	lh_init(&lh, bc, true);	/* eviction mutates the trees: write locks */
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}
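
For illustration, a hypothetical b_predicate for cache_evict() (evict_below and its context are invented; ER_EVICT/ER_DONT_EVICT are the evict_result values a predicate returns):

static enum evict_result evict_below(struct dm_buffer *b, void *context)
{
	sector_t *limit = context;

	return b->block < *limit ? ER_EVICT : ER_DONT_EVICT;
}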

static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	if (list_mode != b->list_mode) {
		/* move the buffer from its current lru to the new one */
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);
}

static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	while (true) {
		/* reuse the eviction machinery to pull matching buffers off old_mode */
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	/* walk the circular list once, locking each buffer's tree via lh */
	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);
		if (fn(b, context) == IT_COMPLETE)
			return;

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);	/* iteration only reads: read locks */
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}
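
A hypothetical iter_fn for cache_iterate() (count_buffers is invented; IT_NEXT continues the walk, IT_COMPLETE stops it):

static enum it_action count_buffers(struct dm_buffer *b, void *context)
{
	unsigned long *n = context;

	(*n)++;
	return IT_NEXT;
}

A caller would pass it as cache_iterate(bc, LIST_DIRTY, count_buffers, &n).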

static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)	/* no buffer for this block was already present */
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}

static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;	/* another holder still references the buffer */
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	/* walk [begin, end) in block order, skipping buffers that are held */
	while ((b = __find_next(root, begin)) && b->block < end) {
		begin = b->block + 1;

		if (!atomic_read(&b->hold_count) && pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);	/* takes rwsems, so only for sleepable clients */
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}
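
A hypothetical caller dropping every unheld buffer in a range (always_evict, release_buffer and forget_range are invented for this sketch):

static enum evict_result always_evict(struct dm_buffer *b, void *context)
{
	return ER_EVICT;
}

static void release_buffer(struct dm_buffer *b)
{
	/* hand the buffer back to its allocator; placeholder for the sketch */
}

static void forget_range(struct dm_buffer_cache *bc, sector_t begin, sector_t end)
{
	cache_remove_range(bc, begin, end, always_evict, release_buffer);
}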