Lines matching references to the identifier mq in the dm-cache SMQ policy (drivers/md/dm-cache-policy-smq.c). Each row gives the source line number, the matching line, and the enclosing function; rows tagged "argument" or "local" mark where mq is declared as a parameter or local variable rather than merely used.
876 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level) in writeback_sentinel() argument
878 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
881 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level) in demote_sentinel() argument
883 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
886 static void __update_writeback_sentinels(struct smq_policy *mq) in __update_writeback_sentinels() argument
889 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
893 sentinel = writeback_sentinel(mq, level); in __update_writeback_sentinels()
899 static void __update_demote_sentinels(struct smq_policy *mq) in __update_demote_sentinels() argument
902 struct queue *q = &mq->clean; in __update_demote_sentinels()
906 sentinel = demote_sentinel(mq, level); in __update_demote_sentinels()
912 static void update_sentinels(struct smq_policy *mq) in update_sentinels() argument
914 if (time_after(jiffies, mq->next_writeback_period)) { in update_sentinels()
915 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in update_sentinels()
916 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in update_sentinels()
917 __update_writeback_sentinels(mq); in update_sentinels()
920 if (time_after(jiffies, mq->next_demote_period)) { in update_sentinels()
921 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in update_sentinels()
922 mq->current_demote_sentinels = !mq->current_demote_sentinels; in update_sentinels()
923 __update_demote_sentinels(mq); in update_sentinels()
927 static void __sentinels_init(struct smq_policy *mq) in __sentinels_init() argument
933 sentinel = writeback_sentinel(mq, level); in __sentinels_init()
935 q_push(&mq->dirty, sentinel); in __sentinels_init()
937 sentinel = demote_sentinel(mq, level); in __sentinels_init()
939 q_push(&mq->clean, sentinel); in __sentinels_init()
943 static void sentinels_init(struct smq_policy *mq) in sentinels_init() argument
945 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in sentinels_init()
946 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in sentinels_init()
948 mq->current_writeback_sentinels = false; in sentinels_init()
949 mq->current_demote_sentinels = false; in sentinels_init()
950 __sentinels_init(mq); in sentinels_init()
952 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in sentinels_init()
953 mq->current_demote_sentinels = !mq->current_demote_sentinels; in sentinels_init()
954 __sentinels_init(mq); in sentinels_init()
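
The writeback and demote sentinels above are double-buffered: update_sentinels() flips the current bank once per WRITEBACK_PERIOD / DEMOTE_PERIOD and refreshes its sentinels, and sentinels_init() seeds both banks up front. The userspace sketch below keeps only that flip-and-refresh rhythm; sentinel_set, rotate() and the tick clock are illustrative stand-ins, not the kernel's types.

/*
 * Minimal sketch of the double-buffered sentinel rotation above.
 * q_push()/q_del() are reduced to stamping the active bank.
 */
#include <stdbool.h>
#include <stdio.h>

#define ROTATION_PERIOD 10UL            /* assumption: stands in for WRITEBACK_PERIOD */
#define NR_LEVELS 4

struct sentinel_set {
        int bank[2][NR_LEVELS];         /* two banks of per-level sentinels */
        bool current;                   /* which bank is "current" */
        unsigned long next_period;
};

static void refresh_current_bank(struct sentinel_set *s, unsigned long now)
{
        for (int level = 0; level < NR_LEVELS; level++)
                s->bank[s->current][level] = (int)now;  /* stand-in for re-queueing the sentinel */
}

/* mirrors update_sentinels(): once per period, flip the bank and refresh it */
static void rotate(struct sentinel_set *s, unsigned long now)
{
        if (now >= s->next_period) {
                s->next_period = now + ROTATION_PERIOD;
                s->current = !s->current;
                refresh_current_bank(s, now);
        }
}

int main(void)
{
        struct sentinel_set s = { .current = false, .next_period = 0 };

        /* sentinels_init(): seed both banks so every level carries two markers */
        refresh_current_bank(&s, 0);
        s.current = !s.current;
        refresh_current_bank(&s, 0);

        for (unsigned long tick = 1; tick <= 30; tick++)
                rotate(&s, tick);

        printf("active bank after 30 ticks: %d\n", s.current);
        return 0;
}
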
959 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
961 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
964 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
967 q_push(&mq->dirty, e); in push_queue()
969 q_push(&mq->clean, e); in push_queue()
973 static void push(struct smq_policy *mq, struct entry *e) in push() argument
975 h_insert(&mq->table, e); in push()
977 push_queue(mq, e); in push()
980 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
983 q_push_front(&mq->dirty, e); in push_queue_front()
985 q_push_front(&mq->clean, e); in push_queue_front()
988 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
990 h_insert(&mq->table, e); in push_front()
992 push_queue_front(mq, e); in push_front()
995 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
997 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
1000 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
1008 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1010 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
1014 q_requeue(&mq->dirty, e, 1u, in requeue()
1015 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
1016 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
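
requeue() only boosts an entry once per cache period: the first hit on a cblock sets its bit in cache_hit_bits and requeues the entry one level up, and later hits do nothing until the bitset is cleared by end_cache_period(). The sketch below shows just that gating; hit_bits, level and requeue_hit are illustrative names.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CBLOCKS 64
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long hit_bits[(NR_CBLOCKS + BITS_PER_WORD - 1) / BITS_PER_WORD];
static unsigned int level[NR_CBLOCKS];

static bool test_and_set(unsigned long *bits, unsigned int nr)
{
        unsigned long mask = 1UL << (nr % BITS_PER_WORD);
        bool was_set = bits[nr / BITS_PER_WORD] & mask;

        bits[nr / BITS_PER_WORD] |= mask;
        return was_set;
}

static void requeue_hit(unsigned int cblock)
{
        if (!test_and_set(hit_bits, cblock))
                level[cblock] += 1;     /* stand-in for q_requeue(..., 1u, ...) */
}

int main(void)
{
        requeue_hit(7);
        requeue_hit(7);         /* second hit in the same period: no extra boost */
        printf("level of cblock 7: %u\n", level[7]);    /* prints 1 */
        return 0;
}
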
1020 static unsigned int default_promote_level(struct smq_policy *mq) in default_promote_level() argument
1042 unsigned int hits = mq->cache_stats.hits; in default_promote_level()
1043 unsigned int misses = mq->cache_stats.misses; in default_promote_level()
1048 static void update_promote_levels(struct smq_policy *mq) in update_promote_levels() argument
1054 unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ? in update_promote_levels()
1055 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u); in update_promote_levels()
1064 switch (stats_assess(&mq->hotspot_stats)) { in update_promote_levels()
1077 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; in update_promote_levels()
1078 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level); in update_promote_levels()
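
default_promote_level() derives a threshold from the hit/miss counters in cache_stats, and update_promote_levels() subtracts it from NR_HOTSPOT_LEVELS to give the hotspot level an access must reach before promotion is considered (should_promote() compares against these later in the listing). The sketch below only mirrors that shape; the table, index formula and counters are illustrative assumptions, not the kernel's tuning.

#include <stdio.h>

#define NR_HOTSPOT_LEVELS 64u

static unsigned int default_promote_level(unsigned int hits, unsigned int misses)
{
        /* assumption: crude hit-ratio bucket -> threshold mapping */
        static const unsigned int table[] = {
                1, 1, 2, 2, 4, 4, 8, 8, 16, 16, 32, 32, 32, 32, 48, 48, 48
        };
        unsigned int total = hits + misses ? hits + misses : 1u;

        return table[(hits << 4u) / total];
}

int main(void)
{
        unsigned int hits = 900, misses = 100;
        unsigned int threshold = default_promote_level(hits, misses);
        unsigned int read_promote_level = NR_HOTSPOT_LEVELS - threshold;

        /* an access promotes only if its hotspot entry's level reaches this */
        printf("hit ratio %u%%: promote reads at hotspot level >= %u\n",
               100u * hits / (hits + misses), read_promote_level);
        return 0;
}
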
1085 static void update_level_jump(struct smq_policy *mq) in update_level_jump() argument
1087 switch (stats_assess(&mq->hotspot_stats)) { in update_level_jump()
1089 mq->hotspot_level_jump = 4u; in update_level_jump()
1093 mq->hotspot_level_jump = 2u; in update_level_jump()
1097 mq->hotspot_level_jump = 1u; in update_level_jump()
1102 static void end_hotspot_period(struct smq_policy *mq) in end_hotspot_period() argument
1104 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in end_hotspot_period()
1105 update_promote_levels(mq); in end_hotspot_period()
1107 if (time_after(jiffies, mq->next_hotspot_period)) { in end_hotspot_period()
1108 update_level_jump(mq); in end_hotspot_period()
1109 q_redistribute(&mq->hotspot); in end_hotspot_period()
1110 stats_reset(&mq->hotspot_stats); in end_hotspot_period()
1111 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; in end_hotspot_period()
1115 static void end_cache_period(struct smq_policy *mq) in end_cache_period() argument
1117 if (time_after(jiffies, mq->next_cache_period)) { in end_cache_period()
1118 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1120 q_redistribute(&mq->dirty); in end_cache_period()
1121 q_redistribute(&mq->clean); in end_cache_period()
1122 stats_reset(&mq->cache_stats); in end_cache_period()
1124 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; in end_cache_period()
1136 static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p) in percent_to_target() argument
1138 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1141 static bool clean_target_met(struct smq_policy *mq, bool idle) in clean_target_met() argument
1147 if (idle || mq->cleaner) { in clean_target_met()
1151 return q_size(&mq->dirty) == 0u; in clean_target_met()
1160 static bool free_target_met(struct smq_policy *mq) in free_target_met() argument
1164 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1165 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= in free_target_met()
1166 percent_to_target(mq, FREE_TARGET); in free_target_met()
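
percent_to_target() and free_target_met() are plain integer arithmetic: a percentage of cache_size, compared against the free entries plus the demotions already queued in bg_work. A small worked example, with FREE_TARGET's value and the cache geometry chosen purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET 25u         /* assumption: percent of the cache kept reclaimable */

static unsigned int percent_to_target(unsigned int cache_size, unsigned int p)
{
        return cache_size * p / 100u;
}

static bool free_target_met(unsigned int cache_size, unsigned int nr_allocated,
                            unsigned int demotions_queued)
{
        unsigned int nr_free = cache_size - nr_allocated;

        return (nr_free + demotions_queued) >= percent_to_target(cache_size, FREE_TARGET);
}

int main(void)
{
        /* 10000-block cache, 7900 blocks allocated, 500 demotions queued */
        printf("target = %u blocks\n", percent_to_target(10000, FREE_TARGET));  /* 2500 */
        printf("met    = %d\n", free_target_met(10000, 7900, 500));             /* 2100 + 500 >= 2500 -> 1 */
        return 0;
}
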
1171 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1179 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1185 static void queue_writeback(struct smq_policy *mq, bool idle) in queue_writeback() argument
1191 e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); in queue_writeback()
1193 mark_pending(mq, e); in queue_writeback()
1194 q_del(&mq->dirty, e); in queue_writeback()
1198 work.cblock = infer_cblock(mq, e); in queue_writeback()
1200 r = btracker_queue(mq->bg_work, &work, NULL); in queue_writeback()
1202 clear_pending(mq, e); in queue_writeback()
1203 q_push_front(&mq->dirty, e); in queue_writeback()
1208 static void queue_demotion(struct smq_policy *mq) in queue_demotion() argument
1214 if (WARN_ON_ONCE(!mq->migrations_allowed)) in queue_demotion()
1217 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1219 if (!clean_target_met(mq, true)) in queue_demotion()
1220 queue_writeback(mq, false); in queue_demotion()
1224 mark_pending(mq, e); in queue_demotion()
1225 q_del(&mq->clean, e); in queue_demotion()
1229 work.cblock = infer_cblock(mq, e); in queue_demotion()
1230 r = btracker_queue(mq->bg_work, &work, NULL); in queue_demotion()
1232 clear_pending(mq, e); in queue_demotion()
1233 q_push_front(&mq->clean, e); in queue_demotion()
1237 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock, in queue_promotion() argument
1244 if (!mq->migrations_allowed) in queue_promotion()
1247 if (allocator_empty(&mq->cache_alloc)) { in queue_promotion()
1252 if (!free_target_met(mq)) in queue_promotion()
1253 queue_demotion(mq); in queue_promotion()
1257 if (btracker_promotion_already_present(mq->bg_work, oblock)) in queue_promotion()
1264 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1269 work.cblock = infer_cblock(mq, e); in queue_promotion()
1270 r = btracker_queue(mq->bg_work, &work, workp); in queue_promotion()
1272 free_entry(&mq->cache_alloc, e); in queue_promotion()
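
queue_promotion(), queue_demotion() and queue_writeback() form a cascade: a promotion that finds the allocator empty asks for a demotion when the free target is not met, and a demotion that finds nothing clean asks for a writeback when the clean target is not met. The decision function below is a condensed sketch of that cascade; the enum, struct and helper are invented for the example and skip the btracker queueing and migrations_allowed checks.

#include <stdbool.h>
#include <stdio.h>

enum bg_op { BG_NONE, BG_WRITEBACK, BG_DEMOTE, BG_PROMOTE };

struct cache_state {
        bool have_free_entry;   /* !allocator_empty(&mq->cache_alloc) */
        bool free_target_met;
        bool have_clean_entry;  /* q_peek(&mq->clean, ...) found something */
        bool clean_target_met;
};

static enum bg_op next_background_op(const struct cache_state *s)
{
        if (s->have_free_entry)
                return BG_PROMOTE;
        if (!s->free_target_met) {
                if (s->have_clean_entry)
                        return BG_DEMOTE;
                if (!s->clean_target_met)
                        return BG_WRITEBACK;    /* make something clean to demote later */
        }
        return BG_NONE;
}

int main(void)
{
        struct cache_state s = {
                .have_free_entry = false, .free_target_met = false,
                .have_clean_entry = false, .clean_target_met = false,
        };

        printf("op = %d (BG_WRITEBACK = %d)\n", next_background_op(&s), BG_WRITEBACK);
        return 0;
}
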
1291 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, in should_promote() argument
1295 if (!allocator_empty(&mq->cache_alloc) && fast_promote) in should_promote()
1298 return maybe_promote(hs_e->level >= mq->write_promote_level); in should_promote()
1300 return maybe_promote(hs_e->level >= mq->read_promote_level); in should_promote()
1303 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b) in to_hblock() argument
1306 (void) sector_div(r, mq->cache_blocks_per_hotspot_block); in to_hblock()
1310 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b) in update_hotspot_queue() argument
1313 dm_oblock_t hb = to_hblock(mq, b); in update_hotspot_queue()
1314 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue()
1317 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1319 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1320 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1321 test_and_set_bit(hi, mq->hotspot_hit_bits) ? in update_hotspot_queue()
1322 0u : mq->hotspot_level_jump, in update_hotspot_queue()
1326 stats_miss(&mq->hotspot_stats); in update_hotspot_queue()
1328 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1330 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1332 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1333 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1334 clear_bit(hi, mq->hotspot_hit_bits); in update_hotspot_queue()
1341 q_push(&mq->hotspot, e); in update_hotspot_queue()
1342 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
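
to_hblock() maps an origin block onto a coarser hotspot block by dividing by cache_blocks_per_hotspot_block, and update_hotspot_queue() then boosts that hotspot entry by hotspot_level_jump, but only on its first hit in the period (the hotspot_hit_bits test). A sketch of just that arithmetic, with the block ratio, jump size and clamp chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

#define CACHE_BLOCKS_PER_HOTSPOT_BLOCK 16u      /* assumption */
#define NR_HOTSPOT_LEVELS 64u

static unsigned long to_hblock(unsigned long oblock)
{
        return oblock / CACHE_BLOCKS_PER_HOTSPOT_BLOCK; /* sector_div() in the kernel */
}

int main(void)
{
        unsigned int level = 10, jump = 4;      /* stands in for mq->hotspot_level_jump */
        bool hit_this_period = false;           /* stands in for hotspot_hit_bits */

        printf("origin block 1000005 -> hotspot block %lu\n", to_hblock(1000005));

        /* update_hotspot_queue(): boost only on the first hit of the period */
        if (!hit_this_period) {
                hit_this_period = true;
                level += jump;
                if (level >= NR_HOTSPOT_LEVELS)
                        level = NR_HOTSPOT_LEVELS - 1u;
        }
        printf("hotspot level after hit: %u\n", level);
        return 0;
}
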
1363 struct smq_policy *mq = to_smq_policy(p); in smq_destroy() local
1365 btracker_destroy(mq->bg_work); in smq_destroy()
1366 h_exit(&mq->hotspot_table); in smq_destroy()
1367 h_exit(&mq->table); in smq_destroy()
1368 free_bitset(mq->hotspot_hit_bits); in smq_destroy()
1369 free_bitset(mq->cache_hit_bits); in smq_destroy()
1370 space_exit(&mq->es); in smq_destroy()
1371 kfree(mq); in smq_destroy()
1376 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock, in __lookup() argument
1385 e = h_lookup(&mq->table, oblock); in __lookup()
1387 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1389 requeue(mq, e); in __lookup()
1390 *cblock = infer_cblock(mq, e); in __lookup()
1394 stats_miss(&mq->cache_stats); in __lookup()
1399 hs_e = update_hotspot_queue(mq, oblock); in __lookup()
1401 pr = should_promote(mq, hs_e, data_dir, fast_copy); in __lookup()
1403 queue_promotion(mq, oblock, work); in __lookup()
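
__lookup() splits into two paths: a hash hit maps the entry back to a cache block (infer_cblock()) and requeues it, while a miss records the access in the hotspot queue and may request a promotion. The toy lookup below keeps only that control flow; the linear-scan table and the promote flag stand in for h_lookup() and the should_promote()/queue_promotion() step.

#include <errno.h>
#include <stdio.h>

#define CACHE_SLOTS 8

static long cache_map[CACHE_SLOTS] = { [3] = 12345 };  /* oblock 12345 cached at cblock 3 */

static int lookup(long oblock, unsigned int *cblock, int *promote)
{
        for (unsigned int i = 0; i < CACHE_SLOTS; i++)
                if (cache_map[i] == oblock) {   /* stands in for h_lookup() */
                        *cblock = i;            /* stands in for infer_cblock() */
                        return 0;
                }

        /* miss: the kernel bumps the hotspot queue and may queue a promotion */
        *promote = 1;
        return -ENOENT;
}

int main(void)
{
        unsigned int cblock = 0;
        int promote = 0;

        if (!lookup(12345, &cblock, &promote))
                printf("hit: cblock %u\n", cblock);
        if (lookup(99999, &cblock, &promote) == -ENOENT)
                printf("miss: promotion requested = %d\n", promote);
        return 0;
}
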
1417 struct smq_policy *mq = to_smq_policy(p); in smq_lookup() local
1419 spin_lock_irqsave(&mq->lock, flags); in smq_lookup()
1420 r = __lookup(mq, oblock, cblock, in smq_lookup()
1423 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup()
1436 struct smq_policy *mq = to_smq_policy(p); in smq_lookup_with_work() local
1438 spin_lock_irqsave(&mq->lock, flags); in smq_lookup_with_work()
1439 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued); in smq_lookup_with_work()
1440 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup_with_work()
1450 struct smq_policy *mq = to_smq_policy(p); in smq_get_background_work() local
1452 spin_lock_irqsave(&mq->lock, flags); in smq_get_background_work()
1453 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1455 if (!clean_target_met(mq, idle)) { in smq_get_background_work()
1456 queue_writeback(mq, idle); in smq_get_background_work()
1457 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1460 spin_unlock_irqrestore(&mq->lock, flags); in smq_get_background_work()
1469 static void __complete_background_work(struct smq_policy *mq, in __complete_background_work() argument
1473 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work()
1479 clear_pending(mq, e); in __complete_background_work()
1483 push(mq, e); in __complete_background_work()
1486 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1494 h_remove(&mq->table, e); in __complete_background_work()
1495 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1498 clear_pending(mq, e); in __complete_background_work()
1499 push_queue(mq, e); in __complete_background_work()
1506 clear_pending(mq, e); in __complete_background_work()
1507 push_queue(mq, e); in __complete_background_work()
1512 btracker_complete(mq->bg_work, work); in __complete_background_work()
1520 struct smq_policy *mq = to_smq_policy(p); in smq_complete_background_work() local
1522 spin_lock_irqsave(&mq->lock, flags); in smq_complete_background_work()
1523 __complete_background_work(mq, work, success); in smq_complete_background_work()
1524 spin_unlock_irqrestore(&mq->lock, flags); in smq_complete_background_work()
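
__complete_background_work() resolves each finished migration according to its op and outcome: a successful promotion is hashed and queued (push()), a failed one frees the entry; a successful demotion is unhashed and freed, a failed one goes back on its queue; a completed writeback ends with the entry back on its queue. The state-flag sketch below condenses those transitions; the enum and struct are simplified stand-ins and skip btracker_complete().

#include <stdbool.h>
#include <stdio.h>

enum op { OP_PROMOTE, OP_DEMOTE, OP_WRITEBACK };

struct entry_sketch {
        bool allocated, hashed, queued, pending;
};

static void complete_work(enum op op, bool success, struct entry_sketch *e)
{
        switch (op) {
        case OP_PROMOTE:
                e->pending = false;
                if (success) {
                        e->hashed = true;       /* push(): h_insert() + push_queue() */
                        e->queued = true;
                } else {
                        e->allocated = false;   /* free_entry() */
                }
                break;
        case OP_DEMOTE:
                if (success) {
                        e->hashed = false;      /* h_remove() + free_entry() */
                        e->allocated = false;
                } else {
                        e->pending = false;     /* clear_pending() + push_queue() */
                        e->queued = true;
                }
                break;
        case OP_WRITEBACK:
                e->pending = false;             /* clear_pending() + push_queue() */
                e->queued = true;
                break;
        }
}

int main(void)
{
        struct entry_sketch e = { .allocated = true, .pending = true };

        complete_work(OP_PROMOTE, true, &e);
        printf("after successful promote: hashed=%d queued=%d pending=%d\n",
               e.hashed, e.queued, e.pending);
        return 0;
}
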
1528 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set) in __smq_set_clear_dirty() argument
1530 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty()
1535 del_queue(mq, e); in __smq_set_clear_dirty()
1537 push_queue(mq, e); in __smq_set_clear_dirty()
1544 struct smq_policy *mq = to_smq_policy(p); in smq_set_dirty() local
1546 spin_lock_irqsave(&mq->lock, flags); in smq_set_dirty()
1547 __smq_set_clear_dirty(mq, cblock, true); in smq_set_dirty()
1548 spin_unlock_irqrestore(&mq->lock, flags); in smq_set_dirty()
1553 struct smq_policy *mq = to_smq_policy(p); in smq_clear_dirty() local
1556 spin_lock_irqsave(&mq->lock, flags); in smq_clear_dirty()
1557 __smq_set_clear_dirty(mq, cblock, false); in smq_clear_dirty()
1558 spin_unlock_irqrestore(&mq->lock, flags); in smq_clear_dirty()
1570 struct smq_policy *mq = to_smq_policy(p); in smq_load_mapping() local
1573 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1583 push_front(mq, e); in smq_load_mapping()
1590 struct smq_policy *mq = to_smq_policy(p); in smq_invalidate_mapping() local
1591 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping()
1597 del_queue(mq, e); in smq_invalidate_mapping()
1598 h_remove(&mq->table, e); in smq_invalidate_mapping()
1599 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1605 struct smq_policy *mq = to_smq_policy(p); in smq_get_hint() local
1606 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint()
1618 struct smq_policy *mq = to_smq_policy(p); in smq_residency() local
1620 spin_lock_irqsave(&mq->lock, flags); in smq_residency()
1621 r = to_cblock(mq->cache_alloc.nr_allocated); in smq_residency()
1622 spin_unlock_irqrestore(&mq->lock, flags); in smq_residency()
1629 struct smq_policy *mq = to_smq_policy(p); in smq_tick() local
1632 spin_lock_irqsave(&mq->lock, flags); in smq_tick()
1633 mq->tick++; in smq_tick()
1634 update_sentinels(mq); in smq_tick()
1635 end_hotspot_period(mq); in smq_tick()
1636 end_cache_period(mq); in smq_tick()
1637 spin_unlock_irqrestore(&mq->lock, flags); in smq_tick()
1642 struct smq_policy *mq = to_smq_policy(p); in smq_allow_migrations() local
1644 mq->migrations_allowed = allow; in smq_allow_migrations()
1688 static void init_policy_functions(struct smq_policy *mq, bool mimic_mq) in init_policy_functions() argument
1690 mq->policy.destroy = smq_destroy; in init_policy_functions()
1691 mq->policy.lookup = smq_lookup; in init_policy_functions()
1692 mq->policy.lookup_with_work = smq_lookup_with_work; in init_policy_functions()
1693 mq->policy.get_background_work = smq_get_background_work; in init_policy_functions()
1694 mq->policy.complete_background_work = smq_complete_background_work; in init_policy_functions()
1695 mq->policy.set_dirty = smq_set_dirty; in init_policy_functions()
1696 mq->policy.clear_dirty = smq_clear_dirty; in init_policy_functions()
1697 mq->policy.load_mapping = smq_load_mapping; in init_policy_functions()
1698 mq->policy.invalidate_mapping = smq_invalidate_mapping; in init_policy_functions()
1699 mq->policy.get_hint = smq_get_hint; in init_policy_functions()
1700 mq->policy.residency = smq_residency; in init_policy_functions()
1701 mq->policy.tick = smq_tick; in init_policy_functions()
1702 mq->policy.allow_migrations = smq_allow_migrations; in init_policy_functions()
1705 mq->policy.set_config_value = mq_set_config_value; in init_policy_functions()
1706 mq->policy.emit_config_values = mq_emit_config_values; in init_policy_functions()
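
init_policy_functions() wires the generic policy ops to the smq_* handlers, and each handler recovers the containing smq_policy with to_smq_policy(). The sketch below shows that embed-and-recover pattern with a one-member ops struct; the field names and the offsetof-based cast are simplified stand-ins for the dm-cache policy interface.

#include <stddef.h>
#include <stdio.h>

struct policy {
        int (*residency)(struct policy *p);
};

struct smq_policy_sketch {
        struct policy policy;   /* embedded generic ops, recovered below */
        int nr_allocated;
};

#define to_smq(p) \
        ((struct smq_policy_sketch *)((char *)(p) - offsetof(struct smq_policy_sketch, policy)))

static int smq_residency_sketch(struct policy *p)
{
        return to_smq(p)->nr_allocated;
}

static void init_policy_functions_sketch(struct smq_policy_sketch *mq)
{
        mq->policy.residency = smq_residency_sketch;
}

int main(void)
{
        struct smq_policy_sketch mq = { .nr_allocated = 42 };

        init_policy_functions_sketch(&mq);
        printf("residency = %d\n", mq.policy.residency(&mq.policy));
        return 0;
}
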
1738 struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); in __smq_create() local
1740 if (!mq) in __smq_create()
1743 init_policy_functions(mq, mimic_mq); in __smq_create()
1744 mq->cache_size = cache_size; in __smq_create()
1745 mq->cache_block_size = cache_block_size; in __smq_create()
1748 &mq->hotspot_block_size, &mq->nr_hotspot_blocks); in __smq_create()
1750 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); in __smq_create()
1751 mq->hotspot_level_jump = 1u; in __smq_create()
1752 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { in __smq_create()
1757 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in __smq_create()
1759 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; in __smq_create()
1761 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); in __smq_create()
1763 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; in __smq_create()
1765 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, in __smq_create()
1766 total_sentinels + mq->nr_hotspot_blocks); in __smq_create()
1768 init_allocator(&mq->cache_alloc, &mq->es, in __smq_create()
1769 total_sentinels + mq->nr_hotspot_blocks, in __smq_create()
1770 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); in __smq_create()
1772 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); in __smq_create()
1773 if (!mq->hotspot_hit_bits) { in __smq_create()
1777 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in __smq_create()
1780 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); in __smq_create()
1781 if (!mq->cache_hit_bits) { in __smq_create()
1785 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in __smq_create()
1787 mq->cache_hit_bits = NULL; in __smq_create()
1789 mq->tick = 0; in __smq_create()
1790 spin_lock_init(&mq->lock); in __smq_create()
1792 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); in __smq_create()
1793 mq->hotspot.nr_top_levels = 8; in __smq_create()
1794 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, in __smq_create()
1795 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); in __smq_create()
1797 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1798 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1800 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); in __smq_create()
1801 stats_init(&mq->cache_stats, NR_CACHE_LEVELS); in __smq_create()
1803 if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) in __smq_create()
1806 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) in __smq_create()
1809 sentinels_init(mq); in __smq_create()
1810 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; in __smq_create()
1812 mq->next_hotspot_period = jiffies; in __smq_create()
1813 mq->next_cache_period = jiffies; in __smq_create()
1815 mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */ in __smq_create()
1816 if (!mq->bg_work) in __smq_create()
1819 mq->migrations_allowed = migrations_allowed; in __smq_create()
1820 mq->cleaner = cleaner; in __smq_create()
1822 return &mq->policy; in __smq_create()
1825 h_exit(&mq->hotspot_table); in __smq_create()
1827 h_exit(&mq->table); in __smq_create()
1829 free_bitset(mq->cache_hit_bits); in __smq_create()
1831 free_bitset(mq->hotspot_hit_bits); in __smq_create()
1833 space_exit(&mq->es); in __smq_create()
1835 kfree(mq); in __smq_create()
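
__smq_create() sizes one entry space (space_init()) and then carves it into contiguous index ranges with init_allocator(): writeback sentinels, demote sentinels, hotspot entries, then cache entries, which is why infer_cblock() can recover a cblock from an entry's index alone. The arithmetic below illustrates that layout; all of the sizes are made-up example values.

#include <stdio.h>

int main(void)
{
        unsigned int nr_sentinels_per_queue = 2 * 64;   /* assumption: 2 banks x levels */
        unsigned int total_sentinels = 2 * nr_sentinels_per_queue;
        unsigned int nr_hotspot_blocks = 4096;          /* assumption */
        unsigned int cache_size = 16384;                /* assumption */

        unsigned int wb_begin      = 0;
        unsigned int demote_begin  = nr_sentinels_per_queue;
        unsigned int hotspot_begin = total_sentinels;
        unsigned int cache_begin   = total_sentinels + nr_hotspot_blocks;
        unsigned int nr_entries    = cache_begin + cache_size;

        printf("entry space: %u entries\n", nr_entries);
        printf("writeback sentinels: [%u, %u)\n", wb_begin, demote_begin);
        printf("demote sentinels:    [%u, %u)\n", demote_begin, hotspot_begin);
        printf("hotspot entries:     [%u, %u)\n", hotspot_begin, cache_begin);
        printf("cache entries:       [%u, %u)\n", cache_begin, nr_entries);

        /* infer_cblock(): the entry's index minus the start of the cache range */
        unsigned int entry_index = cache_begin + 37;
        printf("entry %u -> cblock %u\n", entry_index, entry_index - cache_begin);
        return 0;
}
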