// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"

/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned int safe_div(unsigned int n, unsigned int d)
{
	return d ? n / d : 0u;
}

static unsigned int safe_mod(unsigned int n, unsigned int d)
{
	return d ? n % d : 0u;
}

/*----------------------------------------------------------------*/

struct entry {
	unsigned int hash_next:28;
	unsigned int prev:28;
	unsigned int next:28;
	unsigned int level:6;
	bool dirty:1;
	bool allocated:1;
	bool sentinel:1;
	bool pending_work:1;

	dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)
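
/*
 * INDEXER_NULL is the all-ones 28-bit value, matching the width of the
 * hash_next/prev/next index fields in struct entry above; it plays the
 * role NULL would if entries were chained with pointers.
 */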

/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static int space_init(struct entry_space *es, unsigned int nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}

static void space_exit(struct entry_space *es)
{
	vfree(es->begin);
}

static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
	struct entry *e;

	e = es->begin + block;
	BUG_ON(e >= es->end);

	return e;
}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
}

/*----------------------------------------------------------------*/

struct ilist {
	unsigned int nr_elts;	/* excluding sentinel entries */
	unsigned int head, tail;
};
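
/*
 * An ilist is a doubly linked list of entries chained by index into an
 * entry_space rather than by pointer; INDEXER_NULL terminates the list
 * at both ends. Sentinel entries may be linked in, but they are not
 * counted in nr_elts.
 */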

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}

static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
	return l->head == INDEXER_NULL;
}

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = l_head(es, l);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;
	e->prev = l->tail;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{
	struct entry *prev = l_prev(es, old);

	if (!prev)
		l_add_head(es, l, e);

	else {
		e->prev = old->prev;
		e->next = to_index(es, old);
		prev->next = old->prev = to_index(es, e);

		if (!e->sentinel)
			l->nr_elts++;
	}
}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}

static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_head(es, l); e; e = l_next(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_tail(es, l); e; e = l_prev(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of LRU lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders
 * the most accessed entries into the top levels and the least accessed
 * into the bottom. This structure is *much* better than a single LRU
 * list.
 */
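
/*
 * For example, with four levels an entry that keeps getting hit migrates
 * level 0 -> 1 -> 2 -> 3, while demotion candidates are taken from the
 * lowest populated level (see q_peek() below); a burst of one-off
 * accesses therefore cannot flush long-term hot entries the way it would
 * from a single LRU list.
 */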
#define MAX_LEVELS 64u

struct queue {
	struct entry_space *es;

	unsigned int nr_elts;
	unsigned int nr_levels;
	struct ilist qs[MAX_LEVELS];

	/*
	 * We maintain a count of the number of entries we would like in each
	 * level.
	 */
	unsigned int last_target_nr_elts;
	unsigned int nr_top_levels;
	unsigned int nr_in_top_levels;
	unsigned int target_count[MAX_LEVELS];
};

static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
	unsigned int i;

	q->es = es;
	q->nr_elts = 0;
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		l_init(q->qs + i);
		q->target_count[i] = 0u;
	}

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;
}

static unsigned int q_size(struct queue *q)
{
	return q->nr_elts;
}

/*
 * Insert an entry at the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_tail(q->es, q->qs + e->level, e);
}

static void q_push_front(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_head(q->es, q->qs + e->level, e);
}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_before(q->es, q->qs + e->level, old, e);
}

static void q_del(struct queue *q, struct entry *e)
{
	l_del(q->es, q->qs + e->level, e);
	if (!e->sentinel)
		q->nr_elts--;
}

/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
	unsigned int level;
	struct entry *e;

	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (e->sentinel) {
				if (can_cross_sentinel)
					continue;
				else
					break;
			}

			return e;
		}

	return NULL;
}

static struct entry *q_pop(struct queue *q)
{
	struct entry *e = q_peek(q, q->nr_levels, true);

	if (e)
		q_del(q, e);

	return e;
}

/*
 * This function assumes there is a non-sentinel entry to pop. It's only
 * used by redistribute, so we know this is true. It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
	struct entry *e;

	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			if (!e->sentinel) {
				l_del(q->es, q->qs + e->level, e);
				return e;
			}

	return NULL;
}

static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
				    unsigned int lbegin, unsigned int lend)
{
	unsigned int level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}
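
/*
 * For example, spreading nr_elts = 10 over the four levels [0, 4) gives
 * entries_per_level = 2 and remainder = 2, so the targets become
 * 3, 3, 2, 2: the remainder is soaked up one entry per level from lbegin
 * upwards.
 */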

/*
 * Typically we have fewer elements in the top few levels, which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

	else {
		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
		else
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
	}
}

static void q_redistribute(struct queue *q)
{
	unsigned int target, level;
	struct ilist *l, *l_above;
	struct entry *e;

	q_set_targets(q);

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		l = q->qs + level;
		target = q->target_count[level];

		/*
		 * Pull down some entries from the level above.
		 */
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			if (!e) {
				/* bug in nr_elts */
				break;
			}

			e->level = level;
			l_add_tail(q->es, l, e);
		}

		/*
		 * Push some entries up.
		 */
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);

			if (!e)
				/* bug in nr_elts */
				break;

			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
		}
	}
}

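/*
 * Promote an entry, pushing it up by extra_levels (capped at the top
 * level). To keep the levels balanced we swap: the first non-sentinel
 * entry of the destination level is demoted into the promoted entry's
 * old level. For the dirty queue the caller passes the level's two
 * writeback sentinels (s1 and s2) so the demoted entry can be placed
 * relative to them, preserving its writeback ordering.
 */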
static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
		      struct entry *s1, struct entry *s2)
{
	struct entry *de;
	unsigned int sentinels_passed = 0;
	unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
			sentinels_passed++;

		if (de) {
			q_del(q, de);
			de->level = e->level;
			if (s1) {
				switch (sentinels_passed) {
				case 0:
					q_push_before(q, s1, de);
					break;

				case 1:
					q_push_before(q, s2, de);
					break;

				default:
					q_push(q, de);
				}
			} else
				q_push(q, de);
		}
	}

	q_del(q, e);
	e->level = new_level;
	q_push(q, e);
}

/*----------------------------------------------------------------*/

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))

struct stats {
	unsigned int hit_threshold;
	unsigned int hits;
	unsigned int misses;
};

enum performance {
	Q_POOR,
	Q_FAIR,
	Q_WELL
};

static void stats_init(struct stats *s, unsigned int nr_levels)
{
	s->hit_threshold = (nr_levels * 3u) / 4u;
	s->hits = 0u;
	s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
	s->hits = s->misses = 0u;
}

static void stats_level_accessed(struct stats *s, unsigned int level)
{
	if (level >= s->hit_threshold)
		s->hits++;
	else
		s->misses++;
}

static void stats_miss(struct stats *s)
{
	s->misses++;
}

/*
 * There are times when we don't have any confidence in the hotspot
 * queue, such as when a fresh cache is created and the blocks have been
 * spread out across the levels, or when the I/O load changes. We detect
 * this by seeing how often a lookup is in the top levels of the hotspot
 * queue.
 */
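
/*
 * confidence below is a hit fraction with FP_SHIFT fractional bits, so
 * with FP_SHIFT == 8 it ranges over 0..256 and SIXTEENTH and EIGHTH are
 * 16 and 32. For example, 10 hits against 90 misses gives
 * (10 << 8) / 100 == 25, which lands between the two thresholds and is
 * assessed as Q_FAIR.
 */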
static enum performance stats_assess(struct stats *s)
{
	unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
		return Q_POOR;

	else if (confidence < EIGHTH)
		return Q_FAIR;

	else
		return Q_WELL;
}

/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;
	unsigned int *buckets;
};

/*
 * All cache entries are stored in a chained hash table. To save space we
 * use indexing again, and only store indexes to the next entry.
 */
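/*
 * The bucket count is nr_entries / 4 (with a floor of 16) rounded up to
 * a power of two, so __ffs() below is just a cheap log2 and hash_bits is
 * the number of bits the hash function should produce.
 */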
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
	unsigned int i, nr_buckets;

	ht->es = es;
	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
	if (!ht->buckets)
		return -ENOMEM;

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

	return 0;
}

static void h_exit(struct smq_hash_table *ht)
{
	vfree(ht->buckets);
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
	return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
	return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, h, e);
}

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
				struct entry **prev)
{
	struct entry *e;

	*prev = NULL;
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)
			return e;

		*prev = e;
	}

	return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
		       struct entry *e, struct entry *prev)
{
	if (prev)
		prev->hash_next = e->hash_next;
	else
		ht->buckets[h] = e->hash_next;
}

/*
 * Looks up an entry; also moves it to the front of its bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
	struct entry *e, *prev;
	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
	if (e && prev) {
		/*
		 * Move to the front because this entry is likely
		 * to be hit again.
		 */
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);
	}

	return e;
}

static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *prev;

	/*
	 * The downside of using a singly linked list is we have to
	 * iterate the bucket to remove an item.
	 */
	e = __h_lookup(ht, h, e->oblock, &prev);
	if (e)
		__h_unlink(ht, h, e, prev);
}

/*----------------------------------------------------------------*/

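/*
 * An entry_alloc hands out entries from a contiguous sub-range of an
 * entry_space, keeping the unallocated ones on a free list. Indices
 * from get_index() are relative to begin, which is what lets cache
 * entries map straight onto cblocks (see infer_cblock() below).
 */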
struct entry_alloc {
	struct entry_space *es;
	unsigned int begin;

	unsigned int nr_allocated;
	struct ilist free;
};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned int begin, unsigned int end)
{
	unsigned int i;

	ea->es = es;
	ea->nr_allocated = 0u;
	ea->begin = begin;

	l_init(&ea->free);
	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}

static void init_entry(struct entry *e)
{
	/*
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits, which remain constant.
	 */
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->level = 0u;
	e->dirty = true;	/* FIXME: audit */
	e->allocated = true;
	e->sentinel = false;
	e->pending_work = false;
}

static struct entry *alloc_entry(struct entry_alloc *ea)
{
	struct entry *e;

	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	ea->nr_allocated--;
	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);
}

static bool allocator_empty(struct entry_alloc *ea)
{
	return l_empty(&ea->free);
}

static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
	return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
	return __get_entry(ea->es, ea->begin + index);
}

/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)

struct smq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	spinlock_t lock;
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned int nr_hotspot_blocks;
	unsigned int cache_blocks_per_hotspot_block;
	unsigned int hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	/*
	 * We maintain three queues of entries. The cache proper,
	 * consisting of a clean and a dirty queue, contains the currently
	 * active mappings. The hotspot queue uses a larger block size to
	 * track blocks that are being hit frequently and are potential
	 * candidates for promotion to the cache.
	 */
	struct queue hotspot;
	struct queue clean;
	struct queue dirty;

	struct stats hotspot_stats;
	struct stats cache_stats;

	/*
	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.
	 */
	unsigned int tick;

	/*
	 * The hash tables allow us to quickly find an entry by origin
	 * block.
	 */
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned int write_promote_level;
	unsigned int read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed:1;

	/*
	 * If this is set the policy will try and clean the whole cache
	 * even if the device is not idle.
	 */
	bool cleaner:1;
};

/*----------------------------------------------------------------*/

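/*
 * Each sentinel allocator holds two complete sets of sentinels: one set
 * at indices 0..NR_CACHE_LEVELS - 1 and the other directly after it.
 * The 'which' flag selects a set; update_sentinels() flips the current
 * set each period and moves it to the back of the queues, so the two
 * sets bracket the entries of each level by age.
 */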
static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}

static void __update_writeback_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->dirty;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = writeback_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void __update_demote_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->clean;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = demote_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void update_sentinels(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_writeback_period)) {
		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
		__update_writeback_sentinels(mq);
	}

	if (time_after(jiffies, mq->next_demote_period)) {
		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
		mq->current_demote_sentinels = !mq->current_demote_sentinels;
		__update_demote_sentinels(mq);
	}
}

static void __sentinels_init(struct smq_policy *mq)
{
	unsigned int level;
	struct entry *sentinel;

	for (level = 0; level < NR_CACHE_LEVELS; level++) {
		sentinel = writeback_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->dirty, sentinel);

		sentinel = demote_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->clean, sentinel);
	}
}

static void sentinels_init(struct smq_policy *mq)
{
	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
	mq->next_demote_period = jiffies + DEMOTE_PERIOD;

	mq->current_writeback_sentinels = false;
	mq->current_demote_sentinels = false;
	__sentinels_init(mq);

	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
	mq->current_demote_sentinels = !mq->current_demote_sentinels;
	__sentinels_init(mq);
}

/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{
	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
}

static void push_queue(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push(&mq->dirty, e);
	else
		q_push(&mq->clean, e);
}

// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue(mq, e);
}

static void push_queue_front(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push_front(&mq->dirty, e);
	else
		q_push_front(&mq->clean, e);
}

static void push_front(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue_front(mq, e);
}

static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
	return to_cblock(get_index(&mq->cache_alloc, e));
}

static void requeue(struct smq_policy *mq, struct entry *e)
{
	/*
	 * Pending work has temporarily been taken out of the queues.
	 */
	if (e->pending_work)
		return;

	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		if (!e->dirty) {
			q_requeue(&mq->clean, e, 1u, NULL, NULL);
			return;
		}

		q_requeue(&mq->dirty, e, 1u,
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
	}
}

static unsigned int default_promote_level(struct smq_policy *mq)
{
	/*
	 * The promote level depends on the current performance of the
	 * cache.
	 *
	 * If the cache is performing badly, then we can't afford
	 * to promote much without causing performance to drop below that
	 * of the origin device.
	 *
	 * If the cache is performing well, then we don't need to promote
	 * much. If it isn't broken, don't fix it.
	 *
	 * If the cache is middling then we promote more.
	 *
	 * This scheme reminds me of a graph of entropy vs probability of a
	 * binary variable.
	 */
	static const unsigned int table[] = {
		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
	};

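	/*
	 * The index below is the hit fraction in sixteenths, so it spans
	 * the seventeen table slots 0..16. For example an even split of
	 * hits and misses gives index 8 and returns 7, whereas an all-hit
	 * history gives index 16 and returns 1.
	 */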
	unsigned int hits = mq->cache_stats.hits;
	unsigned int misses = mq->cache_stats.misses;
	unsigned int index = safe_div(hits << 4u, hits + misses);
	return table[index];
}

static void update_promote_levels(struct smq_policy *mq)
{
	/*
	 * If there are unused cache entries then we want to be really
	 * eager to promote.
	 */
	unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);

	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);

	/*
	 * If the hotspot queue is performing badly then we have little
	 * confidence that we know which blocks to promote. So we cut down
	 * the number of promotions.
	 */
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		threshold_level /= 4u;
		break;

	case Q_FAIR:
		threshold_level /= 2u;
		break;

	case Q_WELL:
		break;
	}

	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
	mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
}

/*
 * If the hotspot queue is performing badly, then we try and move entries
 * around more quickly.
 */
static void update_level_jump(struct smq_policy *mq)
{
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		mq->hotspot_level_jump = 4u;
		break;

	case Q_FAIR:
		mq->hotspot_level_jump = 2u;
		break;

	case Q_WELL:
		mq->hotspot_level_jump = 1u;
		break;
	}
}

static void end_hotspot_period(struct smq_policy *mq)
{
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
	update_promote_levels(mq);

	if (time_after(jiffies, mq->next_hotspot_period)) {
		update_level_jump(mq);
		q_redistribute(&mq->hotspot);
		stats_reset(&mq->hotspot_stats);
		mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
	}
}

static void end_cache_period(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_cache_period)) {
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

		q_redistribute(&mq->dirty);
		q_redistribute(&mq->clean);
		stats_reset(&mq->cache_stats);

		mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
	}
}

/*----------------------------------------------------------------*/

/*
 * Targets are given as a percentage.
 */
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u

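/*
 * For example, with a cache of 1000 blocks and FREE_TARGET of 25u,
 * percent_to_target() yields 250: the free target is met once at least a
 * quarter of the cache is free, with entries already queued for demotion
 * counting as free (see free_target_met() below).
 */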
percent_to_target(struct smq_policy * mq,unsigned int p)113686a3238cSHeinz Mauelshagen static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
113766a63635SJoe Thornber {
1138b29d4986SJoe Thornber return from_cblock(mq->cache_size) * p / 100u;
113966a63635SJoe Thornber }
114066a63635SJoe Thornber
1141b29d4986SJoe Thornber static bool clean_target_met(struct smq_policy *mq, bool idle)
1142b29d4986SJoe Thornber {
1143b29d4986SJoe Thornber /*
1144b29d4986SJoe Thornber 	 * The cache may not be fully populated, so we cannot rely on
1145b29d4986SJoe Thornber 	 * the size of the clean queue alone.
1146b29d4986SJoe Thornber */
1147*1e4ab7b4SJoe Thornber if (idle || mq->cleaner) {
1148b29d4986SJoe Thornber /*
1149b29d4986SJoe Thornber * We'd like to clean everything.
1150b29d4986SJoe Thornber */
1151b29d4986SJoe Thornber return q_size(&mq->dirty) == 0u;
115297dfb203SMike Snitzer }
115397dfb203SMike Snitzer
11542e633095SJoe Thornber /*
11552e633095SJoe Thornber * If we're busy we don't worry about cleaning at all.
11562e633095SJoe Thornber */
11572e633095SJoe Thornber return true;
1158b29d4986SJoe Thornber }
1159b29d4986SJoe Thornber
11606cf4cc8fSJoe Thornber static bool free_target_met(struct smq_policy *mq)
1161b29d4986SJoe Thornber {
116286a3238cSHeinz Mauelshagen unsigned int nr_free;
1163b29d4986SJoe Thornber
116497dfb203SMike Snitzer nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
1165b29d4986SJoe Thornber return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
1166b29d4986SJoe Thornber percent_to_target(mq, FREE_TARGET);
1167b29d4986SJoe Thornber }
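
/*
 * Demotions already queued on the background tracker are counted as
 * free, so we stop scheduling new demotions once enough are in flight
 * rather than waiting for them all to complete.
 */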
1168b29d4986SJoe Thornber
1169b29d4986SJoe Thornber /*----------------------------------------------------------------*/
1170b29d4986SJoe Thornber
1171b29d4986SJoe Thornber static void mark_pending(struct smq_policy *mq, struct entry *e)
1172b29d4986SJoe Thornber {
1173b29d4986SJoe Thornber BUG_ON(e->sentinel);
1174b29d4986SJoe Thornber BUG_ON(!e->allocated);
1175b29d4986SJoe Thornber BUG_ON(e->pending_work);
1176b29d4986SJoe Thornber e->pending_work = true;
1177b29d4986SJoe Thornber }
1178b29d4986SJoe Thornber
1179b29d4986SJoe Thornber static void clear_pending(struct smq_policy *mq, struct entry *e)
1180b29d4986SJoe Thornber {
1181b29d4986SJoe Thornber BUG_ON(!e->pending_work);
1182b29d4986SJoe Thornber e->pending_work = false;
1183b29d4986SJoe Thornber }
1184b29d4986SJoe Thornber
1185deb71918SJoe Thornber static void queue_writeback(struct smq_policy *mq, bool idle)
1186b29d4986SJoe Thornber {
1187b29d4986SJoe Thornber int r;
1188b29d4986SJoe Thornber struct policy_work work;
1189b29d4986SJoe Thornber struct entry *e;
1190b29d4986SJoe Thornber
1191deb71918SJoe Thornber e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
1192b29d4986SJoe Thornber if (e) {
1193b29d4986SJoe Thornber mark_pending(mq, e);
1194b29d4986SJoe Thornber q_del(&mq->dirty, e);
1195b29d4986SJoe Thornber
1196b29d4986SJoe Thornber work.op = POLICY_WRITEBACK;
1197b29d4986SJoe Thornber work.oblock = e->oblock;
1198b29d4986SJoe Thornber work.cblock = infer_cblock(mq, e);
1199b29d4986SJoe Thornber
1200b29d4986SJoe Thornber r = btracker_queue(mq->bg_work, &work, NULL);
12011e72a8e8SJoe Thornber if (r) {
12021e72a8e8SJoe Thornber clear_pending(mq, e);
12031e72a8e8SJoe Thornber q_push_front(&mq->dirty, e);
12041e72a8e8SJoe Thornber }
1205b29d4986SJoe Thornber }
1206b29d4986SJoe Thornber }
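
/*
 * Note the failure path above: if btracker_queue() cannot accept the
 * work, the entry is pushed back onto the *front* of the dirty queue
 * so it is the first candidate next time.
 */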
1207b29d4986SJoe Thornber
1208b29d4986SJoe Thornber static void queue_demotion(struct smq_policy *mq)
1209b29d4986SJoe Thornber {
12101e72a8e8SJoe Thornber int r;
1211b29d4986SJoe Thornber struct policy_work work;
1212b29d4986SJoe Thornber struct entry *e;
1213b29d4986SJoe Thornber
1214bab5d988SIgor Stoppa if (WARN_ON_ONCE(!mq->migrations_allowed))
1215b29d4986SJoe Thornber return;
1216b29d4986SJoe Thornber
1217a8cd1ebaSJoe Thornber e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
1218b29d4986SJoe Thornber if (!e) {
121978c45607SJoe Thornber if (!clean_target_met(mq, true))
1220deb71918SJoe Thornber queue_writeback(mq, false);
1221b29d4986SJoe Thornber return;
1222b29d4986SJoe Thornber }
1223b29d4986SJoe Thornber
1224b29d4986SJoe Thornber mark_pending(mq, e);
1225b29d4986SJoe Thornber q_del(&mq->clean, e);
1226b29d4986SJoe Thornber
1227b29d4986SJoe Thornber work.op = POLICY_DEMOTE;
1228b29d4986SJoe Thornber work.oblock = e->oblock;
1229b29d4986SJoe Thornber work.cblock = infer_cblock(mq, e);
12301e72a8e8SJoe Thornber r = btracker_queue(mq->bg_work, &work, NULL);
12311e72a8e8SJoe Thornber if (r) {
12321e72a8e8SJoe Thornber clear_pending(mq, e);
12331e72a8e8SJoe Thornber q_push_front(&mq->clean, e);
12341e72a8e8SJoe Thornber }
1235b29d4986SJoe Thornber }
1236b29d4986SJoe Thornber
1237b29d4986SJoe Thornber static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
1238b29d4986SJoe Thornber struct policy_work **workp)
1239b29d4986SJoe Thornber {
12401e72a8e8SJoe Thornber int r;
1241b29d4986SJoe Thornber struct entry *e;
1242b29d4986SJoe Thornber struct policy_work work;
1243b29d4986SJoe Thornber
1244b29d4986SJoe Thornber if (!mq->migrations_allowed)
1245b29d4986SJoe Thornber return;
1246b29d4986SJoe Thornber
1247b29d4986SJoe Thornber if (allocator_empty(&mq->cache_alloc)) {
1248ce1d64e8SJoe Thornber /*
1249ce1d64e8SJoe Thornber * We always claim to be 'idle' to ensure some demotions happen
1250ce1d64e8SJoe Thornber * with continuous loads.
1251ce1d64e8SJoe Thornber */
12526cf4cc8fSJoe Thornber if (!free_target_met(mq))
1253b29d4986SJoe Thornber queue_demotion(mq);
1254b29d4986SJoe Thornber return;
1255b29d4986SJoe Thornber }
1256b29d4986SJoe Thornber
1257b29d4986SJoe Thornber if (btracker_promotion_already_present(mq->bg_work, oblock))
1258b29d4986SJoe Thornber return;
1259b29d4986SJoe Thornber
1260b29d4986SJoe Thornber /*
1261b29d4986SJoe Thornber * We allocate the entry now to reserve the cblock. If the
1262b29d4986SJoe Thornber * background work is aborted we must remember to free it.
1263b29d4986SJoe Thornber */
1264b29d4986SJoe Thornber e = alloc_entry(&mq->cache_alloc);
1265b29d4986SJoe Thornber BUG_ON(!e);
1266b29d4986SJoe Thornber e->pending_work = true;
1267b29d4986SJoe Thornber work.op = POLICY_PROMOTE;
1268b29d4986SJoe Thornber work.oblock = oblock;
1269b29d4986SJoe Thornber work.cblock = infer_cblock(mq, e);
12701e72a8e8SJoe Thornber r = btracker_queue(mq->bg_work, &work, workp);
12711e72a8e8SJoe Thornber if (r)
12721e72a8e8SJoe Thornber free_entry(&mq->cache_alloc, e);
1273b29d4986SJoe Thornber }
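
/*
 * Sketch of a promotion's lifecycle: alloc_entry() reserves the
 * destination cblock immediately; the entry is only hashed and queued
 * later, in __complete_background_work(), on success - or freed again
 * if the copy fails or btracker_queue() refuses the work.
 */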
1274b29d4986SJoe Thornber
1275b29d4986SJoe Thornber /*----------------------------------------------------------------*/
1276b29d4986SJoe Thornber
127766a63635SJoe Thornber enum promote_result {
127866a63635SJoe Thornber PROMOTE_NOT,
127966a63635SJoe Thornber PROMOTE_TEMPORARY,
128066a63635SJoe Thornber PROMOTE_PERMANENT
128166a63635SJoe Thornber };
128266a63635SJoe Thornber
128366a63635SJoe Thornber /*
128466a63635SJoe Thornber * Converts a boolean into a promote result.
128566a63635SJoe Thornber */
128666a63635SJoe Thornber static enum promote_result maybe_promote(bool promote)
128766a63635SJoe Thornber {
128866a63635SJoe Thornber return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
128966a63635SJoe Thornber }
129066a63635SJoe Thornber
1291b29d4986SJoe Thornber static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
1292b29d4986SJoe Thornber int data_dir, bool fast_promote)
129366a63635SJoe Thornber {
1294b29d4986SJoe Thornber if (data_dir == WRITE) {
129566a63635SJoe Thornber if (!allocator_empty(&mq->cache_alloc) && fast_promote)
129666a63635SJoe Thornber return PROMOTE_TEMPORARY;
129766a63635SJoe Thornber
129866a63635SJoe Thornber return maybe_promote(hs_e->level >= mq->write_promote_level);
129966a63635SJoe Thornber } else
130066a63635SJoe Thornber return maybe_promote(hs_e->level >= mq->read_promote_level);
130166a63635SJoe Thornber }
130266a63635SJoe Thornber
130366a63635SJoe Thornber static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
130466a63635SJoe Thornber {
130566a63635SJoe Thornber sector_t r = from_oblock(b);
130666a63635SJoe Thornber (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
130766a63635SJoe Thornber return to_oblock(r);
130866a63635SJoe Thornber }
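
/*
 * e.g. with cache_blocks_per_hotspot_block = 16, origin blocks 0-15
 * map to hotspot block 0, blocks 16-31 to hotspot block 1, and so on.
 */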
130966a63635SJoe Thornber
1310b29d4986SJoe Thornber static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
131166a63635SJoe Thornber {
131286a3238cSHeinz Mauelshagen unsigned int hi;
131366a63635SJoe Thornber dm_oblock_t hb = to_hblock(mq, b);
131466a63635SJoe Thornber struct entry *e = h_lookup(&mq->hotspot_table, hb);
131566a63635SJoe Thornber
131666a63635SJoe Thornber if (e) {
131766a63635SJoe Thornber stats_level_accessed(&mq->hotspot_stats, e->level);
131866a63635SJoe Thornber
131966a63635SJoe Thornber hi = get_index(&mq->hotspot_alloc, e);
132066a63635SJoe Thornber q_requeue(&mq->hotspot, e,
132166a63635SJoe Thornber test_and_set_bit(hi, mq->hotspot_hit_bits) ?
1322b29d4986SJoe Thornber 0u : mq->hotspot_level_jump,
1323b29d4986SJoe Thornber NULL, NULL);
132466a63635SJoe Thornber
132566a63635SJoe Thornber } else {
132666a63635SJoe Thornber stats_miss(&mq->hotspot_stats);
132766a63635SJoe Thornber
132866a63635SJoe Thornber e = alloc_entry(&mq->hotspot_alloc);
132966a63635SJoe Thornber if (!e) {
133066a63635SJoe Thornber e = q_pop(&mq->hotspot);
133166a63635SJoe Thornber if (e) {
133266a63635SJoe Thornber h_remove(&mq->hotspot_table, e);
133366a63635SJoe Thornber hi = get_index(&mq->hotspot_alloc, e);
133466a63635SJoe Thornber clear_bit(hi, mq->hotspot_hit_bits);
133566a63635SJoe Thornber }
133666a63635SJoe Thornber
133766a63635SJoe Thornber }
133866a63635SJoe Thornber
133966a63635SJoe Thornber if (e) {
134066a63635SJoe Thornber e->oblock = hb;
134166a63635SJoe Thornber q_push(&mq->hotspot, e);
134266a63635SJoe Thornber h_insert(&mq->hotspot_table, e);
134366a63635SJoe Thornber }
134466a63635SJoe Thornber }
134566a63635SJoe Thornber
134666a63635SJoe Thornber return e;
134766a63635SJoe Thornber }
134866a63635SJoe Thornber
134966a63635SJoe Thornber /*----------------------------------------------------------------*/
135066a63635SJoe Thornber
135166a63635SJoe Thornber /*
135266a63635SJoe Thornber * Public interface, via the policy struct. See dm-cache-policy.h for a
135366a63635SJoe Thornber * description of these.
135466a63635SJoe Thornber */
135566a63635SJoe Thornber
135666a63635SJoe Thornber static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
135766a63635SJoe Thornber {
135866a63635SJoe Thornber return container_of(p, struct smq_policy, policy);
135966a63635SJoe Thornber }
136066a63635SJoe Thornber
136166a63635SJoe Thornber static void smq_destroy(struct dm_cache_policy *p)
136266a63635SJoe Thornber {
136366a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
136466a63635SJoe Thornber
1365b29d4986SJoe Thornber btracker_destroy(mq->bg_work);
136666a63635SJoe Thornber h_exit(&mq->hotspot_table);
136766a63635SJoe Thornber h_exit(&mq->table);
136866a63635SJoe Thornber free_bitset(mq->hotspot_hit_bits);
136966a63635SJoe Thornber free_bitset(mq->cache_hit_bits);
137066a63635SJoe Thornber space_exit(&mq->es);
137166a63635SJoe Thornber kfree(mq);
137266a63635SJoe Thornber }
137366a63635SJoe Thornber
1374b29d4986SJoe Thornber /*----------------------------------------------------------------*/
1375b29d4986SJoe Thornber
1376b29d4986SJoe Thornber static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
1377b29d4986SJoe Thornber int data_dir, bool fast_copy,
1378b29d4986SJoe Thornber struct policy_work **work, bool *background_work)
137966a63635SJoe Thornber {
1380b29d4986SJoe Thornber struct entry *e, *hs_e;
1381b29d4986SJoe Thornber enum promote_result pr;
138266a63635SJoe Thornber
1383b29d4986SJoe Thornber *background_work = false;
138466a63635SJoe Thornber
138566a63635SJoe Thornber e = h_lookup(&mq->table, oblock);
138666a63635SJoe Thornber if (e) {
1387b29d4986SJoe Thornber stats_level_accessed(&mq->cache_stats, e->level);
1388b29d4986SJoe Thornber
1389b29d4986SJoe Thornber requeue(mq, e);
139066a63635SJoe Thornber *cblock = infer_cblock(mq, e);
1391b29d4986SJoe Thornber return 0;
1392b29d4986SJoe Thornber
1393b29d4986SJoe Thornber } else {
1394b29d4986SJoe Thornber stats_miss(&mq->cache_stats);
1395b29d4986SJoe Thornber
1396b29d4986SJoe Thornber /*
1397b29d4986SJoe Thornber * The hotspot queue only gets updated with misses.
1398b29d4986SJoe Thornber */
1399b29d4986SJoe Thornber hs_e = update_hotspot_queue(mq, oblock);
1400b29d4986SJoe Thornber
1401b29d4986SJoe Thornber pr = should_promote(mq, hs_e, data_dir, fast_copy);
1402b29d4986SJoe Thornber if (pr != PROMOTE_NOT) {
1403b29d4986SJoe Thornber queue_promotion(mq, oblock, work);
1404b29d4986SJoe Thornber *background_work = true;
1405b29d4986SJoe Thornber }
1406b29d4986SJoe Thornber
1407b29d4986SJoe Thornber return -ENOENT;
1408b29d4986SJoe Thornber }
1409b29d4986SJoe Thornber }
1410b29d4986SJoe Thornber
1411b29d4986SJoe Thornber static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
1412b29d4986SJoe Thornber int data_dir, bool fast_copy,
1413b29d4986SJoe Thornber bool *background_work)
1414b29d4986SJoe Thornber {
1415b29d4986SJoe Thornber int r;
1416b29d4986SJoe Thornber unsigned long flags;
1417b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
1418b29d4986SJoe Thornber
1419b29d4986SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1420b29d4986SJoe Thornber r = __lookup(mq, oblock, cblock,
1421b29d4986SJoe Thornber data_dir, fast_copy,
1422b29d4986SJoe Thornber NULL, background_work);
14234051aab7SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
142466a63635SJoe Thornber
142566a63635SJoe Thornber return r;
142666a63635SJoe Thornber }
142766a63635SJoe Thornber
1428b29d4986SJoe Thornber static int smq_lookup_with_work(struct dm_cache_policy *p,
1429b29d4986SJoe Thornber dm_oblock_t oblock, dm_cblock_t *cblock,
1430b29d4986SJoe Thornber int data_dir, bool fast_copy,
1431b29d4986SJoe Thornber struct policy_work **work)
143266a63635SJoe Thornber {
1433b29d4986SJoe Thornber int r;
1434b29d4986SJoe Thornber bool background_queued;
1435b29d4986SJoe Thornber unsigned long flags;
1436b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
143766a63635SJoe Thornber
1438b29d4986SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1439b29d4986SJoe Thornber r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
1440b29d4986SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
144166a63635SJoe Thornber
1442b29d4986SJoe Thornber return r;
144366a63635SJoe Thornber }
144466a63635SJoe Thornber
1445b29d4986SJoe Thornber static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
1446b29d4986SJoe Thornber struct policy_work **result)
1447b29d4986SJoe Thornber {
1448b29d4986SJoe Thornber int r;
1449b29d4986SJoe Thornber unsigned long flags;
1450b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
1451b29d4986SJoe Thornber
1452b29d4986SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1453b29d4986SJoe Thornber r = btracker_issue(mq->bg_work, result);
1454b29d4986SJoe Thornber if (r == -ENODATA) {
14556cf4cc8fSJoe Thornber if (!clean_target_met(mq, idle)) {
1456deb71918SJoe Thornber queue_writeback(mq, idle);
1457b29d4986SJoe Thornber r = btracker_issue(mq->bg_work, result);
1458b29d4986SJoe Thornber }
14596cf4cc8fSJoe Thornber }
1460b29d4986SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
1461b29d4986SJoe Thornber
1462b29d4986SJoe Thornber return r;
1463b29d4986SJoe Thornber }
1464b29d4986SJoe Thornber
1465b29d4986SJoe Thornber /*
1466b29d4986SJoe Thornber  * We need to clear any pending work flags that have been set and, if a
1467b29d4986SJoe Thornber  * promotion failed, free the entry reserved for the destination cblock.
1468b29d4986SJoe Thornber */
1469b29d4986SJoe Thornber static void __complete_background_work(struct smq_policy *mq,
1470b29d4986SJoe Thornber struct policy_work *work,
1471b29d4986SJoe Thornber bool success)
1472b29d4986SJoe Thornber {
1473b29d4986SJoe Thornber struct entry *e = get_entry(&mq->cache_alloc,
1474b29d4986SJoe Thornber from_cblock(work->cblock));
1475b29d4986SJoe Thornber
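	/*
	 * The short comments below track the entry's state through each
	 * transition: 'h' = in the hash table, 'q' = on a queue,
	 * 'a' = allocated; '!' negates.
	 */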
1476b29d4986SJoe Thornber switch (work->op) {
1477b29d4986SJoe Thornber case POLICY_PROMOTE:
1478b29d4986SJoe Thornber // !h, !q, a
1479b29d4986SJoe Thornber clear_pending(mq, e);
1480b29d4986SJoe Thornber if (success) {
1481b29d4986SJoe Thornber e->oblock = work->oblock;
14824d44ec5aSJoe Thornber e->level = NR_CACHE_LEVELS - 1;
1483b29d4986SJoe Thornber push(mq, e);
1484b29d4986SJoe Thornber // h, q, a
1485b29d4986SJoe Thornber } else {
1486b29d4986SJoe Thornber free_entry(&mq->cache_alloc, e);
1487b29d4986SJoe Thornber // !h, !q, !a
1488b29d4986SJoe Thornber }
1489b29d4986SJoe Thornber break;
1490b29d4986SJoe Thornber
1491b29d4986SJoe Thornber case POLICY_DEMOTE:
1492b29d4986SJoe Thornber // h, !q, a
1493b29d4986SJoe Thornber if (success) {
1494b29d4986SJoe Thornber h_remove(&mq->table, e);
1495b29d4986SJoe Thornber free_entry(&mq->cache_alloc, e);
1496b29d4986SJoe Thornber // !h, !q, !a
1497b29d4986SJoe Thornber } else {
1498b29d4986SJoe Thornber clear_pending(mq, e);
1499b29d4986SJoe Thornber push_queue(mq, e);
1500b29d4986SJoe Thornber // h, q, a
1501b29d4986SJoe Thornber }
1502b29d4986SJoe Thornber break;
1503b29d4986SJoe Thornber
1504b29d4986SJoe Thornber case POLICY_WRITEBACK:
1505b29d4986SJoe Thornber // h, !q, a
1506b29d4986SJoe Thornber clear_pending(mq, e);
1507b29d4986SJoe Thornber push_queue(mq, e);
1508b29d4986SJoe Thornber // h, q, a
1509b29d4986SJoe Thornber break;
1510b29d4986SJoe Thornber }
1511b29d4986SJoe Thornber
1512b29d4986SJoe Thornber btracker_complete(mq->bg_work, work);
1513b29d4986SJoe Thornber }
1514b29d4986SJoe Thornber
1515b29d4986SJoe Thornber static void smq_complete_background_work(struct dm_cache_policy *p,
1516b29d4986SJoe Thornber struct policy_work *work,
1517b29d4986SJoe Thornber bool success)
151866a63635SJoe Thornber {
15194051aab7SJoe Thornber unsigned long flags;
152066a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
152166a63635SJoe Thornber
15224051aab7SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1523b29d4986SJoe Thornber __complete_background_work(mq, work, success);
15244051aab7SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
152566a63635SJoe Thornber }
152666a63635SJoe Thornber
1527b29d4986SJoe Thornber // State invariant: in_hash(oblock) -> in_hash(oblock); the entry stays in the hash table throughout.
1528b29d4986SJoe Thornber static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
1529b29d4986SJoe Thornber {
1530b29d4986SJoe Thornber struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1531b29d4986SJoe Thornber
1532b29d4986SJoe Thornber if (e->pending_work)
1533b29d4986SJoe Thornber e->dirty = set;
1534b29d4986SJoe Thornber else {
1535b29d4986SJoe Thornber del_queue(mq, e);
1536b29d4986SJoe Thornber e->dirty = set;
1537b29d4986SJoe Thornber push_queue(mq, e);
1538b29d4986SJoe Thornber }
1539b29d4986SJoe Thornber }
1540b29d4986SJoe Thornber
1541b29d4986SJoe Thornber static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1542b29d4986SJoe Thornber {
1543b29d4986SJoe Thornber unsigned long flags;
1544b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
1545b29d4986SJoe Thornber
1546b29d4986SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1547b29d4986SJoe Thornber __smq_set_clear_dirty(mq, cblock, true);
1548b29d4986SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
1549b29d4986SJoe Thornber }
1550b29d4986SJoe Thornber
1551b29d4986SJoe Thornber static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
155266a63635SJoe Thornber {
155366a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
15544051aab7SJoe Thornber unsigned long flags;
155566a63635SJoe Thornber
15564051aab7SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
1557b29d4986SJoe Thornber __smq_set_clear_dirty(mq, cblock, false);
15584051aab7SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
155966a63635SJoe Thornber }
156066a63635SJoe Thornber
156186a3238cSHeinz Mauelshagen static unsigned int random_level(dm_cblock_t cblock)
15629d1b404cSJoe Thornber {
1563e99dda8fSMike Snitzer return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
15649d1b404cSJoe Thornber }
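
/*
 * hash_32(..., 9) yields a 9 bit value (0-511); masking with
 * NR_CACHE_LEVELS - 1 keeps the low bits, giving each cblock a stable,
 * pseudo-random starting level (this assumes NR_CACHE_LEVELS is a
 * power of two).
 */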
15659d1b404cSJoe Thornber
156666a63635SJoe Thornber static int smq_load_mapping(struct dm_cache_policy *p,
156766a63635SJoe Thornber dm_oblock_t oblock, dm_cblock_t cblock,
1568b29d4986SJoe Thornber bool dirty, uint32_t hint, bool hint_valid)
156966a63635SJoe Thornber {
157066a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
157166a63635SJoe Thornber struct entry *e;
157266a63635SJoe Thornber
157366a63635SJoe Thornber e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
157466a63635SJoe Thornber e->oblock = oblock;
1575b29d4986SJoe Thornber e->dirty = dirty;
15769d1b404cSJoe Thornber e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
1577b29d4986SJoe Thornber e->pending_work = false;
157866a63635SJoe Thornber
1579b29d4986SJoe Thornber /*
1580b29d4986SJoe Thornber * When we load mappings we push ahead of both sentinels in order to
1581b29d4986SJoe Thornber * allow demotions and cleaning to occur immediately.
1582b29d4986SJoe Thornber */
1583b29d4986SJoe Thornber push_front(mq, e);
1584b29d4986SJoe Thornber
1585b29d4986SJoe Thornber return 0;
1586b29d4986SJoe Thornber }
1587b29d4986SJoe Thornber
1588b29d4986SJoe Thornber static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
1589b29d4986SJoe Thornber {
1590b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
1591b29d4986SJoe Thornber struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1592b29d4986SJoe Thornber
1593b29d4986SJoe Thornber if (!e->allocated)
1594b29d4986SJoe Thornber return -ENODATA;
1595b29d4986SJoe Thornber
1596b29d4986SJoe Thornber // FIXME: what if this block has pending background work?
1597b29d4986SJoe Thornber del_queue(mq, e);
1598b29d4986SJoe Thornber h_remove(&mq->table, e);
1599b29d4986SJoe Thornber free_entry(&mq->cache_alloc, e);
160066a63635SJoe Thornber return 0;
160166a63635SJoe Thornber }
160266a63635SJoe Thornber
16034e781b49SJoe Thornber static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
160466a63635SJoe Thornber {
160566a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
16064e781b49SJoe Thornber struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
160766a63635SJoe Thornber
16084e781b49SJoe Thornber if (!e->allocated)
16094e781b49SJoe Thornber return 0;
161066a63635SJoe Thornber
16114e781b49SJoe Thornber return e->level;
161266a63635SJoe Thornber }
161366a63635SJoe Thornber
161466a63635SJoe Thornber static dm_cblock_t smq_residency(struct dm_cache_policy *p)
161566a63635SJoe Thornber {
161666a63635SJoe Thornber dm_cblock_t r;
16174051aab7SJoe Thornber unsigned long flags;
161866a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
161966a63635SJoe Thornber
16204051aab7SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
162166a63635SJoe Thornber r = to_cblock(mq->cache_alloc.nr_allocated);
16224051aab7SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
162366a63635SJoe Thornber
162466a63635SJoe Thornber return r;
162566a63635SJoe Thornber }
162666a63635SJoe Thornber
1627fba10109SJoe Thornber static void smq_tick(struct dm_cache_policy *p, bool can_block)
162866a63635SJoe Thornber {
162966a63635SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
163066a63635SJoe Thornber unsigned long flags;
163166a63635SJoe Thornber
16324051aab7SJoe Thornber spin_lock_irqsave(&mq->lock, flags);
16334051aab7SJoe Thornber mq->tick++;
16344051aab7SJoe Thornber update_sentinels(mq);
16354051aab7SJoe Thornber end_hotspot_period(mq);
16364051aab7SJoe Thornber end_cache_period(mq);
16374051aab7SJoe Thornber spin_unlock_irqrestore(&mq->lock, flags);
163866a63635SJoe Thornber }
163966a63635SJoe Thornber
1640b29d4986SJoe Thornber static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
1641b29d4986SJoe Thornber {
1642b29d4986SJoe Thornber struct smq_policy *mq = to_smq_policy(p);
16430ef0b471SHeinz Mauelshagen
1644b29d4986SJoe Thornber mq->migrations_allowed = allow;
1645b29d4986SJoe Thornber }
1646b29d4986SJoe Thornber
16479ed84698SJoe Thornber /*
16489ed84698SJoe Thornber * smq has no config values, but the old mq policy did. To avoid breaking
16499ed84698SJoe Thornber * software we continue to accept these configurables for the mq policy,
16509ed84698SJoe Thornber * but they have no effect.
16519ed84698SJoe Thornber */
16529ed84698SJoe Thornber static int mq_set_config_value(struct dm_cache_policy *p,
16539ed84698SJoe Thornber const char *key, const char *value)
16549ed84698SJoe Thornber {
16559ed84698SJoe Thornber unsigned long tmp;
16569ed84698SJoe Thornber
16579ed84698SJoe Thornber if (kstrtoul(value, 10, &tmp))
16589ed84698SJoe Thornber return -EINVAL;
16599ed84698SJoe Thornber
16609ed84698SJoe Thornber if (!strcasecmp(key, "random_threshold") ||
16619ed84698SJoe Thornber !strcasecmp(key, "sequential_threshold") ||
16629ed84698SJoe Thornber !strcasecmp(key, "discard_promote_adjustment") ||
16639ed84698SJoe Thornber !strcasecmp(key, "read_promote_adjustment") ||
16649ed84698SJoe Thornber !strcasecmp(key, "write_promote_adjustment")) {
16659ed84698SJoe Thornber DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
16669ed84698SJoe Thornber return 0;
16679ed84698SJoe Thornber }
16689ed84698SJoe Thornber
16699ed84698SJoe Thornber return -EINVAL;
16709ed84698SJoe Thornber }
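
/*
 * For example (illustrative), creating a cache target whose policy
 * arguments are "mq 2 sequential_threshold 1024" still succeeds: the
 * key matches above, the DMWARN fires, and the value is ignored.
 */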
16719ed84698SJoe Thornber
16729ed84698SJoe Thornber static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
167386a3238cSHeinz Mauelshagen unsigned int maxlen, ssize_t *sz_ptr)
16749ed84698SJoe Thornber {
16759ed84698SJoe Thornber ssize_t sz = *sz_ptr;
16769ed84698SJoe Thornber
16779ed84698SJoe Thornber DMEMIT("10 random_threshold 0 "
16789ed84698SJoe Thornber "sequential_threshold 0 "
16799ed84698SJoe Thornber "discard_promote_adjustment 0 "
16809ed84698SJoe Thornber "read_promote_adjustment 0 "
16819ed84698SJoe Thornber "write_promote_adjustment 0 ");
16829ed84698SJoe Thornber
16839ed84698SJoe Thornber *sz_ptr = sz;
16849ed84698SJoe Thornber return 0;
16859ed84698SJoe Thornber }
16869ed84698SJoe Thornber
168766a63635SJoe Thornber /* Init the policy plugin interface function pointers. */
16889ed84698SJoe Thornber static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
168966a63635SJoe Thornber {
169066a63635SJoe Thornber mq->policy.destroy = smq_destroy;
169166a63635SJoe Thornber mq->policy.lookup = smq_lookup;
1692b29d4986SJoe Thornber mq->policy.lookup_with_work = smq_lookup_with_work;
1693b29d4986SJoe Thornber mq->policy.get_background_work = smq_get_background_work;
1694b29d4986SJoe Thornber mq->policy.complete_background_work = smq_complete_background_work;
169566a63635SJoe Thornber mq->policy.set_dirty = smq_set_dirty;
169666a63635SJoe Thornber mq->policy.clear_dirty = smq_clear_dirty;
169766a63635SJoe Thornber mq->policy.load_mapping = smq_load_mapping;
1698b29d4986SJoe Thornber mq->policy.invalidate_mapping = smq_invalidate_mapping;
16994e781b49SJoe Thornber mq->policy.get_hint = smq_get_hint;
170066a63635SJoe Thornber mq->policy.residency = smq_residency;
170166a63635SJoe Thornber mq->policy.tick = smq_tick;
1702b29d4986SJoe Thornber mq->policy.allow_migrations = smq_allow_migrations;
17039ed84698SJoe Thornber
17049ed84698SJoe Thornber if (mimic_mq) {
17059ed84698SJoe Thornber mq->policy.set_config_value = mq_set_config_value;
17069ed84698SJoe Thornber mq->policy.emit_config_values = mq_emit_config_values;
17079ed84698SJoe Thornber }
170866a63635SJoe Thornber }
170966a63635SJoe Thornber
171066a63635SJoe Thornber static bool too_many_hotspot_blocks(sector_t origin_size,
171166a63635SJoe Thornber sector_t hotspot_block_size,
171286a3238cSHeinz Mauelshagen unsigned int nr_hotspot_blocks)
171366a63635SJoe Thornber {
171466a63635SJoe Thornber return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
171566a63635SJoe Thornber }
171666a63635SJoe Thornber
171766a63635SJoe Thornber static void calc_hotspot_params(sector_t origin_size,
171866a63635SJoe Thornber sector_t cache_block_size,
171986a3238cSHeinz Mauelshagen unsigned int nr_cache_blocks,
172066a63635SJoe Thornber sector_t *hotspot_block_size,
172186a3238cSHeinz Mauelshagen unsigned int *nr_hotspot_blocks)
172266a63635SJoe Thornber {
172366a63635SJoe Thornber *hotspot_block_size = cache_block_size * 16u;
172466a63635SJoe Thornber *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
172566a63635SJoe Thornber
172666a63635SJoe Thornber while ((*hotspot_block_size > cache_block_size) &&
172766a63635SJoe Thornber too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
172866a63635SJoe Thornber *hotspot_block_size /= 2u;
172966a63635SJoe Thornber }
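
/*
 * Worked example: with 64KiB cache blocks (128 sectors) and a 4096
 * block cache we start with a 1MiB hotspot block and
 * max(4096 / 4, 1024) = 1024 hotspot blocks.  If 1024 * 1MiB exceeds
 * the origin size, the hotspot block size is halved until the hotspot
 * area fits (or shrinks to a single cache block).
 */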
173066a63635SJoe Thornber
1731*1e4ab7b4SJoe Thornber static struct dm_cache_policy *
1732*1e4ab7b4SJoe Thornber __smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
1733*1e4ab7b4SJoe Thornber bool mimic_mq, bool migrations_allowed, bool cleaner)
173466a63635SJoe Thornber {
173586a3238cSHeinz Mauelshagen unsigned int i;
173686a3238cSHeinz Mauelshagen unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
173786a3238cSHeinz Mauelshagen unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
173866a63635SJoe Thornber struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
173966a63635SJoe Thornber
174066a63635SJoe Thornber if (!mq)
174166a63635SJoe Thornber return NULL;
174266a63635SJoe Thornber
17439ed84698SJoe Thornber init_policy_functions(mq, mimic_mq);
174466a63635SJoe Thornber mq->cache_size = cache_size;
174566a63635SJoe Thornber mq->cache_block_size = cache_block_size;
174666a63635SJoe Thornber
174766a63635SJoe Thornber calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
174866a63635SJoe Thornber &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
174966a63635SJoe Thornber
175066a63635SJoe Thornber mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
175166a63635SJoe Thornber mq->hotspot_level_jump = 1u;
175266a63635SJoe Thornber if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
175366a63635SJoe Thornber DMERR("couldn't initialize entry space");
175466a63635SJoe Thornber goto bad_pool_init;
175566a63635SJoe Thornber }
175666a63635SJoe Thornber
175766a63635SJoe Thornber init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
175866a63635SJoe Thornber for (i = 0; i < nr_sentinels_per_queue; i++)
175966a63635SJoe Thornber get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
176066a63635SJoe Thornber
176166a63635SJoe Thornber init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
176266a63635SJoe Thornber for (i = 0; i < nr_sentinels_per_queue; i++)
176366a63635SJoe Thornber get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
176466a63635SJoe Thornber
176566a63635SJoe Thornber init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
176666a63635SJoe Thornber total_sentinels + mq->nr_hotspot_blocks);
176766a63635SJoe Thornber
176866a63635SJoe Thornber init_allocator(&mq->cache_alloc, &mq->es,
176966a63635SJoe Thornber total_sentinels + mq->nr_hotspot_blocks,
177066a63635SJoe Thornber total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
177166a63635SJoe Thornber
177266a63635SJoe Thornber mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
177366a63635SJoe Thornber if (!mq->hotspot_hit_bits) {
177466a63635SJoe Thornber DMERR("couldn't allocate hotspot hit bitset");
177566a63635SJoe Thornber goto bad_hotspot_hit_bits;
177666a63635SJoe Thornber }
177766a63635SJoe Thornber clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
177866a63635SJoe Thornber
177966a63635SJoe Thornber if (from_cblock(cache_size)) {
178066a63635SJoe Thornber mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1781134bf30cSColin Ian King if (!mq->cache_hit_bits) {
178266a63635SJoe Thornber DMERR("couldn't allocate cache hit bitset");
178366a63635SJoe Thornber goto bad_cache_hit_bits;
178466a63635SJoe Thornber }
178566a63635SJoe Thornber clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
178666a63635SJoe Thornber } else
178766a63635SJoe Thornber mq->cache_hit_bits = NULL;
178866a63635SJoe Thornber
178966a63635SJoe Thornber mq->tick = 0;
17904051aab7SJoe Thornber spin_lock_init(&mq->lock);
179166a63635SJoe Thornber
179266a63635SJoe Thornber q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
179366a63635SJoe Thornber mq->hotspot.nr_top_levels = 8;
179466a63635SJoe Thornber mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
179566a63635SJoe Thornber from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
179666a63635SJoe Thornber
179766a63635SJoe Thornber q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
179866a63635SJoe Thornber q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
179966a63635SJoe Thornber
180066a63635SJoe Thornber stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
180166a63635SJoe Thornber stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
180266a63635SJoe Thornber
180366a63635SJoe Thornber if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
180466a63635SJoe Thornber goto bad_alloc_table;
180566a63635SJoe Thornber
180666a63635SJoe Thornber if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
180766a63635SJoe Thornber goto bad_alloc_hotspot_table;
180866a63635SJoe Thornber
180966a63635SJoe Thornber sentinels_init(mq);
181066a63635SJoe Thornber mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
181166a63635SJoe Thornber
181266a63635SJoe Thornber mq->next_hotspot_period = jiffies;
181366a63635SJoe Thornber mq->next_cache_period = jiffies;
181466a63635SJoe Thornber
18158ee18edeSJoe Thornber mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
1816b29d4986SJoe Thornber if (!mq->bg_work)
1817b29d4986SJoe Thornber goto bad_btracker;
1818b29d4986SJoe Thornber
1819b29d4986SJoe Thornber mq->migrations_allowed = migrations_allowed;
1820*1e4ab7b4SJoe Thornber mq->cleaner = cleaner;
1821b29d4986SJoe Thornber
182266a63635SJoe Thornber return &mq->policy;
182366a63635SJoe Thornber
1824b29d4986SJoe Thornber bad_btracker:
1825b29d4986SJoe Thornber h_exit(&mq->hotspot_table);
182666a63635SJoe Thornber bad_alloc_hotspot_table:
182766a63635SJoe Thornber h_exit(&mq->table);
182866a63635SJoe Thornber bad_alloc_table:
182966a63635SJoe Thornber free_bitset(mq->cache_hit_bits);
183066a63635SJoe Thornber bad_cache_hit_bits:
183166a63635SJoe Thornber free_bitset(mq->hotspot_hit_bits);
183266a63635SJoe Thornber bad_hotspot_hit_bits:
183366a63635SJoe Thornber space_exit(&mq->es);
183466a63635SJoe Thornber bad_pool_init:
183566a63635SJoe Thornber kfree(mq);
183666a63635SJoe Thornber
183766a63635SJoe Thornber return NULL;
183866a63635SJoe Thornber }
183966a63635SJoe Thornber
18409ed84698SJoe Thornber static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
18419ed84698SJoe Thornber sector_t origin_size,
18429ed84698SJoe Thornber sector_t cache_block_size)
18439ed84698SJoe Thornber {
1844*1e4ab7b4SJoe Thornber return __smq_create(cache_size, origin_size, cache_block_size,
1845*1e4ab7b4SJoe Thornber false, true, false);
18469ed84698SJoe Thornber }
18479ed84698SJoe Thornber
18489ed84698SJoe Thornber static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
18499ed84698SJoe Thornber sector_t origin_size,
18509ed84698SJoe Thornber sector_t cache_block_size)
18519ed84698SJoe Thornber {
1852*1e4ab7b4SJoe Thornber return __smq_create(cache_size, origin_size, cache_block_size,
1853*1e4ab7b4SJoe Thornber true, true, false);
1854b29d4986SJoe Thornber }
1855b29d4986SJoe Thornber
1856b29d4986SJoe Thornber static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
1857b29d4986SJoe Thornber sector_t origin_size,
1858b29d4986SJoe Thornber sector_t cache_block_size)
1859b29d4986SJoe Thornber {
1860*1e4ab7b4SJoe Thornber return __smq_create(cache_size, origin_size, cache_block_size,
1861*1e4ab7b4SJoe Thornber false, false, true);
18629ed84698SJoe Thornber }
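
/*
 * The three creators differ only in the flags passed to __smq_create():
 * "smq" is the normal policy, "mq" additionally accepts (and ignores)
 * the legacy mq tunables, and "cleaner" disables migrations and just
 * writes back dirty blocks.
 */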
18639ed84698SJoe Thornber
186466a63635SJoe Thornber /*----------------------------------------------------------------*/
186566a63635SJoe Thornber
186666a63635SJoe Thornber static struct dm_cache_policy_type smq_policy_type = {
186766a63635SJoe Thornber .name = "smq",
1868b29d4986SJoe Thornber .version = {2, 0, 0},
186966a63635SJoe Thornber .hint_size = 4,
187066a63635SJoe Thornber .owner = THIS_MODULE,
187166a63635SJoe Thornber .create = smq_create
187266a63635SJoe Thornber };
187366a63635SJoe Thornber
18749ed84698SJoe Thornber static struct dm_cache_policy_type mq_policy_type = {
18759ed84698SJoe Thornber .name = "mq",
1876b29d4986SJoe Thornber .version = {2, 0, 0},
18779ed84698SJoe Thornber .hint_size = 4,
18789ed84698SJoe Thornber .owner = THIS_MODULE,
18799ed84698SJoe Thornber .create = mq_create,
18809ed84698SJoe Thornber };
18819ed84698SJoe Thornber
1882b29d4986SJoe Thornber static struct dm_cache_policy_type cleaner_policy_type = {
1883b29d4986SJoe Thornber .name = "cleaner",
1884b29d4986SJoe Thornber .version = {2, 0, 0},
1885b29d4986SJoe Thornber .hint_size = 4,
1886b29d4986SJoe Thornber .owner = THIS_MODULE,
1887b29d4986SJoe Thornber .create = cleaner_create,
1888b29d4986SJoe Thornber };
1889b29d4986SJoe Thornber
1890bccab6a0SMike Snitzer static struct dm_cache_policy_type default_policy_type = {
1891bccab6a0SMike Snitzer .name = "default",
1892b29d4986SJoe Thornber .version = {2, 0, 0},
1893bccab6a0SMike Snitzer .hint_size = 4,
1894bccab6a0SMike Snitzer .owner = THIS_MODULE,
1895bccab6a0SMike Snitzer .create = smq_create,
1896bccab6a0SMike Snitzer .real = &smq_policy_type
1897bccab6a0SMike Snitzer };
1898bccab6a0SMike Snitzer
189966a63635SJoe Thornber static int __init smq_init(void)
190066a63635SJoe Thornber {
190166a63635SJoe Thornber int r;
190266a63635SJoe Thornber
190366a63635SJoe Thornber r = dm_cache_policy_register(&smq_policy_type);
190466a63635SJoe Thornber if (r) {
190566a63635SJoe Thornber DMERR("register failed %d", r);
190666a63635SJoe Thornber return -ENOMEM;
190766a63635SJoe Thornber }
190866a63635SJoe Thornber
19099ed84698SJoe Thornber r = dm_cache_policy_register(&mq_policy_type);
19109ed84698SJoe Thornber if (r) {
19117dd85bb0SMike Snitzer DMERR("register failed (as mq) %d", r);
1912b29d4986SJoe Thornber goto out_mq;
1913b29d4986SJoe Thornber }
1914b29d4986SJoe Thornber
1915b29d4986SJoe Thornber r = dm_cache_policy_register(&cleaner_policy_type);
1916b29d4986SJoe Thornber if (r) {
1917b29d4986SJoe Thornber DMERR("register failed (as cleaner) %d", r);
1918b29d4986SJoe Thornber goto out_cleaner;
19199ed84698SJoe Thornber }
19209ed84698SJoe Thornber
1921bccab6a0SMike Snitzer r = dm_cache_policy_register(&default_policy_type);
1922bccab6a0SMike Snitzer if (r) {
1923bccab6a0SMike Snitzer DMERR("register failed (as default) %d", r);
1924b29d4986SJoe Thornber goto out_default;
1925bccab6a0SMike Snitzer }
1926bccab6a0SMike Snitzer
192766a63635SJoe Thornber return 0;
1928b29d4986SJoe Thornber
1929b29d4986SJoe Thornber out_default:
1930b29d4986SJoe Thornber dm_cache_policy_unregister(&cleaner_policy_type);
1931b29d4986SJoe Thornber out_cleaner:
1932b29d4986SJoe Thornber dm_cache_policy_unregister(&mq_policy_type);
1933b29d4986SJoe Thornber out_mq:
1934b29d4986SJoe Thornber dm_cache_policy_unregister(&smq_policy_type);
1935b29d4986SJoe Thornber
1936b29d4986SJoe Thornber return -ENOMEM;
193766a63635SJoe Thornber }
193866a63635SJoe Thornber
193966a63635SJoe Thornber static void __exit smq_exit(void)
194066a63635SJoe Thornber {
1941b29d4986SJoe Thornber dm_cache_policy_unregister(&cleaner_policy_type);
194266a63635SJoe Thornber dm_cache_policy_unregister(&smq_policy_type);
19439ed84698SJoe Thornber dm_cache_policy_unregister(&mq_policy_type);
1944bccab6a0SMike Snitzer dm_cache_policy_unregister(&default_policy_type);
194566a63635SJoe Thornber }
194666a63635SJoe Thornber
194766a63635SJoe Thornber module_init(smq_init);
194866a63635SJoe Thornber module_exit(smq_exit);
194966a63635SJoe Thornber
195066a63635SJoe Thornber MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
195166a63635SJoe Thornber MODULE_LICENSE("GPL");
195266a63635SJoe Thornber MODULE_DESCRIPTION("smq cache policy");
195334dd0517SYi Zhang
195434dd0517SYi Zhang MODULE_ALIAS("dm-cache-default");
19559ed84698SJoe Thornber MODULE_ALIAS("dm-cache-mq");
1956b29d4986SJoe Thornber MODULE_ALIAS("dm-cache-cleaner");
1957