/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

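/*
 * A list_lru keeps one LRU list per NUMA node: each struct list_lru_node
 * carries its own spinlock, list head and item count, and the
 * lru->active_nodes mask records which nodes currently hold items so
 * that walkers and counters can skip empty nodes.
 */
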
/*
 * Add @item to the list for the NUMA node whose memory backs it.
 * Returns true if the item was off-list and has been added, false if it
 * was already on a list.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

/*
 * Remove @item from the list for the node that backs it. Returns true
 * if the item was on a list and has been removed, false if it was
 * already off-list.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

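/*
 * Usage sketch (not part of this file; "demo" names are hypothetical):
 * an object embeds a list_head, which must start out initialized with
 * INIT_LIST_HEAD() so that list_empty() sees it as off-list.
 */
#if 0	/* example only */
struct demo_obj {
	struct list_head lru_link;	/* INIT_LIST_HEAD() at creation */
	spinlock_t obj_lock;
	/* ... payload ... */
};

static struct list_lru demo_lru;

static void demo_make_reclaimable(struct demo_obj *obj)
{
	/* returns false (and does nothing) if already on the list */
	list_lru_add(&demo_lru, &obj->lru_link);
}

static void demo_pin(struct demo_obj *obj)
{
	/* returns false (and does nothing) if not on the list */
	list_lru_del(&demo_lru, &obj->lru_link);
}
#endif
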
/*
 * Return the number of items currently on the @nid list. Taken under
 * the per-node lock, so the count is exact at the time of the call.
 */
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

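/*
 * Sketch (not part of this file): a caller wanting a total rather than
 * a per-node count can sum over the nodes that actually hold items,
 * using the active_nodes mask maintained by add/del above.
 */
#if 0	/* example only */
static unsigned long demo_total_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes)
		count += list_lru_count_node(lru, nid);
	return count;
}
#endif
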
/*
 * Walk the LRU list for @nid, calling @isolate on each item until the
 * list is exhausted or *nr_to_walk items have been visited. Returns the
 * number of items the isolate callback reported as removed.
 */
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, so our list traversal
			 * is now invalid and we have to restart from scratch.
			 */
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

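/*
 * Sketch of an isolate callback for list_lru_walk_node() (hypothetical
 * "demo" names from the earlier sketch; a real user would follow its
 * own object locking and reference rules). The callback runs under the
 * per-node lock; if it drops that lock it must return LRU_RETRY so the
 * walker restarts its traversal.
 */
#if 0	/* example only */
static enum lru_status demo_isolate(struct list_head *item,
				    spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;
	struct demo_obj *obj = container_of(item, struct demo_obj, lru_link);

	if (!spin_trylock(&obj->obj_lock))
		return LRU_SKIP;	/* contended; move on to the next item */

	/*
	 * Take the item off the LRU ourselves; returning LRU_REMOVED
	 * tells the walker to fix up nr_items and active_nodes.
	 */
	list_del_init(item);
	list_add(item, dispose);
	spin_unlock(&obj->obj_lock);
	return LRU_REMOVED;
}
#endif
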
/*
 * Allocate and initialize the per-node list array for @lru. Returns 0
 * on success or -ENOMEM if the node array cannot be allocated.
 */
int list_lru_init(struct list_lru *lru)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);

/* Free the per-node array allocated by list_lru_init(). */
void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
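
/*
 * Lifecycle sketch (hypothetical demo_lru from the earlier sketch): the
 * per-node array is allocated by list_lru_init() and must be freed with
 * list_lru_destroy() once no items remain on the lists.
 */
#if 0	/* example only */
static int __init demo_init(void)
{
	int err = list_lru_init(&demo_lru);

	if (err)
		return err;	/* -ENOMEM: node array allocation failed */
	/* ... add/del/walk as shown above ... */
	list_lru_destroy(&demo_lru);
	return 0;
}
#endif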