/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

/*
 * Add @item to the LRU list of the node its backing memory belongs to.
 * Returns true if the item was added, false if it was already on a list.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

/*
 * Remove @item from its node's LRU list.  Returns true if the item was
 * removed, false if it was not on a list.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

/* Return the number of items currently on the LRU list of node @nid. */
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

/*
 * Walk the LRU list of node @nid, calling @isolate on each item until
 * *@nr_to_walk items have been examined.  Returns the number of items
 * removed by the callback.
 */
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

/*
 * Allocate and initialise the per-node state; @key, if non-NULL, sets the
 * lockdep class for the per-node locks.
 */
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);

void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
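
/*
 * Illustrative usage sketch (not part of the original file, never compiled):
 * a minimal example of how a caller might drive the API above.  The
 * "demo_obj" structure, "demo_lru" list and demo_* helpers are hypothetical
 * names invented for this example; only the list_lru_* calls and the
 * enum lru_status contract come from the code above.
 */
#if 0
/* A cached object that embeds its LRU linkage. */
struct demo_obj {
	struct list_head	lru;	/* linkage used by list_lru */
	bool			in_use;	/* updated under the owner's own
					 * serialization (not shown) */
};

static struct list_lru demo_lru;

/* Called from the owner's code when an object becomes idle or busy again. */
static void demo_obj_idle(struct demo_obj *obj)
{
	list_lru_add(&demo_lru, &obj->lru);	/* no-op if already on a list */
}

static void demo_obj_busy(struct demo_obj *obj)
{
	list_lru_del(&demo_lru, &obj->lru);	/* no-op if not on a list */
}

/*
 * Isolate callback, invoked with the per-node lru lock held.  Busy objects
 * are rotated to the tail; idle ones are unlinked by the callback itself
 * (the walker only adjusts nr_items) and parked on a private dispose list
 * so they can be freed after the lock is dropped.
 */
static enum lru_status demo_isolate(struct list_head *item,
				    spinlock_t *lock, void *cb_arg)
{
	struct demo_obj *obj = container_of(item, struct demo_obj, lru);
	struct list_head *dispose = cb_arg;

	if (obj->in_use)
		return LRU_ROTATE;

	list_del_init(item);
	list_add(item, dispose);
	return LRU_REMOVED;
}

/* Reclaim up to @nr idle objects from NUMA node @nid. */
static unsigned long demo_shrink_node(int nid, unsigned long nr)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk_node(&demo_lru, nid, demo_isolate,
				   &dispose, &nr);

	while (!list_empty(&dispose)) {
		struct demo_obj *obj;

		obj = list_first_entry(&dispose, struct demo_obj, lru);
		list_del_init(&obj->lru);
		kfree(obj);
	}
	return freed;
}

/* Setup and teardown. */
static int demo_setup(void)
{
	return list_lru_init_key(&demo_lru, NULL);
}

static void demo_teardown(void)
{
	list_lru_destroy(&demo_lru);
}
#endif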