/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	/* An empty list_head means the item is not on any LRU yet. */
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			/* The isolate callback has already unlinked the item. */
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

int list_lru_init(struct list_lru *lru)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
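
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * embeds a struct list_head in its object, feeds objects to
 * list_lru_add()/list_lru_del(), and reclaims them per node through
 * list_lru_walk_node() with an isolate callback.  The names
 * "my_object", "my_isolate" and "my_shrink_node" below are
 * hypothetical, used only to demonstrate the API above.
 */
struct my_object {
	struct list_head lru;		/* linked into the list_lru */
	atomic_t refcount;		/* object is busy while > 0 */
};

static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
				  void *cb_arg)
{
	struct my_object *obj = container_of(item, struct my_object, lru);

	/* Still referenced: rotate it to the tail and keep walking. */
	if (atomic_read(&obj->refcount))
		return LRU_ROTATE;

	/*
	 * The callback must unlink the item itself; the walker only
	 * adjusts nr_items when LRU_REMOVED is returned.  kfree() is
	 * fine here because the per-node spinlock is still held and
	 * kfree() does not sleep.
	 */
	list_del_init(item);
	kfree(obj);
	return LRU_REMOVED;
}

static unsigned long my_shrink_node(struct list_lru *lru, int nid,
				    unsigned long nr_to_scan)
{
	/* Returns the number of objects freed from this node's list. */
	return list_lru_walk_node(lru, nid, my_isolate, NULL, &nr_to_scan);
}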