#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means:
 *  - there is no race in "exchange" when we are accessed via SwapCache,
 *    because the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no other user of the entry,
 *    so again there is no race.
 * Hence no lock is needed around "exchange" itself.
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * Allocate the pages backing the swap_cgroup records for swap device @type.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;

                if (!(idx % SWAP_CLUSTER_MAX))
                        cond_resched();
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}

/*
 * Map a swap entry to its swap_cgroup record: select the control structure
 * for the entry's swap type, then index into that type's page array by the
 * entry's offset. Optionally returns the control structure via @ctrlp.
 */
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
                                        struct swap_cgroup_ctrl **ctrlp)
{
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;

        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;

        mappage = ctrl->map[offset / SC_PER_PAGE];
        sc = page_address(mappage);
        return sc + offset % SC_PER_PAGE;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                        unsigned short old, unsigned short new)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        sc = lookup_swap_cgroup(ent, &ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        retval = sc->id;
        if (retval == old)
                sc->id = new;
        else
                retval = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}
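/*
 * Usage sketch (illustrative only; nothing in this file calls it): a caller
 * moving a swapped-out charge between two cgroups could use
 * swap_cgroup_cmpxchg() as below, where old_id and new_id are hypothetical
 * mem_cgroup ids held by the caller:
 *
 *	if (swap_cgroup_cmpxchg(ent, old_id, new_id) == old_id)
 *		... the entry's charge now belongs to new_id ...
 *	else
 *		... the entry was not owned by old_id; nothing changed ...
 *
 * The compare-and-swap is serialized by ctrl->lock, so concurrent
 * record/cmpxchg callers cannot interleave between the read and the write.
 */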
/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 *
 * Returns the previously recorded id.
 * (Of course, the old id may itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned short old;
        unsigned long flags;

        sc = lookup_swap_cgroup(ent, &ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        old = sc->id;
        sc->id = id;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return old;
}

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the mem_cgroup id recorded for @ent, or 0 if none was recorded.
 * (0 is never a valid mem_cgroup id.)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
        return lookup_swap_cgroup(ent, NULL)->id;
}

/*
 * Called at swapon: allocate a pointer array covering @max_pages swap slots
 * and populate it with zeroed pages via swap_cgroup_prepare().
 */
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
        array_size = length * sizeof(void *);

        array = vzalloc(array_size);
        if (!array)
                goto nomem;

        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        spin_lock_init(&ctrl->lock);
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                mutex_unlock(&swap_cgroup_mutex);
                vfree(array);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        pr_info("couldn't allocate enough memory for swap_cgroup\n");
        pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
        return -ENOMEM;
}

/*
 * Called at swapoff: detach this type's map under the mutex, then free the
 * backing pages and the pointer array.
 */
void swap_cgroup_swapoff(int type)
{
        struct page **map;
        unsigned long i, length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        map = ctrl->map;
        length = ctrl->length;
        ctrl->map = NULL;
        ctrl->length = 0;
        mutex_unlock(&swap_cgroup_mutex);

        if (map) {
                for (i = 0; i < length; i++) {
                        struct page *page = map[i];

                        if (page)
                                __free_page(page);
                        if (!(i % SWAP_CLUSTER_MAX))
                                cond_resched();
                }
                vfree(map);
        }
}
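/*
 * Lifetime sketch (illustrative only; in a real kernel these calls come from
 * the swapon/swapoff paths and the memcg code, and "type", "ent" and
 * "memcg_id" below are hypothetical stand-ins for what those callers pass):
 *
 *	swap_cgroup_swapon(type, maxpages);	map set up at swapon time
 *	swap_cgroup_record(ent, memcg_id);	charge recorded at swapout
 *	id = lookup_swap_cgroup_id(ent);	read back, e.g. at swapin
 *	swap_cgroup_swapoff(type);		map torn down at swapoff
 */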