Lines Matching +full:cpu +full:-nr in mm/swap_slots.c (Linux kernel)
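
The matches below are from the Linux kernel's swap slot cache. Swap slots are allocated from the global pool in batches and parked in per-CPU caches, so a CPU can normally hand out a new slot, and return freed ones, without contending on the global swap allocator locks. The fields referenced throughout (slots, nr, cur, slots_ret, n_ret and the two locks) belong to the per-CPU structure accessed via per_cpu(swp_slots, cpu); a minimal sketch of that structure, reconstructed from the fields visible in these matches (exact types and field order are assumptions), is:

/*
 * Sketch only: per-CPU swap slot cache as implied by the matched lines.
 * Field names come from this listing; types and layout are assumptions.
 */
struct swap_slots_cache {
	bool		lock_initialized;
	struct mutex	alloc_lock;	/* protects slots, cur, nr (allocation side) */
	swp_entry_t	*slots;		/* slots cached for handing out */
	int		nr;		/* unconsumed slots left in the cache */
	int		cur;		/* index of the next slot to hand out */
	spinlock_t	free_lock;	/* protects slots_ret, n_ret (free side) */
	swp_entry_t	*slots_ret;	/* freed slots waiting to be returned */
	int		n_ret;		/* number of slots waiting to be returned */
};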

1 // SPDX-License-Identifier: GPL-2.0
11 * it into local per cpu caches. This has the advantage
31 #include <linux/cpu.h>
67 /* Must not be called with cpu hot plug lock */
73 /* serialize with cpu hotplug operations */ in disable_swap_slots_cache_lock()
113 static int alloc_swap_slot_cache(unsigned int cpu) in alloc_swap_slot_cache() argument
126 return -ENOMEM; in alloc_swap_slot_cache()
132 return -ENOMEM; in alloc_swap_slot_cache()
136 cache = &per_cpu(swp_slots, cpu); in alloc_swap_slot_cache()
137 if (cache->slots || cache->slots_ret) { in alloc_swap_slot_cache()
147 if (!cache->lock_initialized) { in alloc_swap_slot_cache()
148 mutex_init(&cache->alloc_lock); in alloc_swap_slot_cache()
149 spin_lock_init(&cache->free_lock); in alloc_swap_slot_cache()
150 cache->lock_initialized = true; in alloc_swap_slot_cache()
152 cache->nr = 0; in alloc_swap_slot_cache()
153 cache->cur = 0; in alloc_swap_slot_cache()
154 cache->n_ret = 0; in alloc_swap_slot_cache()
157 * !cache->slots or !cache->slots_ret to know if it is safe to acquire in alloc_swap_slot_cache()
162 cache->slots = slots; in alloc_swap_slot_cache()
163 cache->slots_ret = slots_ret; in alloc_swap_slot_cache()
168 static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, in drain_slots_cache_cpu() argument
174 cache = &per_cpu(swp_slots, cpu); in drain_slots_cache_cpu()
175 if ((type & SLOTS_CACHE) && cache->slots) { in drain_slots_cache_cpu()
176 mutex_lock(&cache->alloc_lock); in drain_slots_cache_cpu()
177 swapcache_free_entries(cache->slots + cache->cur, cache->nr); in drain_slots_cache_cpu()
178 cache->cur = 0; in drain_slots_cache_cpu()
179 cache->nr = 0; in drain_slots_cache_cpu()
180 if (free_slots && cache->slots) { in drain_slots_cache_cpu()
181 kvfree(cache->slots); in drain_slots_cache_cpu()
182 cache->slots = NULL; in drain_slots_cache_cpu()
184 mutex_unlock(&cache->alloc_lock); in drain_slots_cache_cpu()
186 if ((type & SLOTS_CACHE_RET) && cache->slots_ret) { in drain_slots_cache_cpu()
187 spin_lock_irq(&cache->free_lock); in drain_slots_cache_cpu()
188 swapcache_free_entries(cache->slots_ret, cache->n_ret); in drain_slots_cache_cpu()
189 cache->n_ret = 0; in drain_slots_cache_cpu()
190 if (free_slots && cache->slots_ret) { in drain_slots_cache_cpu()
191 slots = cache->slots_ret; in drain_slots_cache_cpu()
192 cache->slots_ret = NULL; in drain_slots_cache_cpu()
194 spin_unlock_irq(&cache->free_lock); in drain_slots_cache_cpu()
201 unsigned int cpu; in __drain_swap_slots_cache() local
212 * We cannot acquire cpu hot plug lock here as in __drain_swap_slots_cache()
213 * this function can be invoked in the cpu in __drain_swap_slots_cache()
215 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback in __drain_swap_slots_cache()
216 * -> memory allocation -> direct reclaim -> folio_alloc_swap in __drain_swap_slots_cache()
217 * -> drain_swap_slots_cache in __drain_swap_slots_cache()
219 * Hence the loop over current online cpu below could miss cpu that in __drain_swap_slots_cache()
222 * cpu before it has been marked online. Hence, we will not in __drain_swap_slots_cache()
223 * fill any swap slots in slots cache of such cpu. in __drain_swap_slots_cache()
224 * There are no slots on such cpu that need to be drained. in __drain_swap_slots_cache()
226 for_each_online_cpu(cpu) in __drain_swap_slots_cache()
227 drain_slots_cache_cpu(cpu, type, false); in __drain_swap_slots_cache()
230 static int free_slot_cache(unsigned int cpu) in free_slot_cache() argument
233 drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true); in free_slot_cache()
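
alloc_swap_slot_cache() and free_slot_cache() both take a CPU number and return an int, which is the shape of CPU hotplug startup/teardown callbacks: the former sets up a CPU's cache as it comes online, the latter drains and frees it on teardown (the true third argument to drain_slots_cache_cpu() above is free_slots). The registration itself is not among the matched lines; a minimal sketch of how such a pair is typically wired up, using the generic dynamic hotplug state (the state constant, name string and helper name here are assumptions, not taken from this listing):

#include <linux/cpuhotplug.h>

/* Sketch only: register the per-CPU cache callbacks with the CPU hotplug
 * state machine.  The real file may use a dedicated cpuhp state instead
 * of CPUHP_AP_ONLINE_DYN. */
static int register_swap_slots_cache_callbacks(void)
{
	int ret;

	/* For a dynamic state, a successful cpuhp_setup_state() returns the
	 * allocated state number, so any non-negative value means success. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	return ret < 0 ? ret : 0;
}
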
264 cache->cur = 0; in refill_swap_slots_cache()
266 cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, in refill_swap_slots_cache()
267 cache->slots, 1); in refill_swap_slots_cache()
269 return cache->nr; in refill_swap_slots_cache()
277 if (likely(use_swap_slot_cache && cache->slots_ret)) { in free_swap_slot()
278 spin_lock_irq(&cache->free_lock); in free_swap_slot()
280 if (!use_swap_slot_cache || !cache->slots_ret) { in free_swap_slot()
281 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
284 if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) { in free_swap_slot()
291 swapcache_free_entries(cache->slots_ret, cache->n_ret); in free_swap_slot()
292 cache->n_ret = 0; in free_swap_slot()
294 cache->slots_ret[cache->n_ret++] = entry; in free_swap_slot()
295 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
318 * accesses to the per-CPU data structure are protected by the in folio_alloc_swap()
319 * mutex cache->alloc_lock. in folio_alloc_swap()
321 * The alloc path here does not touch cache->slots_ret in folio_alloc_swap()
322 * so cache->free_lock is not taken. in folio_alloc_swap()
326 if (likely(check_cache_active() && cache->slots)) { in folio_alloc_swap()
327 mutex_lock(&cache->alloc_lock); in folio_alloc_swap()
328 if (cache->slots) { in folio_alloc_swap()
330 if (cache->nr) { in folio_alloc_swap()
331 entry = cache->slots[cache->cur]; in folio_alloc_swap()
332 cache->slots[cache->cur++].val = 0; in folio_alloc_swap()
333 cache->nr--; in folio_alloc_swap()
338 mutex_unlock(&cache->alloc_lock); in folio_alloc_swap()
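
Pieced together, the matches from folio_alloc_swap() show the allocation fast path: it takes only cache->alloc_lock, pops the next cached entry when one is available, and otherwise refills the cache from the global pool through refill_swap_slots_cache()/get_swap_pages(). A condensed sketch of that flow follows; the control flow around the matched lines, the raw_cpu_ptr() access and the omission of the fallback to the global allocator are assumptions, so treat it as an illustration rather than a verbatim excerpt:

/* Sketch only: per-CPU allocation fast path reconstructed from the
 * matched lines; not a copy of folio_alloc_swap(). */
swp_entry_t entry = { 0 };
struct swap_slots_cache *cache = raw_cpu_ptr(&swp_slots);

mutex_lock(&cache->alloc_lock);
if (cache->slots) {
	if (!cache->nr)
		refill_swap_slots_cache(cache);	/* batch-refill via get_swap_pages() */
	if (cache->nr) {
		entry = cache->slots[cache->cur];	/* pop the next cached slot */
		cache->slots[cache->cur++].val = 0;
		cache->nr--;
	}
}
mutex_unlock(&cache->alloc_lock);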