// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include "percpu_freelist.h"

/* Set up one lockable list head per possible CPU, plus the shared
 * "extralist" that serves as a fallback when per-cpu locks cannot be
 * taken (NMI context) or when all per-cpu lists are empty.
 */
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

/* Link @node at the front of @head. Caller must hold head->lock, or own
 * the structure exclusively as in pcpu_freelist_populate().
 */
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	head->first = node;
}

static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}

static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
						struct pcpu_freelist_node *node)
{
	if (!raw_spin_trylock(&s->extralist.lock))
		return false;

	pcpu_freelist_push_node(&s->extralist, node);
	raw_spin_unlock(&s->extralist.lock);
	return true;
}

/* An NMI can fire while this CPU already holds a list lock, so spinning
 * could deadlock. Instead, trylock each per-cpu list in turn; once a full
 * round over all CPUs has failed, also try the shared extralist.
 */
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		struct pcpu_freelist_head *head;

		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			pcpu_freelist_push_node(head, node);
			raw_spin_unlock(&head->lock);
			return;
		}
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;

		/* cannot lock any per cpu lock, try extralist */
		if (cpu == orig_cpu &&
		    pcpu_freelist_try_push_extra(s, node))
			return;
	}
}

/* Push without touching the IRQ state; pcpu_freelist_push() below is the
 * irq-safe wrapper around this.
 */
void __pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	if (in_nmi())
		___pcpu_freelist_push_nmi(s, node);
	else
		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

/* Carve @buf into @nr_elems chunks of @elem_size bytes and spread them over
 * the possible CPUs in contiguous batches of roughly equal size. Each chunk
 * must begin with a struct pcpu_freelist_node, since @buf is linked in
 * directly.
 */
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		/* No locking required as this is not visible yet. */
		pcpu_freelist_push_node(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

/* Pop from the local CPU's list first; when it is empty, steal from the
 * other CPUs' lists, and finally fall back to the extralist.
 */
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* per cpu lists are all empty, try extralist */
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

/* Same walk as ___pcpu_freelist_pop(), but NMI-safe: only trylock is used,
 * so a contended (possibly non-empty) list is skipped rather than waited on.
 */
static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				head->first = node->next;
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

/* Pop without touching the IRQ state; pcpu_freelist_pop() below is the
 * irq-safe wrapper around this.
 */
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (in_nmi())
		return ___pcpu_freelist_pop_nmi(s);
	return ___pcpu_freelist_pop(s);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}
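
/*
 * Usage sketch (illustrative only, not compiled and not part of this file):
 * in the kernel this API is consumed by BPF's preallocated maps, e.g.
 * kernel/bpf/hashtab.c, which recycle preallocated elements through a
 * pcpu_freelist. The struct my_elem / my_elems_init / my_elem_get /
 * my_elem_put names below are hypothetical. Note that
 * pcpu_freelist_populate() treats @buf as an array of @elem_size chunks
 * that each *start* with the freelist node, which is why fnode is placed
 * first here.
 */
#if 0
struct my_elem {
	struct pcpu_freelist_node fnode;	/* must be first, see above */
	u64 payload;
};

static int my_elems_init(struct pcpu_freelist *fl, struct my_elem *elems,
			 u32 nr_elems)
{
	int err = pcpu_freelist_init(fl);

	if (err)
		return err;
	/* hand every preallocated element to the freelist up front */
	pcpu_freelist_populate(fl, elems, sizeof(*elems), nr_elems);
	return 0;
}

static struct my_elem *my_elem_get(struct pcpu_freelist *fl)
{
	struct pcpu_freelist_node *n = pcpu_freelist_pop(fl);

	/* NULL once every per-cpu list and the extralist are empty */
	return n ? container_of(n, struct my_elem, fnode) : NULL;
}

static void my_elem_put(struct pcpu_freelist *fl, struct my_elem *e)
{
	pcpu_freelist_push(fl, &e->fnode);
}
#endif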