--- pid.c (d62e54abca1146981fc9f98f85ff398a113a22c2)
+++ pid.c (92476d7fc0326a409ab1d3864a04093a6be9aca7)

 /*
  * Generic pidhash and scalable, time-bounded PID allocator
  *
  * (C) 2002-2003 William Irwin, IBM
  * (C) 2004 William Irwin, Oracle
  * (C) 2002-2004 Ingo Molnar, Red Hat
  *
  * pid-structures are backing objects for tasks sharing a given ID to chain
 [... 14 unchanged lines hidden ...]
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>

 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
-static struct hlist_head *pid_hash[PIDTYPE_MAX];
+static struct hlist_head *pid_hash;
 static int pidhash_shift;
+static kmem_cache_t *pid_cachep;

 int pid_max = PID_MAX_DEFAULT;
 int last_pid;

 #define RESERVED_PIDS 300

 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 [... 14 unchanged lines hidden ...]
 typedef struct pidmap {
         atomic_t nr_free;
         void *page;
 } pidmap_t;

 static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
         { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };

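The allocator below treats the PID space as an array of per-page bitmaps: a
PID number splits into a pidmap_array index and a bit offset within that
page, and mk_pid() (defined in the hidden macro block above) inverts the
split. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages,
so BITS_PER_PAGE works out to 32768:

    /* Sketch of the pidmap index math, not kernel code; assumes
     * PAGE_SIZE == 4096, matching the common case. */
    #include <stdio.h>

    #define PAGE_SIZE          4096
    #define BITS_PER_PAGE      (PAGE_SIZE * 8)        /* 32768 */
    #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)

    int main(void)
    {
        int pid = 40000;  /* only reachable once pid_max is raised */
        int page_idx = pid / BITS_PER_PAGE;       /* pidmap_array slot: 1 */
        int offset = pid & BITS_PER_PAGE_MASK;    /* bit in that page: 7232 */

        /* mk_pid(map, offset) recombines the two halves */
        printf("pid %d -> map %d, bit %d -> %d\n",
               pid, page_idx, offset, page_idx * BITS_PER_PAGE + offset);
        return 0;
    }
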
+/*
+ * Note: disable interrupts while the pidmap_lock is held as an
+ * interrupt might come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does
+ * spin_lock(&pidmap_lock) followed by an interrupt routine that does
+ * read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no
+ * irq handlers that take it we can leave the interrupts enabled.
+ * For now it is easier to be safe than to prove it can't happen.
+ */
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

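The interleaving the comment above warns about, spelled out step by step
(tasklist_lock is a reader/writer lock, pidmap_lock a plain spinlock):

    CPU0                                CPU1
    ----                                ----
    write_lock_irq(&tasklist_lock)
    detach_pid() -> free_pid()
      spins on pidmap_lock              spin_lock(&pidmap_lock)
                                        <interrupt arrives>
                                          read_lock(&tasklist_lock)
                                          spins: writer active on CPU0

Neither CPU can make progress: CPU0 waits for pidmap_lock, which CPU1 holds;
CPU1's interrupt handler waits for tasklist_lock, which CPU0 holds. Taking
pidmap_lock with interrupts disabled makes the interrupt on CPU1 wait until
the lock is released, which breaks the cycle.
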
-fastcall void free_pidmap(int pid)
+static fastcall void free_pidmap(int pid)
 {
         pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
         int offset = pid & BITS_PER_PAGE_MASK;

         clear_bit(offset, map->page);
         atomic_inc(&map->nr_free);
 }

-int alloc_pidmap(void)
+static int alloc_pidmap(void)
 {
         int i, offset, max_scan, pid, last = last_pid;
         pidmap_t *map;

         pid = last + 1;
         if (pid >= pid_max)
                 pid = RESERVED_PIDS;
         offset = pid & BITS_PER_PAGE_MASK;
         map = &pidmap_array[pid/BITS_PER_PAGE];
         max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
         for (i = 0; i <= max_scan; ++i) {
                 if (unlikely(!map->page)) {
                         unsigned long page = get_zeroed_page(GFP_KERNEL);
                         /*
                          * Free the page if someone raced with us
                          * installing it:
                          */
-                        spin_lock(&pidmap_lock);
+                        spin_lock_irq(&pidmap_lock);
                         if (map->page)
                                 free_page(page);
                         else
                                 map->page = (void *)page;
-                        spin_unlock(&pidmap_lock);
+                        spin_unlock_irq(&pidmap_lock);
                         if (unlikely(!map->page))
                                 break;
                 }
                 if (likely(atomic_read(&map->nr_free))) {
                         do {
                                 if (!test_and_set_bit(offset, map->page)) {
                                         atomic_dec(&map->nr_free);
                                         last_pid = pid;
 [... 20 unchanged lines hidden ...]
                         if (unlikely(last == offset))
                                 break;
                 }
                 pid = mk_pid(map, offset);
         }
         return -1;
 }

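The scan above starts at last_pid + 1, wraps past pid_max back to
RESERVED_PIDS so that low PIDs handed out at boot are not immediately
recycled, and reports exhaustion with -1 only after every bitmap page has
been visited. A condensed user-space model of the wrap-around, assuming a
single flat bitmap instead of the per-page pidmap_array:

    /* Toy model of alloc_pidmap()'s wrap-around, not the kernel code:
     * one bitmap of pid_max bits, last_pid remembered across calls. */
    #include <stdbool.h>

    #define PID_MAX       4096
    #define RESERVED_PIDS 300

    static bool used[PID_MAX];
    static int last_pid;

    static int toy_alloc_pid(void)
    {
        int pid = last_pid + 1;
        int scanned;

        for (scanned = 0; scanned < PID_MAX; ++scanned, ++pid) {
            if (pid >= PID_MAX)
                pid = RESERVED_PIDS;  /* wrap, skipping reserved low PIDs */
            if (!used[pid]) {
                used[pid] = true;
                last_pid = pid;
                return pid;
            }
        }
        return -1;  /* every usable PID is taken */
    }
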
-struct pid * fastcall find_pid(enum pid_type type, int nr)
+fastcall void put_pid(struct pid *pid)
 {
+        if (!pid)
+                return;
+        if ((atomic_read(&pid->count) == 1) ||
+             atomic_dec_and_test(&pid->count))
+                kmem_cache_free(pid_cachep, pid);
+}
+
+static void delayed_put_pid(struct rcu_head *rhp)
+{
+        struct pid *pid = container_of(rhp, struct pid, rcu);
+        put_pid(pid);
+}
+
+fastcall void free_pid(struct pid *pid)
+{
+        /* We can be called with write_lock_irq(&tasklist_lock) held */
+        unsigned long flags;
+
+        spin_lock_irqsave(&pidmap_lock, flags);
+        hlist_del_rcu(&pid->pid_chain);
+        spin_unlock_irqrestore(&pidmap_lock, flags);
+
+        free_pidmap(pid->nr);
+        call_rcu(&pid->rcu, delayed_put_pid);
+}
+
+struct pid *alloc_pid(void)
+{
+        struct pid *pid;
+        enum pid_type type;
+        int nr = -1;
+
+        pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
+        if (!pid)
+                goto out;
+
+        nr = alloc_pidmap();
+        if (nr < 0)
+                goto out_free;
+
+        atomic_set(&pid->count, 1);
+        pid->nr = nr;
+        for (type = 0; type < PIDTYPE_MAX; ++type)
+                INIT_HLIST_HEAD(&pid->tasks[type]);
+
+        spin_lock_irq(&pidmap_lock);
+        hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
+        spin_unlock_irq(&pidmap_lock);
+
+out:
+        return pid;
+
+out_free:
+        kmem_cache_free(pid_cachep, pid);
+        pid = NULL;
+        goto out;
+}
+
+struct pid * fastcall find_pid(int nr)
+{
         struct hlist_node *elem;
         struct pid *pid;

         hlist_for_each_entry_rcu(pid, elem,
-                        &pid_hash[type][pid_hashfn(nr)], pid_chain) {
+                        &pid_hash[pid_hashfn(nr)], pid_chain) {
                 if (pid->nr == nr)
                         return pid;
         }
         return NULL;
 }

 int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
 {
-        struct pid *pid, *task_pid;
+        struct pid_link *link;
+        struct pid *pid;

-        task_pid = &task->pids[type];
-        pid = find_pid(type, nr);
-        task_pid->nr = nr;
-        if (pid == NULL) {
-                INIT_LIST_HEAD(&task_pid->pid_list);
-                hlist_add_head_rcu(&task_pid->pid_chain,
-                                &pid_hash[type][pid_hashfn(nr)]);
-        } else {
-                INIT_HLIST_NODE(&task_pid->pid_chain);
-                list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
-        }
+        WARN_ON(!task->pid); /* to be removed soon */
+        WARN_ON(!nr); /* to be removed soon */

+        link = &task->pids[type];
+        link->pid = pid = find_pid(nr);
+        hlist_add_head_rcu(&link->node, &pid->tasks[type]);
+
         return 0;
 }

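attach_pid() now assumes the caller already allocated the struct pid via
alloc_pid(), so a task merely hooks its per-type pid_link onto the pid's
tasks[] list. For reference, the shape of the two structures, reconstructed
here from the fields this file uses; the authoritative definitions live in
linux/pid.h as changed by this same patch:

    struct pid
    {
        atomic_t count;                       /* references, see put_pid() */
        int nr;                               /* the PID number itself */
        struct hlist_node pid_chain;          /* entry in pid_hash[] */
        struct hlist_head tasks[PIDTYPE_MAX]; /* tasks using this nr, per type */
        struct rcu_head rcu;                  /* deferred free */
    };

    struct pid_link
    {
        struct hlist_node node;               /* entry in pid->tasks[type] */
        struct pid *pid;
    };
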
-static fastcall int __detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
 {
-        struct pid *pid, *pid_next;
-        int nr = 0;
+        struct pid_link *link;
+        struct pid *pid;
+        int tmp;

-        pid = &task->pids[type];
-        if (!hlist_unhashed(&pid->pid_chain)) {
+        link = &task->pids[type];
+        pid = link->pid;

-                if (list_empty(&pid->pid_list)) {
-                        nr = pid->nr;
-                        hlist_del_rcu(&pid->pid_chain);
-                } else {
-                        pid_next = list_entry(pid->pid_list.next,
-                                                struct pid, pid_list);
-                        /* insert next pid from pid_list to hash */
-                        hlist_replace_rcu(&pid->pid_chain,
-                                          &pid_next->pid_chain);
-                }
-        }
+        hlist_del_rcu(&link->node);
+        link->pid = NULL;

-        list_del_rcu(&pid->pid_list);
-        pid->nr = 0;
+        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
+                if (!hlist_empty(&pid->tasks[tmp]))
+                        return;

-        return nr;
+        free_pid(pid);
 }

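A struct pid is thus a reference-counted object shared by every task that
uses the number: detach_pid() calls free_pid() only once the last tasks[]
list empties, and put_pid() releases the allocation when the final reference
drops. A user-space analog of the put_pid() refcount idiom, assuming C11
atomics stand in for the kernel's atomic_t (the kernel version additionally
defers the final free through RCU via delayed_put_pid()):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct toy_pid {
        atomic_int count;
        int nr;
    };

    static struct toy_pid *toy_get(struct toy_pid *pid)
    {
        if (pid)
            atomic_fetch_add(&pid->count, 1);
        return pid;
    }

    static void toy_put(struct toy_pid *pid)
    {
        if (!pid)
            return;
        /* old value 1 means we held the last reference */
        if (atomic_fetch_sub(&pid->count, 1) == 1)
            free(pid);
    }

The (count == 1) test in the real put_pid() is a fast path: when the caller
provably holds the only reference, the object can be freed without paying
for an atomic read-modify-write.
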
-void fastcall detach_pid(task_t *task, enum pid_type type)
+struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 {
-        int tmp, nr;
+        struct task_struct *result = NULL;
+        if (pid) {
+                struct hlist_node *first;
+                first = rcu_dereference(pid->tasks[type].first);
+                if (first)
+                        result = hlist_entry(first, struct task_struct, pids[(type)].node);
+        }
+        return result;
+}

-        nr = __detach_pid(task, type);
-        if (!nr)
-                return;
+/*
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ */
+task_t *find_task_by_pid_type(int type, int nr)
+{
+        return pid_task(find_pid(nr), type);
+}

-        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
-                if (tmp != type && find_pid(tmp, nr))
-                        return;
+EXPORT_SYMBOL(find_task_by_pid_type);

-        free_pidmap(nr);
+struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+{
+        struct task_struct *result;
+        rcu_read_lock();
+        result = pid_task(pid, type);
+        if (result)
+                get_task_struct(result);
+        rcu_read_unlock();
+        return result;
 }

-task_t *find_task_by_pid_type(int type, int nr)
+struct pid *find_get_pid(pid_t nr)
 {
         struct pid *pid;

-        pid = find_pid(type, nr);
-        if (!pid)
-                return NULL;
+        rcu_read_lock();
+        pid = get_pid(find_pid(nr));
+        rcu_read_unlock();

-        return pid_task(&pid->pid_list, type);
+        return pid;
 }

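Taken together, the new helpers make PID lookups safe against reuse:
find_get_pid() pins the struct pid under rcu_read_lock(), and get_pid_task()
turns it into a counted task reference. A sketch of a hypothetical caller
(toy_find_task is not part of this patch) combining them:

    /* Hypothetical caller: resolve a number to a task reference that
     * stays valid after rcu_read_unlock(). Both helpers tolerate NULL,
     * so a failed lookup simply propagates NULL. */
    static struct task_struct *toy_find_task(pid_t nr)
    {
        struct pid *pid = find_get_pid(nr);
        struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

        put_pid(pid);   /* the task reference is all we keep */
        return task;    /* caller put_task_struct()s when done */
    }
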
-EXPORT_SYMBOL(find_task_by_pid_type);
-
 /*
  * The pid hash table is scaled according to the amount of memory in the
  * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
  * more.
  */
 void __init pidhash_init(void)
 {
-        int i, j, pidhash_size;
+        int i, pidhash_size;
         unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

         pidhash_shift = max(4, fls(megabytes * 4));
         pidhash_shift = min(12, pidhash_shift);
         pidhash_size = 1 << pidhash_shift;

         printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
                 pidhash_size, pidhash_shift,
-                PIDTYPE_MAX * pidhash_size * sizeof(struct hlist_head));
+                pidhash_size * sizeof(struct hlist_head));

-        for (i = 0; i < PIDTYPE_MAX; i++) {
-                pid_hash[i] = alloc_bootmem(pidhash_size *
-                                        sizeof(*(pid_hash[i])));
-                if (!pid_hash[i])
-                        panic("Could not alloc pidhash!\n");
-                for (j = 0; j < pidhash_size; j++)
-                        INIT_HLIST_HEAD(&pid_hash[i][j]);
-        }
+        pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
+        if (!pid_hash)
+                panic("Could not alloc pidhash!\n");
+        for (i = 0; i < pidhash_size; i++)
+                INIT_HLIST_HEAD(&pid_hash[i]);
 }

 void __init pidmap_init(void)
 {
         pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
         /* Reserve PID 0. We never call free_pidmap(0) */
         set_bit(0, pidmap_array->page);
         atomic_dec(&pidmap_array->nr_free);
+
+        pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
+                                        __alignof__(struct pid),
+                                        SLAB_PANIC, NULL, NULL);
 }
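
The sizing rule in pidhash_init() can be checked by hand: fls() returns the
1-based index of the highest set bit, so 512 MB of kernel pages gives
fls(2048) = 12, i.e. 4096 hash slots, anything from one gigabyte up clamps
to the same ceiling, and tiny machines floor at 16 slots. A small user-space
model of the calculation:

    #include <stdio.h>

    /* fls(x): 1-based index of the highest set bit; fls(0) == 0 */
    static int fls_model(unsigned long x)
    {
        int bit = 0;
        while (x) {
            ++bit;
            x >>= 1;
        }
        return bit;
    }

    int main(void)
    {
        unsigned long megabytes[] = { 0, 16, 128, 512, 1024 };
        int i;

        for (i = 0; i < 5; ++i) {
            int shift = fls_model(megabytes[i] * 4);
            if (shift < 4)
                shift = 4;   /* minimum: 16 slots */
            if (shift > 12)
                shift = 12;  /* maximum: 4096 slots */
            printf("%4lu MB -> %d slots\n", megabytes[i], 1 << shift);
        }
        return 0;
    }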