xref: /openbmc/linux/kernel/profile.c (revision a75acf850ca80136a4f845cf9a7cd26e7465c1f4)
1 /*
2  *  linux/kernel/profile.c
3  *  Simple profiling. Manages a direct-mapped profile hit count buffer,
4  *  with configurable resolution, support for restricting the cpus on
5  *  which profiling is done, and switching between cpu time and
6  *  schedule() calls via kernel command line parameters passed at boot.
7  *
8  *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
9  *	Red Hat, July 2004
10  *  Consolidation of architecture support code for profiling,
11  *	William Irwin, Oracle, July 2004
12  *  Amortized hit count accounting via per-cpu open-addressed hashtables
13  *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
14  */
15 
16 #include <linux/module.h>
17 #include <linux/profile.h>
18 #include <linux/bootmem.h>
19 #include <linux/notifier.h>
20 #include <linux/mm.h>
21 #include <linux/cpumask.h>
22 #include <linux/cpu.h>
24 #include <linux/highmem.h>
25 #include <linux/mutex.h>
26 #include <asm/sections.h>
27 #include <asm/semaphore.h>
28 #include <asm/irq_regs.h>
29 
30 struct profile_hit {
31 	u32 pc, hits;
32 };
33 #define PROFILE_GRPSHIFT	3
34 #define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
35 #define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
36 #define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
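/*
 * For a sense of scale (assuming 4 KiB pages): struct profile_hit is
 * 8 bytes, so each per-cpu hashtable page holds NR_PROFILE_HIT == 512
 * entries, arranged as NR_PROFILE_GRP == 64 groups of
 * PROFILE_GRPSZ == 8 entries.
 */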
37 
38 /* Oprofile timer tick hook */
39 int (*timer_hook)(struct pt_regs *) __read_mostly;
40 
41 static atomic_t *prof_buffer;
42 static unsigned long prof_len, prof_shift;
43 int prof_on __read_mostly;
44 static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
45 #ifdef CONFIG_SMP
46 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
47 static DEFINE_PER_CPU(int, cpu_profile_flip);
48 static DEFINE_MUTEX(profile_flip_mutex);
49 #endif /* CONFIG_SMP */
50 
51 static int __init profile_setup(char * str)
52 {
53 	static char __initdata schedstr[] = "schedule";
54 	static char __initdata sleepstr[] = "sleep";
55 	int par;
56 
57 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
58 		prof_on = SLEEP_PROFILING;
59 		if (str[strlen(sleepstr)] == ',')
60 			str += strlen(sleepstr) + 1;
61 		if (get_option(&str, &par))
62 			prof_shift = par;
63 		printk(KERN_INFO
64 			"kernel sleep profiling enabled (shift: %ld)\n",
65 			prof_shift);
66 	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
67 		prof_on = SCHED_PROFILING;
68 		if (str[strlen(schedstr)] == ',')
69 			str += strlen(schedstr) + 1;
70 		if (get_option(&str, &par))
71 			prof_shift = par;
72 		printk(KERN_INFO
73 			"kernel schedule profiling enabled (shift: %ld)\n",
74 			prof_shift);
75 	} else if (get_option(&str, &par)) {
76 		prof_shift = par;
77 		prof_on = CPU_PROFILING;
78 		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
79 			prof_shift);
80 	}
81 	return 1;
82 }
83 __setup("profile=", profile_setup);
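/*
 * Examples of command lines accepted by the parser above; for the
 * named modes the ",<shift>" suffix is optional and defaults to 0:
 *
 *	profile=2		- cpu-time profiling, 4-byte granularity
 *	profile=schedule,5	- schedule() profiling, 32-byte granularity
 *	profile=sleep,8		- sleep profiling, 256-byte granularity
 */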
84 
85 
86 void __init profile_init(void)
87 {
88 	if (!prof_on)
89 		return;
90 
91 	/* only text is profiled */
92 	prof_len = (_etext - _stext) >> prof_shift;
93 	prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
94 }
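/*
 * Sizing sketch: each prof_buffer slot covers 2^prof_shift bytes of
 * kernel text, so e.g. ~4 MiB of text with prof_shift == 2 gives
 * roughly a million slots, i.e. about 4 MiB worth of (4-byte)
 * atomic_t counters allocated from bootmem above.
 */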
95 
96 /* Profile event notifications */
97 
98 #ifdef CONFIG_PROFILING
99 
100 static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
101 static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
102 static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
103 
104 void profile_task_exit(struct task_struct * task)
105 {
106 	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
107 }
108 
109 int profile_handoff_task(struct task_struct * task)
110 {
111 	int ret;
112 	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
113 	return (ret == NOTIFY_OK) ? 1 : 0;
114 }
115 
116 void profile_munmap(unsigned long addr)
117 {
118 	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
119 }
120 
121 int task_handoff_register(struct notifier_block * n)
122 {
123 	return atomic_notifier_chain_register(&task_free_notifier, n);
124 }
125 
126 int task_handoff_unregister(struct notifier_block * n)
127 {
128 	return atomic_notifier_chain_unregister(&task_free_notifier, n);
129 }
130 
131 int profile_event_register(enum profile_type type, struct notifier_block * n)
132 {
133 	int err = -EINVAL;
134 
135 	switch (type) {
136 		case PROFILE_TASK_EXIT:
137 			err = blocking_notifier_chain_register(
138 					&task_exit_notifier, n);
139 			break;
140 		case PROFILE_MUNMAP:
141 			err = blocking_notifier_chain_register(
142 					&munmap_notifier, n);
143 			break;
144 	}
145 
146 	return err;
147 }
148 
149 
150 int profile_event_unregister(enum profile_type type, struct notifier_block * n)
151 {
152 	int err = -EINVAL;
153 
154 	switch (type) {
155 		case PROFILE_TASK_EXIT:
156 			err = blocking_notifier_chain_unregister(
157 					&task_exit_notifier, n);
158 			break;
159 		case PROFILE_MUNMAP:
160 			err = blocking_notifier_chain_unregister(
161 					&munmap_notifier, n);
162 			break;
163 	}
164 
165 	return err;
166 }
167 
168 int register_timer_hook(int (*hook)(struct pt_regs *))
169 {
170 	if (timer_hook)
171 		return -EBUSY;
172 	timer_hook = hook;
173 	return 0;
174 }
175 
176 void unregister_timer_hook(int (*hook)(struct pt_regs *))
177 {
178 	WARN_ON(hook != timer_hook);
179 	timer_hook = NULL;
180 	/* make sure all CPUs see the NULL hook */
181 	synchronize_sched();  /* Allow ongoing interrupts to complete. */
182 }
183 
184 EXPORT_SYMBOL_GPL(register_timer_hook);
185 EXPORT_SYMBOL_GPL(unregister_timer_hook);
186 EXPORT_SYMBOL_GPL(task_handoff_register);
187 EXPORT_SYMBOL_GPL(task_handoff_unregister);
188 
189 #endif /* CONFIG_PROFILING */
190 
191 EXPORT_SYMBOL_GPL(profile_event_register);
192 EXPORT_SYMBOL_GPL(profile_event_unregister);
193 
194 #ifdef CONFIG_SMP
195 /*
196  * Each cpu has a pair of open-addressed hashtables for pending
197  * profile hits. read_profile() IPIs all cpus, requesting each to
198  * flip its buffers, and then flushes their contents to prof_buffer
199  * itself. Flip requests are serialized by profile_flip_mutex. The
200  * second hashtable exists solely to avoid the cacheline contention
201  * that would otherwise occur while pending profile hits are being
202  * flushed (the flushes are needed for accurate reporting) and would
203  * thus resurrect the timer interrupt livelock issue.
204  *
205  * The open-addressed hashtables are indexed by profile buffer slot
206  * and hold the number of pending hits to that profile buffer slot on
207  * a cpu in an entry. When the hashtable overflows, all pending hits
208  * are accounted to their corresponding profile buffer slots with
209  * atomic_add() and the hashtable emptied. As numerous pending hits
210  * may be accounted to a profile buffer slot in a hashtable entry,
211  * this amortizes a number of atomic profile buffer increments likely
212  * to be far larger than the number of entries in the hashtable,
213  * particularly given that the number of distinct profile buffer
214  * positions to which hits are accounted during short intervals (e.g.
215  * several seconds) is usually very small. Exclusion from buffer
216  * flipping is provided by interrupt disablement (note that for
217  * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
218  * process context).
219  * The hash function is meant to be lightweight rather than strong,
220  * and was vaguely inspired by the ppc64 firmware-supported inverted
221  * pagetable hash functions, but it uses a full table of finite
222  * collision chains rather than just pairs of them.
223  *
224  * -- wli
225  */
226 static void __profile_flip_buffers(void *unused)
227 {
228 	int cpu = smp_processor_id();
229 
230 	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
231 }
232 
233 static void profile_flip_buffers(void)
234 {
235 	int i, j, cpu;
236 
237 	mutex_lock(&profile_flip_mutex);
238 	j = per_cpu(cpu_profile_flip, get_cpu());
239 	put_cpu();
240 	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
241 	for_each_online_cpu(cpu) {
242 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
243 		for (i = 0; i < NR_PROFILE_HIT; ++i) {
244 			if (!hits[i].hits) {
245 				if (hits[i].pc)
246 					hits[i].pc = 0;
247 				continue;
248 			}
249 			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
250 			hits[i].hits = hits[i].pc = 0;
251 		}
252 	}
253 	mutex_unlock(&profile_flip_mutex);
254 }
255 
256 static void profile_discard_flip_buffers(void)
257 {
258 	int i, cpu;
259 
260 	mutex_lock(&profile_flip_mutex);
261 	i = per_cpu(cpu_profile_flip, get_cpu());
262 	put_cpu();
263 	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
264 	for_each_online_cpu(cpu) {
265 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
266 		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
267 	}
268 	mutex_unlock(&profile_flip_mutex);
269 }
270 
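/*
 * A rough sketch of the probing done in profile_hits() below, assuming
 * 4 KiB pages (512 entries in 64 groups of 8): the primary group is
 * selected by the low 6 bits of the slot number pc, and the secondary
 * stride is derived from ~(pc << 1), which is always an odd number of
 * groups and therefore visits every group before wrapping back to the
 * primary one.
 */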
271 void profile_hits(int type, void *__pc, unsigned int nr_hits)
272 {
273 	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
274 	int i, j, cpu;
275 	struct profile_hit *hits;
276 
277 	if (prof_on != type || !prof_buffer)
278 		return;
279 	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
280 	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
281 	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
282 	cpu = get_cpu();
283 	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
284 	if (!hits) {
285 		put_cpu();
286 		return;
287 	}
288 	/*
289 	 * We buffer updates to the global profile buffer in a per-CPU
290 	 * queue and thus reduce the number of global (and possibly
291 	 * NUMA-alien) accesses. The write-queue is self-coalescing:
292 	 */
293 	local_irq_save(flags);
294 	do {
295 		for (j = 0; j < PROFILE_GRPSZ; ++j) {
296 			if (hits[i + j].pc == pc) {
297 				hits[i + j].hits += nr_hits;
298 				goto out;
299 			} else if (!hits[i + j].hits) {
300 				hits[i + j].pc = pc;
301 				hits[i + j].hits = nr_hits;
302 				goto out;
303 			}
304 		}
305 		i = (i + secondary) & (NR_PROFILE_HIT - 1);
306 	} while (i != primary);
307 
308 	/*
309 	 * Add the current hit(s) and flush the write-queue out
310 	 * to the global buffer:
311 	 */
312 	atomic_add(nr_hits, &prof_buffer[pc]);
313 	for (i = 0; i < NR_PROFILE_HIT; ++i) {
314 		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
315 		hits[i].pc = hits[i].hits = 0;
316 	}
317 out:
318 	local_irq_restore(flags);
319 	put_cpu();
320 }
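/*
 * profile_hit() in <linux/profile.h> is expected to be the common
 * single-sample wrapper for this, i.e. profile_hits(type, pc, 1).
 */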
321 
322 static int __devinit profile_cpu_callback(struct notifier_block *info,
323 					unsigned long action, void *__cpu)
324 {
325 	int node, cpu = (unsigned long)__cpu;
326 	struct page *page;
327 
328 	switch (action) {
329 	case CPU_UP_PREPARE:
330 		node = cpu_to_node(cpu);
331 		per_cpu(cpu_profile_flip, cpu) = 0;
332 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
333 			page = alloc_pages_node(node,
334 					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
335 					0);
336 			if (!page)
337 				return NOTIFY_BAD;
338 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
339 		}
340 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
341 			page = alloc_pages_node(node,
342 					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
343 					0);
344 			if (!page)
345 				goto out_free;
346 			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
347 		}
348 		break;
349 	out_free:
350 		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
351 		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
352 		__free_page(page);
353 		return NOTIFY_BAD;
354 	case CPU_ONLINE:
355 		cpu_set(cpu, prof_cpu_mask);
356 		break;
357 	case CPU_UP_CANCELED:
358 	case CPU_DEAD:
359 		cpu_clear(cpu, prof_cpu_mask);
360 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
361 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
362 			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
363 			__free_page(page);
364 		}
365 		if (per_cpu(cpu_profile_hits, cpu)[1]) {
366 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
367 			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
368 			__free_page(page);
369 		}
370 		break;
371 	}
372 	return NOTIFY_OK;
373 }
374 #else /* !CONFIG_SMP */
375 #define profile_flip_buffers()		do { } while (0)
376 #define profile_discard_flip_buffers()	do { } while (0)
377 #define profile_cpu_callback		NULL
378 
379 void profile_hits(int type, void *__pc, unsigned int nr_hits)
380 {
381 	unsigned long pc;
382 
383 	if (prof_on != type || !prof_buffer)
384 		return;
385 	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
386 	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
387 }
388 #endif /* !CONFIG_SMP */
389 
390 void profile_tick(int type)
391 {
392 	struct pt_regs *regs = get_irq_regs();
393 
394 	if (type == CPU_PROFILING && timer_hook)
395 		timer_hook(regs);
396 	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
397 		profile_hit(type, (void *)profile_pc(regs));
398 }
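/*
 * profile_tick() is driven from the architecture's timer interrupt
 * path; a hit is only recorded for non-user PCs on cpus present in
 * prof_cpu_mask, which is what the /proc/irq/prof_cpu_mask entry
 * below controls.
 */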
399 
400 #ifdef CONFIG_PROC_FS
401 #include <linux/proc_fs.h>
402 #include <asm/uaccess.h>
403 #include <asm/ptrace.h>
404 
405 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
406 			int count, int *eof, void *data)
407 {
408 	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
409 	if (count - len < 2)
410 		return -EINVAL;
411 	len += sprintf(page + len, "\n");
412 	return len;
413 }
414 
415 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
416 					unsigned long count, void *data)
417 {
418 	cpumask_t *mask = (cpumask_t *)data;
419 	unsigned long full_count = count, err;
420 	cpumask_t new_value;
421 
422 	err = cpumask_parse_user(buffer, count, new_value);
423 	if (err)
424 		return err;
425 
426 	*mask = new_value;
427 	return full_count;
428 }
429 
430 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
431 {
432 	struct proc_dir_entry *entry;
433 
434 	/* create /proc/irq/prof_cpu_mask */
435 	if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
436 		return;
437 	entry->nlink = 1;
438 	entry->data = (void *)&prof_cpu_mask;
439 	entry->read_proc = prof_cpu_mask_read_proc;
440 	entry->write_proc = prof_cpu_mask_write_proc;
441 }
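/*
 * Example (sketch): restrict profiling to cpus 0 and 1 by writing a
 * hex cpumask from userspace:
 *
 *	echo 3 > /proc/irq/prof_cpu_mask
 */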
442 
443 /*
444  * This function accesses profiling information. The returned data is
445  * binary: the sampling step followed by the actual contents of the
446  * profile buffer. The readprofile(1) utility is recommended for
447  * turning this data into something meaningful.
448  */
449 static ssize_t
450 read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
451 {
452 	unsigned long p = *ppos;
453 	ssize_t read;
454 	char * pnt;
455 	unsigned int sample_step = 1 << prof_shift;
456 
457 	profile_flip_buffers();
458 	if (p >= (prof_len+1)*sizeof(unsigned int))
459 		return 0;
460 	if (count > (prof_len+1)*sizeof(unsigned int) - p)
461 		count = (prof_len+1)*sizeof(unsigned int) - p;
462 	read = 0;
463 
464 	while (p < sizeof(unsigned int) && count > 0) {
465 		if (put_user(*((char *)(&sample_step)+p),buf))
466 			return -EFAULT;
467 		buf++; p++; count--; read++;
468 	}
469 	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
470 	if (copy_to_user(buf,(void *)pnt,count))
471 		return -EFAULT;
472 	read += count;
473 	*ppos += read;
474 	return read;
475 }
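/*
 * Layout of the data returned above: the first sizeof(unsigned int)
 * bytes hold the sample step (1 << prof_shift), followed by the raw
 * prof_buffer counters, one per profiled text slot. A typical consumer
 * would be something like:
 *
 *	readprofile -m /boot/System.map-`uname -r`
 */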
476 
477 /*
478  * Writing to /proc/profile resets the counters
479  *
480  * Writing a 'profiling multiplier' value into it also re-sets the profiling
481  * interrupt frequency, on architectures that support this.
482  */
483 static ssize_t write_profile(struct file *file, const char __user *buf,
484 			     size_t count, loff_t *ppos)
485 {
486 #ifdef CONFIG_SMP
487 	extern int setup_profiling_timer (unsigned int multiplier);
488 
489 	if (count == sizeof(int)) {
490 		unsigned int multiplier;
491 
492 		if (copy_from_user(&multiplier, buf, sizeof(int)))
493 			return -EFAULT;
494 
495 		if (setup_profiling_timer(multiplier))
496 			return -EINVAL;
497 	}
498 #endif
499 	profile_discard_flip_buffers();
500 	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
501 	return count;
502 }
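/*
 * Example (sketch): any short write clears the counters, e.g.
 *
 *	echo > /proc/profile
 *
 * while writing exactly sizeof(int) bytes is additionally interpreted
 * as a new profiling multiplier on SMP kernels providing
 * setup_profiling_timer().
 */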
503 
504 static const struct file_operations proc_profile_operations = {
505 	.read		= read_profile,
506 	.write		= write_profile,
507 };
508 
509 #ifdef CONFIG_SMP
510 static void __init profile_nop(void *unused)
511 {
512 }
513 
514 static int __init create_hash_tables(void)
515 {
516 	int cpu;
517 
518 	for_each_online_cpu(cpu) {
519 		int node = cpu_to_node(cpu);
520 		struct page *page;
521 
522 		page = alloc_pages_node(node,
523 				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
524 				0);
525 		if (!page)
526 			goto out_cleanup;
527 		per_cpu(cpu_profile_hits, cpu)[1]
528 				= (struct profile_hit *)page_address(page);
529 		page = alloc_pages_node(node,
530 				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
531 				0);
532 		if (!page)
533 			goto out_cleanup;
534 		per_cpu(cpu_profile_hits, cpu)[0]
535 				= (struct profile_hit *)page_address(page);
536 	}
537 	return 0;
538 out_cleanup:
539 	prof_on = 0;
540 	smp_mb();
541 	on_each_cpu(profile_nop, NULL, 0, 1);
542 	for_each_online_cpu(cpu) {
543 		struct page *page;
544 
545 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
546 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
547 			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
548 			__free_page(page);
549 		}
550 		if (per_cpu(cpu_profile_hits, cpu)[1]) {
551 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
552 			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
553 			__free_page(page);
554 		}
555 	}
556 	return -1;
557 }
558 #else
559 #define create_hash_tables()			({ 0; })
560 #endif
561 
562 static int __init create_proc_profile(void)
563 {
564 	struct proc_dir_entry *entry;
565 
566 	if (!prof_on)
567 		return 0;
568 	if (create_hash_tables())
569 		return -1;
570 	if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
571 		return 0;
572 	entry->proc_fops = &proc_profile_operations;
573 	entry->size = (1+prof_len) * sizeof(atomic_t);
574 	hotcpu_notifier(profile_cpu_callback, 0);
575 	return 0;
576 }
577 module_init(create_proc_profile);
578 #endif /* CONFIG_PROC_FS */
579