v4.10.11
/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
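
/*
 * Illustrative boot-line usage of the parameter parsed by profile_setup()
 * above (examples, not part of the original file; the shift values are
 * arbitrary). The argument is either a plain shift for CPU-time profiling
 * or one of the keywords "schedule", "sleep" or "kvm", optionally followed
 * by ",<shift>"; with no shift given, prof_shift keeps its default of 0:
 *
 *	profile=2		CPU-time profiling, one counter per 4 bytes of text
 *	profile=schedule,5	profile schedule() calls, 32-byte granularity
 *	profile=kvm		KVM profiling, 1-byte granularity (shift 0)
 */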


int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
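
/*
 * Worked sizing example for profile_init() (numbers are hypothetical, not
 * from the original file): with 8 MiB of kernel text and profile=2,
 *	prof_len     = 8 MiB >> 2                   = 2097152 slots
 *	buffer_bytes = prof_len * sizeof(atomic_t)  = 8 MiB for a 4-byte atomic_t
 * A buffer that size may exceed what kzalloc() can provide, which is why
 * the code falls back to alloc_pages_exact() and finally vzalloc().
 */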

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
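
/*
 * Worked example of the constants used by do_profile_hits() below, assuming
 * PAGE_SIZE == 4096 (illustrative, not part of the original file):
 * sizeof(struct profile_hit) == 8, so NR_PROFILE_HIT == 512 entries per
 * page, PROFILE_GRPSZ == 8 and NR_PROFILE_GRP == 64 groups. For a slot pc,
 * the probe starts at group
 *	primary   = (pc & 63) << 3
 * and on collision advances in strides of
 *	secondary = (~(pc << 1) & 63) << 3
 * (an odd number of groups, so every group gets visited) until it wraps
 * back to primary, at which point the whole table is flushed to prof_buffer.
 */
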
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);

	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
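
/*
 * Call-site sketch (for orientation; the callers live outside this file and
 * the description is approximate): the generic timer-tick path invokes
 * profile_tick(CPU_PROFILING) from interrupt context, while SCHED_PROFILING
 * and SLEEP_PROFILING samples are fed in by the scheduler through
 * profile_hit()/profile_hits(), possibly from process context, which is why
 * do_profile_hits() disables interrupts instead of relying on already being
 * in an interrupt handler.
 */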

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}
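
/*
 * Illustrative use of the proc file created above (shell commands are
 * examples, not part of the original file): the mask is parsed by
 * cpumask_parse_user() as a hex cpumask, so on a 4-CPU machine
 *	# echo 3 > /proc/irq/prof_cpu_mask
 * confines profile_tick() hits to CPUs 0-1, and
 *	# echo f > /proc/irq/prof_cpu_mask
 * re-enables all four CPUs.
 */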

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
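
/*
 * Typical consumer of the format produced above (commands are illustrative;
 * exact readprofile options may vary by util-linux version):
 *	# readprofile -m /boot/System.map-$(uname -r) | sort -nr | head
 * maps the raw counters back to kernel symbols and lists the hottest ones,
 * and
 *	# readprofile -r
 * clears the counters by writing to /proc/profile.
 */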

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
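
/*
 * Sketch of the two write paths handled above (the multiplier value is an
 * arbitrary example): any write, for instance
 *	# echo > /proc/profile
 * clears the counters, while writing exactly sizeof(int) bytes on SMP also
 * hands the value to setup_profiling_timer(), roughly:
 *	int mult = 4;
 *	write(fd, &mult, sizeof(mult));	   with fd open on /proc/profile
 */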

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */