/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

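/*
 * Per-CPU scratch space for callchain capture: one perf_callchain_entry
 * per recursion context on every possible CPU, freed via RCU.
 */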
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

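/*
 * callchain_recursion guards each context level (task, softirq, hardirq,
 * NMI) against re-entrant capture; nr_callchain_events counts the users
 * of the shared buffers.
 */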
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

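/*
 * Weak stubs, overridden by architectures that support callchain
 * sampling; they fill @entry with frames taken from @regs.
 */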
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

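/* RCU callback: free every CPU's entry array, then the container itself */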
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

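/*
 * Unpublish the buffers first, then defer the free until after a grace
 * period: NMI context may still hold a reference obtained via
 * rcu_dereference() in get_callchain_entry().
 */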
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

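/* Allocate one entry per recursion context for every possible CPU */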
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

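/*
 * Take a reference on the shared buffers, allocating them for the first
 * user. Serialized against put_callchain_buffers() by callchain_mutex.
 */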
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

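/* Drop a reference; the last user schedules the buffers for release */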
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

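/*
 * Claim this CPU's entry for the current recursion context. Returns
 * NULL with *rctx == -1 when capture is already in progress at this
 * context level; returns NULL with a valid *rctx when the buffers are
 * gone, in which case the caller still owes a put_callchain_entry().
 */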
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

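/*
 * Sampling entry point: honour the event's exclude_callchain_kernel
 * and exclude_callchain_user attributes and capture the chain.
 */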
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
}

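/*
 * Capture a callchain into per-CPU storage. Kernel frames are recorded
 * only when @regs is kernel mode; for the user side, an interrupt taken
 * in the kernel falls back to task_pt_regs(current), while kernel
 * threads (no ->mm) and cross-task requests skip user unwinding.
 */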
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = init_nr;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
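
/*
 * Typical usage (sketch; the actual caller lives in the perf core,
 * kernel/events/core.c, from which this file was extracted): an event
 * that samples with PERF_SAMPLE_CALLCHAIN takes a reference at creation
 * time and drops it when the event is destroyed, e.g.
 *
 *	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
 *		err = get_callchain_buffers();
 *	...
 *	put_callchain_buffers();
 */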