/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
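
/*
 * Worked example of the length computation above (page and ring sizes
 * are illustrative assumptions, not values from this file): with 4KiB
 * pages and 128 data pages, mp->mask is 128 * 4096 - 1 = 0x7ffff, so
 * the mapping is one leading control page (struct perf_event_mmap_page)
 * followed by map->mask + 1 = 0x80000 bytes of ring-buffer data.
 */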

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
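
/*
 * Wrap-around sketch for the copy loop above (the numbers are
 * illustrative assumptions): with map->mask = 0xffff (64KiB of data),
 * *startp & map->mask == 0xfff8 and an event of size 0x20, the loop
 * issues two memcpy() calls into map->event_copy: 8 bytes from the end
 * of the buffer, then 0x18 bytes from its beginning, so callers always
 * see the event as one contiguous record.
 */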

/*
 * Read events from the ring buffer one at a time.
 * Returns one event per call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	// process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
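
/*
 * A slightly fuller sketch of the usage loop above (process_event() is
 * a hypothetical callback, not something defined in this file):
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return;		// -EAGAIN: nothing to read, -ENOENT: unmapped
 *
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		process_event(event);
 *		perf_mmap__consume(map);
 *	}
 *
 *	perf_mmap__read_done(map);	// required in overwrite mode
 */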

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
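
/*
 * A minimal sketch of the refcount lifecycle described above (the
 * POLLHUP sequencing is inferred from the comment and from
 * perf_evlist__filter_pollfd(), not spelled out in this file):
 *
 *	perf_mmap__mmap(map, mp, fd);	// refcnt = 2
 *	...
 *	perf_mmap__put(map);		// evlist drops its ref on POLLHUP, refcnt = 1
 *	perf_mmap__consume(map);	// once empty: drops the last ref -> munmap
 */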

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
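
/*
 * Illustrative walk of the scan above (offsets and sizes are made up):
 * with mask = 0xffff and *start = 0x1000, the loop keeps adding each
 * header's size to evt_head; if it reaches a header whose size is 0 at,
 * say, 0x9000, that offset becomes *end.  If it instead walks a whole
 * buffer's worth (0x10000 bytes) without finding one, it rewinds past
 * the last record it stepped over and reports that position as *end.
 */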

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
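
/*
 * Example of the window chosen above (values are illustrative): in the
 * default (non-overwrite) mode with md->prev == 0x100 and the kernel
 * head at 0x180, the readable window is [0x100, 0x180).  In overwrite
 * (backward) mode the roles are swapped: reading starts at the current
 * head and stops at the position saved by the previous
 * perf_mmap__read_done().
 */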

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}
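
/*
 * A minimal sketch of a push() callback (the file-descriptor argument
 * and the use of write() are assumptions for illustration, not code
 * from this file):
 *
 *	static int pushfn(void *to, void *buf, size_t size)
 *	{
 *		int fd = *(int *)to;
 *
 *		return write(fd, buf, size) == (ssize_t)size ? 0 : -1;
 *	}
 *
 *	...
 *	perf_mmap__push(md, &fd, pushfn);
 */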

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct map->prev to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  struct evsel *evsel __maybe_unused,
					  int idx __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_zalloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		__set_bit(node_index, node_mask);
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so it requires an explicit record__aio_sync()
			 * call before the cblock may be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with priority delta to have
			 * faster aio write system calls because queued requests
			 * are kept in separate per-prio queues and adding
			 * a new request will iterate through a shorter per-prio
			 * list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
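
/*
 * Example of the priority assignment above (delta_max is illustrative):
 * with _SC_AIO_PRIO_DELTA_MAX == 20, cblock 0 gets aio_reqprio 20,
 * cblock 1 gets 19, and cblock 20 gets 0; any further cblocks would
 * compute a negative delta and therefore also fall back to priority 0.
 */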

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

#ifndef PYTHON_PERF
	zstd_fini(&map->zstd_data);
#endif

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int idx, nr_cpus;
	struct perf_cpu cpu;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (idx = 0; idx < nr_cpus; idx++) {
		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map index to online cpu */
		if (cpu__get_node(cpu) == node)
			__set_bit(cpu.cpu, mask->bits);
	}
}
262
263static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
264{
265 map->affinity_mask.nbits = cpu__max_cpu().cpu;
266 map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
267 if (!map->affinity_mask.bits)
268 return -1;
269
270 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
271 build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
272 else if (mp->affinity == PERF_AFFINITY_CPU)
273 __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
274
275 return 0;
276}
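
/*
 * Illustration of the two non-default affinity modes (the CPU and node
 * numbers are made up): with mp->affinity == PERF_AFFINITY_CPU and
 * map->core.cpu.cpu == 3, only bit 3 is set in the mask; with
 * PERF_AFFINITY_NODE and a ring buffer whose CPU sits on node 1,
 * build_node_mask() sets one bit per online CPU on node 1.
 */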

int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

#ifndef PYTHON_PERF
	if (zstd_init(&map->zstd_data, mp->comp_level)) {
		pr_debug2("failed to init mmap compressor, error %d\n", errno);
		return -1;
	}
#endif

	if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}

int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
{
	clone->nbits = original->nbits;
	clone->bits = bitmap_zalloc(original->nbits);
	if (!clone->bits)
		return -ENOMEM;

	memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));
	return 0;
}