// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

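/*
 * Insert 'new' into the time ordered 'events' list. The search starts
 * from the last queued event (oe->last) and walks forward or backward
 * from there, so queueing stays cheap when timestamps arrive roughly
 * in order.
 */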
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

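/*
 * Duplicate the event payload when copy_on_queue is set, so the queued
 * event stays valid after the caller's buffer is reused. The copied
 * bytes are accounted in cur_alloc_size against max_alloc_size.
 */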
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

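/* Release a duplicated event and give its bytes back to the accounting. */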
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}

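/*
 * Size the allocation buffer so that a single malloc() provides roughly
 * 64KB worth of ordered_event slots.
 */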
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered event objects:
	 *    buffer -> event[0]
	 *              event[1]
	 *              ...
	 *
	 * Each allocated ordered event is linked to one of the
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of an ordered event object moves it from 'events' to
	 * the 'cache' list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}

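/* Allocate an ordered event for 'event' and queue it by timestamp. */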
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

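/*
 * Remove a delivered event: the object goes back to the 'cache' list
 * for reuse and any duplicated payload is freed.
 */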
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

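/*
 * Queue one event. Timestamps of 0 or ~0 are rejected with -ETIME;
 * events older than the last flush are still queued but counted in
 * nr_unordered_events. If allocation fails, half of the queue is
 * flushed to reclaim memory and the allocation is retried once.
 */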
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset, const char *file_path)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	oevent->file_path = file_path;
	return 0;
}

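/*
 * Deliver all queued events with timestamps up to oe->next_flush,
 * updating last_flush and the oe->last pointer as the list drains.
 */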
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

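/*
 * Pick the flush limit according to 'how': FINAL and TOP flush
 * everything (FINAL with a progress bar), HALF flushes up to the
 * midpoint between the oldest and newest queued timestamps, TIME
 * flushes up to the given timestamp, and ROUND keeps the previously
 * recorded next_flush, which is advanced to max_timestamp after each
 * successful round.
 */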
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

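/* Public entry points: flush with a given strategy, or up to a timestamp. */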
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

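/* Return the timestamp of the oldest queued event, or 0 if the queue is empty. */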
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}

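/*
 * Initialize an ordered_events instance with a 'deliver' callback and
 * opaque caller data. A minimal usage sketch follows; the deliver
 * callback and the process_event() helper are illustrative only, not
 * part of this file:
 *
 *	static int deliver(struct ordered_events *oe,
 *			   struct ordered_event *event)
 *	{
 *		// Hypothetical consumer of the time ordered stream.
 *		return process_event(oe->data, event->event);
 *	}
 *
 *	ordered_events__init(&oe, deliver, NULL);
 *	ordered_events__queue(&oe, event, timestamp, offset, path);
 *	ordered_events__flush(&oe, OE_FLUSH__ROUND);
 *	...
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */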
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver = deliver;
	oe->data = data;
}

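/*
 * Free one allocation buffer; 'max' is the number of slots that were
 * handed out, so only those can hold duplicated events to release.
 */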
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}

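/* Release every allocation buffer and any duplicated events they hold. */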
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

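/* Drop all buffered events and restore a freshly initialized state. */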
void ordered_events__reinit(struct ordered_events *oe)
{
	/*
	 * Save the callback and caller data before the memset wipes
	 * them; reading oe->data after the memset would always yield
	 * NULL.
	 */
	ordered_events__deliver_t old_deliver = oe->deliver;
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver, old_data);
}
417}