tools/perf/util/ordered-events.c (v4.6)
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

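/*
 * The helpers below copy the incoming event only when copy_on_queue is
 * set, and charge every copy against max_alloc_size so that queued
 * copies cannot grow without bound; free_dup_event() undoes the
 * accounting.
 */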
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
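		/*
		 * Note: 'new' is still NULL in this branch, so the
		 * 'new->event' store below dereferences a NULL pointer
		 * whenever the limit is hit; the v5.4 code further down
		 * returns NULL at this point instead.
		 */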
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
	}

	new->event = new_event;
	return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset)
{
	u64 timestamp = sample->time;
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
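		/*
		 * HALF flush is used on allocation pressure (see
		 * ordered_events__queue above): deliver the older half
		 * of the queued time window to reclaim event memory.
		 */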
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	};

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
}

void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}
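
For orientation, here is a minimal sketch of how a consumer drives this v4.6 API. The my_deliver() and example() functions are hypothetical stand-ins (perf's session code is the real caller); the ordered_events__* calls and OE_FLUSH__* modes are the ones defined above.

/* Hypothetical deliver callback; perf_session provides the real one. */
static int my_deliver(struct ordered_events *oe, struct ordered_event *event)
{
	/* consume event->event here, delivered in event->timestamp order */
	return 0;
}

static void example(union perf_event *event, struct perf_sample *sample,
		    u64 file_offset)
{
	struct ordered_events oe;

	ordered_events__init(&oe, my_deliver);
	oe.copy_on_queue = true;	/* keep private copies of queued events */

	/* for every record that carries a valid sample->time: */
	ordered_events__queue(&oe, event, sample, file_offset);

	/* on a round boundary: deliver events up to the previous round's
	 * max_timestamp, then arm next_flush with the current one */
	ordered_events__flush(&oe, OE_FLUSH__ROUND);

	/* at end of input: drain everything and release the buffers */
	ordered_events__flush(&oe, OE_FLUSH__FINAL);
	ordered_events__free(&oe);
}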
tools/perf/util/ordered-events.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered events objects:
	 *    buffer -> event[0]
	 *              event[1]
	 *              ...
	 *
	 * Each allocated ordered event is linked to one of
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of ordered event object moves it from events to
	 * the cache list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}
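
The buffer layout described in the comment above is carried by struct ordered_events_buffer from ordered-events.h. As of v5.4 its declaration is essentially the following (shown for context; the header holds the authoritative version):

struct ordered_events_buffer {
	struct list_head	list;		/* links the buffer on oe->to_free */
	struct ordered_event	event[0];	/* MAX_SAMPLE_BUFFER entries follow */
};

This is why alloc_event() sizes each allocation as sizeof(*oe->buffer) plus MAX_SAMPLE_BUFFER events, and why the v4.6 code above had to "abuse" event slot 0 to carry the to_free linkage.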

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP  ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	};

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}

void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
	oe->data	   = data;
}

static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}

void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
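	/*
	 * Note: oe->data is read only after the memset above, so the
	 * data cookie passed back to ordered_events__init() is always
	 * NULL rather than the previously registered pointer.
	 */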
	ordered_events__init(oe, old_deliver, oe->data);
}
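
Finally, a minimal sketch of the reworked v5.4 API. As before, my_ctx, my_deliver() and example() are hypothetical; the visible differences from v4.6 are that ordered_events__queue() now takes a raw timestamp instead of a struct perf_sample, that init() registers an opaque data cookie, and that a flush can be cut at an arbitrary time.

struct my_ctx { unsigned long delivered; };	/* hypothetical consumer state */

static int my_deliver(struct ordered_events *oe, struct ordered_event *event)
{
	struct my_ctx *ctx = oe->data;	/* cookie registered at init time */

	ctx->delivered++;
	return 0;
}

static void example(union perf_event *event, u64 timestamp, u64 file_offset)
{
	struct my_ctx ctx = { 0 };
	struct ordered_events oe;

	ordered_events__init(&oe, my_deliver, &ctx);

	ordered_events__queue(&oe, event, timestamp, file_offset);

	/* deliver everything queued up to and including a chosen cutoff: */
	ordered_events__flush_time(&oe, timestamp);

	/* oldest still-queued timestamp, 0 when the queue is empty: */
	(void)ordered_events__first_time(&oe);

	ordered_events__flush(&oe, OE_FLUSH__FINAL);
	ordered_events__free(&oe);
}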