v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
#include "intlist.h"

/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif

/*
 * Assume a maximum of 0.1ns elapsed per instruction. This would be the
 * case with a theoretical 10GHz core executing 1 instruction per cycle.
 * Used to estimate the sample time for synthesized instructions because
 * Coresight only emits a timestamp for a range of instructions rather
 * than per instruction.
 */
const u32 INSTR_PER_NS = 10;

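/*
 * Decoder instance state: 'data' points back to the owning cs_etm_queue,
 * 'mem_access' is the callback OpenCSD uses to read the traced program's
 * memory when decoding instruction ranges, and 'prev_return' remembers the
 * last datapath response so an interrupted flush can be resumed later.
 */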
struct cs_etm_decoder {
	void *data;
	void (*packet_printer)(const char *msg);
	bool suppress_printing;
	dcd_tree_handle_t dcd_tree;
	cs_etm_mem_cb_type mem_access;
	ocsd_datapath_resp_t prev_return;
	const char *decoder_name;
};

static u32
cs_etm_decoder__mem_access(const void *context,
			   const ocsd_vaddr_t address,
			   const ocsd_mem_space_acc_t mem_space,
			   const u8 trace_chan_id,
			   const u32 req_size,
			   u8 *buffer)
{
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;

	return decoder->mem_access(decoder->data, trace_chan_id, address,
				   req_size, buffer, mem_space);
}

int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
				      u64 start, u64 end,
				      cs_etm_mem_cb_type cb_func)
{
	decoder->mem_access = cb_func;

	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
					       OCSD_MEM_SPACE_ANY,
					       cs_etm_decoder__mem_access,
					       decoder))
		return -1;

	return 0;
}

int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
	ocsd_datapath_resp_t dp_ret;

	decoder->prev_return = OCSD_RESP_CONT;
	decoder->suppress_printing = true;
	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
				      0, 0, NULL, NULL);
	decoder->suppress_printing = false;
	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
		return -1;

	return 0;
}

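/*
 * Head and tail wrap around using "& (CS_ETM_PACKET_MAX_BUFFER - 1)", which
 * only behaves like a modulo when CS_ETM_PACKET_MAX_BUFFER is a power of
 * two.
 */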
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
			       struct cs_etm_packet *packet)
{
	if (!packet_queue || !packet)
		return -EINVAL;

	/* Nothing to do, might as well just return */
	if (packet_queue->packet_count == 0)
		return 0;
	/*
	 * The queueing process in function cs_etm_decoder__buffer_packet()
	 * increments the tail *before* using it.  This is somewhat
	 * counter-intuitive but it has the advantage of centralizing tail
	 * management at a single location.  Because of that we need to
	 * follow the same heuristic with the head, i.e. we increment it
	 * before using its value.  Otherwise the first element of the
	 * packet queue is not used.
	 */
	packet_queue->head = (packet_queue->head + 1) &
			     (CS_ETM_PACKET_MAX_BUFFER - 1);

	*packet = packet_queue->packet_buffer[packet_queue->head];

	packet_queue->packet_count--;

	return 1;
}

/*
 * Calculate the number of nanoseconds elapsed.
 *
 * instr_count is updated in place with the remainder of the instructions
 * which didn't make up a whole nanosecond.
 */
static u32 cs_etm_decoder__dec_instr_count_to_ns(u32 *instr_count)
{
	const u32 instr_copy = *instr_count;

	*instr_count %= INSTR_PER_NS;
	return instr_copy / INSTR_PER_NS;
}
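/*
 * Example (illustrative): with INSTR_PER_NS == 10, an instr_count of 2507
 * converts to 250 ns and leaves *instr_count == 7 to be carried over to the
 * next estimate.
 */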

static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
					    ocsd_etmv3_cfg *config)
{
	config->reg_idr = params->etmv3.reg_idr;
	config->reg_ctrl = params->etmv3.reg_ctrl;
	config->reg_ccer = params->etmv3.reg_ccer;
	config->reg_trc_id = params->etmv3.reg_trc_id;
	config->arch_ver = ARCH_V7;
	config->core_prof = profile_CortexA;

	return 0;
}

#define TRCIDR1_TRCARCHMIN_SHIFT 4
#define TRCIDR1_TRCARCHMIN_MASK  GENMASK(7, 4)
#define TRCIDR1_TRCARCHMIN(x)    (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)

static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
{
	/*
	 * For ETMv4 if the trace minor version is 4 or more then we can assume
	 * the architecture is ARCH_AA64 rather than just V8.
	 * ARCH_V8 = V8 architecture
	 * ARCH_AA64 = Min v8r3 plus additional AA64 PE features
	 */
	return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
}
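/*
 * For example, a TRCIDR1 value of 0x43 has TRCARCHMIN == 4 (bits [7:4]),
 * so ARCH_AA64 is selected; a minor version below 4 falls back to ARCH_V8.
 */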

static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
					     ocsd_etmv4_cfg *config)
{
	config->reg_configr = params->etmv4.reg_configr;
	config->reg_traceidr = params->etmv4.reg_traceidr;
	config->reg_idr0 = params->etmv4.reg_idr0;
	config->reg_idr1 = params->etmv4.reg_idr1;
	config->reg_idr2 = params->etmv4.reg_idr2;
	config->reg_idr8 = params->etmv4.reg_idr8;
	config->reg_idr9 = 0;
	config->reg_idr10 = 0;
	config->reg_idr11 = 0;
	config->reg_idr12 = 0;
	config->reg_idr13 = 0;
	config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__gen_ete_config(struct cs_etm_trace_params *params,
					   ocsd_ete_cfg *config)
{
	config->reg_configr = params->ete.reg_configr;
	config->reg_traceidr = params->ete.reg_traceidr;
	config->reg_idr0 = params->ete.reg_idr0;
	config->reg_idr1 = params->ete.reg_idr1;
	config->reg_idr2 = params->ete.reg_idr2;
	config->reg_idr8 = params->ete.reg_idr8;
	config->reg_devarch = params->ete.reg_devarch;
	config->arch_ver = ARCH_AA64;
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__print_str_cb(const void *p_context,
					 const char *msg,
					 const int str_len)
{
	const struct cs_etm_decoder *decoder = p_context;

	if (p_context && str_len && !decoder->suppress_printing)
		decoder->packet_printer(msg);
}

static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
					 struct cs_etm_decoder *decoder)
{
	int ret = 0;

	if (d_params->packet_printer == NULL)
		return -1;

	decoder->packet_printer = d_params->packet_printer;

	/*
	 * Set up a library default logger to process any printers
	 * (packet/raw frame) we add later.
	 */
	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
	if (ret != 0)
		return -1;

	/* no stdout / err / file output */
	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
	if (ret != 0)
		return -1;

	/*
	 * Set the string CB for the default logger, passes strings to
	 * perf print logger.
	 */
	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
					      (void *)decoder,
					      cs_etm_decoder__print_str_cb);
	if (ret != 0)
		ret = -1;

	return 0;
}

#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_decoder *decoder)
{
	/* Only log these during a --dump operation */
	if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		/* set up a library default logger to process the
		 *  raw frame printer we add later
		 */
		ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);

		/* no stdout / err / file output */
		ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);

		/* set the string CB for the default logger,
		 * passes strings to perf print logger.
		 */
		ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
						(void *)decoder,
						cs_etm_decoder__print_str_cb);

		/* use the built in library printer for the raw frames */
		ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
					      CS_RAW_DEBUG_FLAGS);
	}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
		struct cs_etm_decoder_params *d_params __maybe_unused,
		struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif

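/*
 * "Soft" timestamps are estimates: the instruction count accumulated since
 * the last real timestamp packet is converted to nanoseconds and added to
 * it.  "Hard" timestamps come straight from TIMESTAMP elements in the
 * trace.  Estimates are clamped so they never overtake the next real
 * timestamp seen in the trace.
 */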
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
				  struct cs_etm_packet_queue *packet_queue,
				  const uint8_t trace_chan_id)
{
	u64 estimated_ts;

	/* No timestamp packet has been received, nothing to do */
	if (!packet_queue->next_cs_timestamp)
		return OCSD_RESP_CONT;

	estimated_ts = packet_queue->cs_timestamp +
			cs_etm_decoder__dec_instr_count_to_ns(&packet_queue->instr_count);

	/* Estimated TS can never be higher than the next real one in the trace */
	packet_queue->cs_timestamp = min(packet_queue->next_cs_timestamp, estimated_ts);

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	return OCSD_RESP_WAIT;
}

static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id,
				  const ocsd_trc_index_t indx)
{
	struct cs_etm_packet_queue *packet_queue;
	u64 converted_timestamp;
	u64 estimated_first_ts;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * Coresight timestamps are raw timer values which need to be scaled to ns. Assume
	 * 0 is a bad value so don't try to convert it.
	 */
	converted_timestamp = elem->timestamp ?
				cs_etm__convert_sample_time(etmq, elem->timestamp) : 0;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front end,
	 * hence asking the decoder to keep decoding rather than stopping.
	 */
	if (packet_queue->next_cs_timestamp) {
		/*
		 * What was next is now where new ranges start from, overwriting
		 * any previous estimate in cs_timestamp
		 */
		packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
		packet_queue->next_cs_timestamp = converted_timestamp;
		return OCSD_RESP_CONT;
	}

	if (!converted_timestamp) {
		/*
		 * Zero timestamps can be seen due to misconfiguration or hardware bugs.
		 * Warn once, and don't try to subtract instr_count as it would result in an
		 * underflow.
		 */
		packet_queue->cs_timestamp = 0;
		if (!cs_etm__etmq_is_timeless(etmq))
			pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
					". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
					indx);

	} else if (packet_queue->instr_count / INSTR_PER_NS > converted_timestamp) {
		/*
		 * Sanity check that elem->timestamp - packet_queue->instr_count would not
		 * result in an underflow. Warn and clamp at 0 if it would.
		 */
		packet_queue->cs_timestamp = 0;
		pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
	} else {
		/*
		 * This is the first timestamp we've seen since the beginning of traces
		 * or a discontinuity.  Since timestamp packets are generated *after*
		 * range packets have been generated, we need to estimate the time at
		 * which instructions started by subtracting the number of instructions
		 * executed from the timestamp. Don't estimate earlier than the last used
		 * timestamp though.
		 */
		estimated_first_ts = converted_timestamp -
					(packet_queue->instr_count / INSTR_PER_NS);
		packet_queue->cs_timestamp = max(packet_queue->cs_timestamp, estimated_first_ts);
	}
	packet_queue->next_cs_timestamp = converted_timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are told to proceed */
	return OCSD_RESP_WAIT;
}

static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
	packet_queue->next_cs_timestamp = 0;
	packet_queue->instr_count = 0;
}

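/*
 * Queue a new packet at the tail.  The queue deliberately never holds more
 * than CS_ETM_PACKET_MAX_BUFFER - 1 entries: once that level is reached,
 * OCSD_RESP_WAIT pauses the decoder so the front end can drain packets
 * before head and tail would collide.
 */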
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
			      const u8 trace_chan_id,
			      enum cs_etm_sample_type sample_type)
{
	u32 et = 0;
	int cpu;

	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_FATAL_SYS_ERR;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return OCSD_RESP_FATAL_SYS_ERR;

	et = packet_queue->tail;
	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
	packet_queue->tail = et;
	packet_queue->packet_count++;

	packet_queue->packet_buffer[et].sample_type = sample_type;
	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
	packet_queue->packet_buffer[et].cpu = cpu;
	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].instr_count = 0;
	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
	packet_queue->packet_buffer[et].last_instr_size = 0;
	packet_queue->packet_buffer[et].last_instr_type = 0;
	packet_queue->packet_buffer[et].last_instr_subtype = 0;
	packet_queue->packet_buffer[et].last_instr_cond = 0;
	packet_queue->packet_buffer[et].flags = 0;
	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;

	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_WAIT;

	return OCSD_RESP_CONT;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
			     struct cs_etm_packet_queue *packet_queue,
			     const ocsd_generic_trace_elem *elem,
			     const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
					    CS_ETM_RANGE);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &packet_queue->packet_buffer[packet_queue->tail];

	switch (elem->isa) {
	case ocsd_isa_aarch64:
		packet->isa = CS_ETM_ISA_A64;
		break;
	case ocsd_isa_arm:
		packet->isa = CS_ETM_ISA_A32;
		break;
	case ocsd_isa_thumb2:
		packet->isa = CS_ETM_ISA_T32;
		break;
	case ocsd_isa_tee:
	case ocsd_isa_jazelle:
	case ocsd_isa_custom:
	case ocsd_isa_unknown:
	default:
		packet->isa = CS_ETM_ISA_UNKNOWN;
	}

	packet->start_addr = elem->st_addr;
	packet->end_addr = elem->en_addr;
	packet->instr_count = elem->num_instr_range;
	packet->last_instr_type = elem->last_i_type;
	packet->last_instr_subtype = elem->last_i_subtype;
	packet->last_instr_cond = elem->last_instr_cond;

	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
		packet->last_instr_taken_branch = elem->last_instr_exec;
	else
		packet->last_instr_taken_branch = false;

	packet->last_instr_size = elem->last_instr_sz;

	/* per-thread scenario, no need to generate a timestamp */
	if (cs_etm__etmq_is_timeless(etmq))
		goto out;

	/*
	 * The packet queue is full and we haven't seen a timestamp (had we
	 * seen one the packet queue wouldn't be full).  Let the front end
	 * deal with it.
	 */
	if (ret == OCSD_RESP_WAIT)
		goto out;

	packet_queue->instr_count += elem->num_instr_range;
	/* Tell the front end we have a new timestamp to process */
	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
						trace_chan_id);
out:
	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	/*
	 * Something happened and who knows when we'll get new traces so
	 * reset time statistics.
	 */
	cs_etm_decoder__reset_timestamp(queue);
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_DISCONTINUITY);
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
				 const ocsd_generic_trace_elem *elem,
				 const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					    CS_ETM_EXCEPTION);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &queue->packet_buffer[queue->tail];
	packet->exception_number = elem->exception_number;

	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}

static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
			struct cs_etm_packet_queue *packet_queue,
			const ocsd_generic_trace_elem *elem,
			const uint8_t trace_chan_id)
{
	pid_t tid = -1;

	/*
	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
	 * as the VMID; bit ETM_OPT_CTXTID2 is set in this case.
	 */
	switch (cs_etm__get_pid_fmt(etmq)) {
	case CS_ETM_PIDFMT_CTXTID:
		if (elem->context.ctxt_id_valid)
			tid = elem->context.context_id;
		break;
	case CS_ETM_PIDFMT_CTXTID2:
		if (elem->context.vmid_valid)
			tid = elem->context.vmid;
		break;
	case CS_ETM_PIDFMT_NONE:
	default:
		break;
	}

	if (cs_etm__etmq_set_tid_el(etmq, tid, trace_chan_id,
				    elem->context.exception_level))
		return OCSD_RESP_FATAL_SYS_ERR;

	if (tid == -1)
		return OCSD_RESP_CONT;

	/*
	 * A timestamp is generated after a PE_CONTEXT element so make sure
	 * to rely on that upcoming one.
	 */
	cs_etm_decoder__reset_timestamp(packet_queue);

	return OCSD_RESP_CONT;
}

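/*
 * Main OpenCSD output callback: each decoded generic trace element is
 * translated into a cs_etm_packet (or a timestamp update) and queued for
 * the cs-etm front end, which later turns the packets into perf samples.
 */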
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
				const void *context,
				const ocsd_trc_index_t indx,
				const u8 trace_chan_id __maybe_unused,
				const ocsd_generic_trace_elem *elem)
{
	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
	struct cs_etm_queue *etmq = decoder->data;
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	switch (elem->elem_type) {
	case OCSD_GEN_TRC_ELEM_UNKNOWN:
		break;
	case OCSD_GEN_TRC_ELEM_EO_TRACE:
	case OCSD_GEN_TRC_ELEM_NO_SYNC:
	case OCSD_GEN_TRC_ELEM_TRACE_ON:
		resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
						    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION:
		resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
							trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
		resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
							 trace_chan_id,
							 indx);
		break;
	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
					       elem, trace_chan_id);
		break;
	/* Unused packet types */
	case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
	case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
	case OCSD_GEN_TRC_ELEM_EVENT:
	case OCSD_GEN_TRC_ELEM_SWTRACE:
	case OCSD_GEN_TRC_ELEM_CUSTOM:
	case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
	case OCSD_GEN_TRC_ELEM_MEMTRANS:
#if (OCSD_VER_NUM >= 0x010400)
	case OCSD_GEN_TRC_ELEM_INSTRUMENTATION:
#endif
	default:
		break;
	}

	return resp;
}

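/*
 * Build the per-trace-source configuration and register a decoder with the
 * tree.  OCSD_CREATE_FLG_FULL_DECODER produces fully decoded generic
 * elements for sample synthesis, while OCSD_CREATE_FLG_PACKET_PROC stops at
 * the packet processing stage, which is all the raw packet printing done
 * for --dump needs.
 */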
static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	ocsd_ete_cfg trace_config_ete;
	void *trace_config;
	u8 csid;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		csid = (t_params->etmv3.reg_idr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder->decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		csid = (t_params->etmv4.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	case CS_ETM_PROTO_ETE:
		csid = (t_params->ete.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_ete_config(t_params, &trace_config_ete);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETE;
		trace_config = &trace_config_ete;
		break;
	default:
		return -1;
	}

	/* if the CPU has no trace ID associated, no decoder needed */
	if (csid == CORESIGHT_TRACE_ID_UNUSED_VAL)
		return 0;

	if (d_params->operation == CS_ETM_OPERATION_DECODE) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree,
					   decoder->decoder_name,
					   OCSD_CREATE_FLG_FULL_DECODER,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
					       cs_etm_decoder__gen_trace_elem_printer,
					       decoder))
			return -1;

		return 0;
	} else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder->decoder_name,
					   OCSD_CREATE_FLG_PACKET_PROC,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
			return -1;

		return 0;
	}

	return -1;
}

struct cs_etm_decoder *
cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if ((!t_params) || (!d_params))
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					 OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf, set up to
	 * handle this. Barriers consist of an FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	/* init raw frame logging if required */
	cs_etm_decoder__init_raw_frame_logging(d_params, decoder);

	for (i = 0; i < decoders; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}

int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}
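/*
 * Typical caller loop (sketch): feed a chunk of AUX trace through
 * cs_etm_decoder__process_data_block(), drain the per-traceID queues with
 * cs_etm_decoder__get_packet() until it returns 0, then resume feeding from
 * *consumed until the whole buffer has been decoded.
 */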

void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}

const char *cs_etm_decoder__get_name(struct cs_etm_decoder *decoder)
{
	return decoder->decoder_name;
}