/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include "gt/intel_engine_types.h"

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "abi/guc_errors_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_communication_ctb_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_messages_abi.h"

/* Payload length only, i.e. don't include the G2H header length */
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET	2
#define G2H_LEN_DW_DEREGISTER_CONTEXT		1
#define G2H_LEN_DW_INVALIDATE_TLB		1

#define GUC_CONTEXT_DISABLE		0
#define GUC_CONTEXT_ENABLE		1

#define GUC_CLIENT_PRIORITY_KMD_HIGH	0
#define GUC_CLIENT_PRIORITY_HIGH	1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
#define GUC_CLIENT_PRIORITY_NORMAL	3
#define GUC_CLIENT_PRIORITY_NUM		4

#define GUC_MAX_CONTEXT_ID		65535
#define GUC_INVALID_CONTEXT_ID		GUC_MAX_CONTEXT_ID

#define GUC_RENDER_CLASS		0
#define GUC_VIDEO_CLASS			1
#define GUC_VIDEOENHANCE_CLASS		2
#define GUC_BLITTER_CLASS		3
#define GUC_COMPUTE_CLASS		4
#define GUC_GSC_OTHER_CLASS		5
#define GUC_LAST_ENGINE_CLASS		GUC_GSC_OTHER_CLASS
#define GUC_MAX_ENGINE_CLASSES		16
#define GUC_MAX_INSTANCES_PER_CLASS	32

#define GUC_DOORBELL_INVALID		256

/*
 * Work queue item header definitions
 *
 * The work queue is a circular buffer used to submit complex (multi-LRC)
 * submissions to the GuC. A work queue item is an entry in the circular
 * buffer.
 */
#define WQ_STATUS_ACTIVE		1
#define WQ_STATUS_SUSPENDED		2
#define WQ_STATUS_CMD_ERROR		3
#define WQ_STATUS_ENGINE_ID_NOT_USED	4
#define WQ_STATUS_SUSPENDED_FROM_RESET	5
#define WQ_TYPE_BATCH_BUF		0x1
#define WQ_TYPE_PSEUDO			0x2
#define WQ_TYPE_INORDER			0x3
#define WQ_TYPE_NOOP			0x4
#define WQ_TYPE_MULTI_LRC		0x5
#define WQ_TYPE_MASK			GENMASK(7, 0)
#define WQ_LEN_MASK			GENMASK(26, 16)

#define WQ_GUC_ID_MASK			GENMASK(15, 0)
#define WQ_RING_TAIL_MASK		GENMASK(28, 18)

#define GUC_STAGE_DESC_ATTR_ACTIVE	BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB	BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL	BIT(2)
#define GUC_STAGE_DESC_ATTR_PREEMPT	BIT(3)
#define GUC_STAGE_DESC_ATTR_RESET	BIT(4)
#define GUC_STAGE_DESC_ATTR_WQLOCKED	BIT(5)
#define GUC_STAGE_DESC_ATTR_PCH		BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED	BIT(7)

#define GUC_CTL_LOG_PARAMS		0
#define   GUC_LOG_VALID			BIT(0)
#define   GUC_LOG_NOTIFY_ON_HALF_FULL	BIT(1)
#define   GUC_LOG_CAPTURE_ALLOC_UNITS	BIT(2)
#define   GUC_LOG_LOG_ALLOC_UNITS	BIT(3)
#define   GUC_LOG_CRASH_SHIFT		4
#define   GUC_LOG_CRASH_MASK		(0x3 << GUC_LOG_CRASH_SHIFT)
#define   GUC_LOG_DEBUG_SHIFT		6
#define   GUC_LOG_DEBUG_MASK		(0xF << GUC_LOG_DEBUG_SHIFT)
#define   GUC_LOG_CAPTURE_SHIFT		10
#define   GUC_LOG_CAPTURE_MASK		(0x3 << GUC_LOG_CAPTURE_SHIFT)
#define   GUC_LOG_BUF_ADDR_SHIFT	12

#define GUC_CTL_WA			1
#define   GUC_WA_GAM_CREDITS		BIT(10)
#define   GUC_WA_DUAL_QUEUE		BIT(11)
#define   GUC_WA_RCS_RESET_BEFORE_RC6	BIT(13)
#define   GUC_WA_PRE_PARSER		BIT(14)
#define   GUC_WA_CONTEXT_ISOLATION	BIT(15)
#define   GUC_WA_RCS_CCS_SWITCHOUT	BIT(16)
#define   GUC_WA_HOLD_CCS_SWITCHOUT	BIT(17)
#define   GUC_WA_POLLCS			BIT(18)
#define   GUC_WA_RCS_REGS_IN_CCS_REGS_LIST	BIT(21)
#define   GUC_WA_ENABLE_TSC_CHECK_ON_RC6	BIT(22)

#define GUC_CTL_FEATURE			2
#define   GUC_CTL_ENABLE_GUC_PXP_CTL	BIT(1)
#define   GUC_CTL_ENABLE_SLPC		BIT(2)
#define   GUC_CTL_DISABLE_SCHEDULER	BIT(14)

#define GUC_CTL_DEBUG			3
#define   GUC_LOG_VERBOSITY_SHIFT	0
#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define   GUC_LOG_VERBOSITY_MIN		0
#define   GUC_LOG_VERBOSITY_MAX		3
#define   GUC_LOG_VERBOSITY_MASK	0x0000000f
#define   GUC_LOG_DESTINATION_MASK	(3 << 4)
#define   GUC_LOG_DISABLED		(1 << 6)
#define   GUC_PROFILE_ENABLED		(1 << 7)

#define GUC_CTL_ADS			4
#define   GUC_ADS_ADDR_SHIFT		1
#define   GUC_ADS_ADDR_MASK		(0xFFFFF << GUC_ADS_ADDR_SHIFT)

#define GUC_CTL_DEVID			5

#define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
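
/*
 * The GUC_CTL_* values above index dwords in the boot parameter block that
 * the driver hands to the firmware at load time. A minimal sketch of
 * populating such a block (the params[] array, the chosen flags and
 * ads_ggtt_page are illustrative assumptions, not requirements of this
 * header):
 *
 *	u32 params[GUC_CTL_MAX_DWORDS] = {};
 *
 *	params[GUC_CTL_LOG_PARAMS] = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL;
 *	params[GUC_CTL_FEATURE] = GUC_CTL_ENABLE_SLPC;
 *	params[GUC_CTL_DEBUG] = GUC_LOG_VERBOSITY_LOW;
 *	params[GUC_CTL_ADS] = ads_ggtt_page << GUC_ADS_ADDR_SHIFT;
 */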

/* Generic GT SysInfo data types */
#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED		0
#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK	1
#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI	2
#define GUC_GENERIC_GT_SYSINFO_MAX			16

/*
 * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6].
 * Bit 7 can be used for operations that apply to all engine classes & instances.
 */
#define GUC_ENGINE_CLASS_SHIFT		0
#define GUC_ENGINE_CLASS_MASK		(0x7 << GUC_ENGINE_CLASS_SHIFT)
#define GUC_ENGINE_INSTANCE_SHIFT	3
#define GUC_ENGINE_INSTANCE_MASK	(0xf << GUC_ENGINE_INSTANCE_SHIFT)
#define GUC_ENGINE_ALL_INSTANCES	BIT(7)

#define MAKE_GUC_ID(class, instance) \
	(((class) << GUC_ENGINE_CLASS_SHIFT) | \
	 ((instance) << GUC_ENGINE_INSTANCE_SHIFT))

#define GUC_ID_TO_ENGINE_CLASS(guc_id) \
	(((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
	(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)
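
/*
 * For example, composing and decoding a GuC ID with the layout above (the
 * class/instance values are arbitrary):
 *
 *	MAKE_GUC_ID(GUC_VIDEO_CLASS, 2)		== 0x11
 *	GUC_ID_TO_ENGINE_CLASS(0x11)		== GUC_VIDEO_CLASS
 *	GUC_ID_TO_ENGINE_INSTANCE(0x11)		== 2
 */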

#define SLPC_EVENT(id, c) (\
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \
)

/* the GuC arrays don't include OTHER_CLASS */
static u8 engine_class_guc_class_map[] = {
	[RENDER_CLASS]            = GUC_RENDER_CLASS,
	[COPY_ENGINE_CLASS]       = GUC_BLITTER_CLASS,
	[VIDEO_DECODE_CLASS]      = GUC_VIDEO_CLASS,
	[VIDEO_ENHANCEMENT_CLASS] = GUC_VIDEOENHANCE_CLASS,
	[OTHER_CLASS]             = GUC_GSC_OTHER_CLASS,
	[COMPUTE_CLASS]           = GUC_COMPUTE_CLASS,
};

static u8 guc_class_engine_class_map[] = {
	[GUC_RENDER_CLASS]       = RENDER_CLASS,
	[GUC_BLITTER_CLASS]      = COPY_ENGINE_CLASS,
	[GUC_VIDEO_CLASS]        = VIDEO_DECODE_CLASS,
	[GUC_VIDEOENHANCE_CLASS] = VIDEO_ENHANCEMENT_CLASS,
	[GUC_COMPUTE_CLASS]      = COMPUTE_CLASS,
	[GUC_GSC_OTHER_CLASS]    = OTHER_CLASS,
};

static inline u8 engine_class_to_guc_class(u8 class)
{
	BUILD_BUG_ON(ARRAY_SIZE(engine_class_guc_class_map) != MAX_ENGINE_CLASS + 1);
	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	return engine_class_guc_class_map[class];
}

static inline u8 guc_class_to_engine_class(u8 guc_class)
{
	BUILD_BUG_ON(ARRAY_SIZE(guc_class_engine_class_map) != GUC_LAST_ENGINE_CLASS + 1);
	GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);

	return guc_class_engine_class_map[guc_class];
}

/* Work item for submitting workloads into the GuC work queue. */
struct guc_wq_item {
	u32 header;
	u32 context_desc;
	u32 submit_element_info;
	u32 fence_id;
} __packed;
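
/*
 * A sketch of how a multi-LRC work queue item header might be packed with the
 * masks above, assuming FIELD_PREP() from <linux/bitfield.h>; wqi_len_dw,
 * guc_id and ring_tail_qw are illustrative inputs:
 *
 *	wqi->header = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
 *		      FIELD_PREP(WQ_LEN_MASK, wqi_len_dw);
 *	wqi->submit_element_info = FIELD_PREP(WQ_GUC_ID_MASK, guc_id) |
 *				   FIELD_PREP(WQ_RING_TAIL_MASK, ring_tail_qw);
 */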

struct guc_process_desc_v69 {
	u32 stage_id;
	u64 db_base_addr;
	u32 head;
	u32 tail;
	u32 error_offset;
	u64 wq_base_addr;
	u32 wq_size_bytes;
	u32 wq_status;
	u32 engine_presence;
	u32 priority;
	u32 reserved[36];
} __packed;

struct guc_sched_wq_desc {
	u32 head;
	u32 tail;
	u32 error_offset;
	u32 wq_status;
	u32 reserved[28];
} __packed;

/* Helper for context registration H2G */
struct guc_ctxt_registration_info {
	u32 flags;
	u32 context_idx;
	u32 engine_class;
	u32 engine_submit_mask;
	u32 wq_desc_lo;
	u32 wq_desc_hi;
	u32 wq_base_lo;
	u32 wq_base_hi;
	u32 wq_size;
	u32 hwlrca_lo;
	u32 hwlrca_hi;
};
#define CONTEXT_REGISTRATION_FLAG_KMD	BIT(0)
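
/*
 * The registration info is used to build the payload of a context
 * registration H2G. A rough sketch of filling it for a kernel-owned context
 * (info, ctx_id, engine and lrc_ggtt_addr are illustrative assumptions):
 *
 *	info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
 *	info.context_idx = ctx_id;
 *	info.engine_class = engine_class_to_guc_class(engine->class);
 *	info.engine_submit_mask = engine->logical_mask;
 *	info.hwlrca_lo = lower_32_bits(lrc_ggtt_addr);
 *	info.hwlrca_hi = upper_32_bits(lrc_ggtt_addr);
 */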

/* Preempt to idle on quantum expiry */
#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69	BIT(0)

/*
 * GuC Context registration descriptor.
 * FIXME: This is only required to exist during context registration.
 * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime of
 * the LRC is not required.
 */
struct guc_lrc_desc_v69 {
	u32 hw_context_desc;
	u32 slpm_perf_mode_hint;	/* SLPC v1 only */
	u32 slpm_freq_hint;
	u32 engine_submit_mask;		/* In logical space */
	u8 engine_class;
	u8 reserved0[3];
	u32 priority;
	u32 process_desc;
	u32 wq_addr;
	u32 wq_size;
	u32 context_flags;		/* CONTEXT_REGISTRATION_* */
	/* Time for one workload to execute. (in microseconds) */
	u32 execution_quantum;
	/* Time to wait for a preemption request to complete before issuing a
	 * reset. (in microseconds).
	 */
	u32 preemption_timeout;
	u32 policy_flags;		/* CONTEXT_POLICY_* */
	u32 reserved1[19];
} __packed;

/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
	u32 kl;
	u32 value;
} __packed;

/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
struct guc_update_context_policy_header {
	u32 action;
	u32 ctx_id;
} __packed;

struct guc_update_context_policy {
	struct guc_update_context_policy_header header;
	struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
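
/*
 * Each KLV entry packs a key and a value length into the kl word, followed by
 * the value itself. A sketch of appending one 1-dword policy KLV, assuming the
 * GUC_KLV_0_KEY/GUC_KLV_0_LEN fields from "abi/guc_klvs_abi.h" and an
 * illustrative key/value pair:
 *
 *	policy->klv[i].kl = FIELD_PREP(GUC_KLV_0_KEY, key) |
 *			    FIELD_PREP(GUC_KLV_0_LEN, 1);
 *	policy->klv[i].value = value;
 */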

/* Format of the UPDATE_SCHEDULING_POLICIES H2G data packet */
struct guc_update_scheduling_policy_header {
	u32 action;
} __packed;

/*
 * Can't dynamically allocate memory for the scheduling policy KLV because
 * it will be sent from within the reset path. Need a fixed-size lump on
 * the stack instead :(.
 *
 * Currently, there is only one KLV defined, which has 1 word of KL + 2 words of V.
 */
#define MAX_SCHEDULING_POLICY_SIZE 3

struct guc_update_scheduling_policy {
	struct guc_update_scheduling_policy_header header;
	u32 data[MAX_SCHEDULING_POLICY_SIZE];
} __packed;

#define GUC_POWER_UNSPECIFIED	0
#define GUC_POWER_D0		1
#define GUC_POWER_D1		2
#define GUC_POWER_D2		3
#define GUC_POWER_D3		4

/* Scheduling policy settings */

#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION	100	/* in ms */
#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO		50	/* in percent */

#define GLOBAL_POLICY_MAX_NUM_WI 15

/* Don't reset an engine upon preemption failure */
#define GLOBAL_POLICY_DISABLE_ENGINE_RESET				BIT(0)

#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000

/*
 * GuC converts the timeout to clock ticks internally. Different platforms
 * have different GuC clocks, so the maximum value before overflow is platform
 * dependent. The current worst case is about 110s, so the spec says to limit
 * it to 100s to be safe.
 */
#define GUC_POLICY_MAX_EXEC_QUANTUM_US		(100 * 1000 * 1000UL)
#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US	(100 * 1000 * 1000UL)

static inline u32 guc_policy_max_exec_quantum_ms(void)
{
	BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
	return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
}

static inline u32 guc_policy_max_preempt_timeout_ms(void)
{
	BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
	return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
}

struct guc_policies {
	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
	/*
	 * In microseconds. How much time to allow before DPC processing is
	 * called back via interrupt (to prevent DPC queue drain starving).
	 * Typically 1000s of microseconds (example only, not granularity).
	 */
	u32 dpc_promote_time;

	/* Must be set to take these new values. */
	u32 is_valid;

	/*
	 * Max number of WIs to process per call. A large value may keep CS
	 * idle.
	 */
	u32 max_num_work_items;

	u32 global_flags;
	u32 reserved[4];
} __packed;

/* GuC MMIO reg state struct */
struct guc_mmio_reg {
	u32 offset;
	u32 value;
	u32 flags;
#define GUC_REGSET_MASKED		BIT(0)
#define GUC_REGSET_NEEDS_STEERING	BIT(1)
#define GUC_REGSET_MASKED_WITH_VALUE	BIT(2)
#define GUC_REGSET_RESTORE_ONLY		BIT(3)
#define GUC_REGSET_STEERING_GROUP	GENMASK(15, 12)
#define GUC_REGSET_STEERING_INSTANCE	GENMASK(23, 20)
	u32 mask;
} __packed;

/* GuC register sets */
struct guc_mmio_reg_set {
	u32 address;
	u16 count;
	u16 reserved;
} __packed;

/* HW info */
struct guc_gt_system_info {
	u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES];
	u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;

enum {
	GUC_CAPTURE_LIST_INDEX_PF = 0,
	GUC_CAPTURE_LIST_INDEX_VF = 1,
	GUC_CAPTURE_LIST_INDEX_MAX = 2,
};

/* Register types of GuC capture register lists */
enum guc_capture_type {
	GUC_CAPTURE_LIST_TYPE_GLOBAL = 0,
	GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
	GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
	GUC_CAPTURE_LIST_TYPE_MAX,
};

/* Class indices for the capture_class and capture_instance arrays */
enum {
	GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
	GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
	GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
	GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
	GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
};

/* GuC Additional Data Struct */
struct guc_ads {
	struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	u32 reserved0;
	u32 scheduler_policies;
	u32 gt_system_info;
	u32 reserved1;
	u32 control_data;
	u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
	u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
	u32 private_data;
	u32 reserved2;
	u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
	u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
	u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
	u32 wa_klv_addr_lo;
	u32 wa_klv_addr_hi;
	u32 wa_klv_size;
	u32 reserved[11];
} __packed;

/* Engine usage stats */
struct guc_engine_usage_record {
	u32 current_context_index;
	u32 last_switch_in_stamp;
	u32 reserved0;
	u32 total_runtime;
	u32 reserved1[4];
} __packed;

struct guc_engine_usage {
	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;
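
/*
 * The usage record underpins engine busyness reporting: total_runtime
 * accumulates time for contexts that have switched out, while
 * last_switch_in_stamp lets the driver account for a context that is still
 * running. A rough sketch of that accounting, where rec, now and
 * context_is_switched_in() are illustrative placeholders:
 *
 *	u64 busy = rec->total_runtime;
 *
 *	if (context_is_switched_in(rec))
 *		busy += now - rec->last_switch_in_stamp;
 */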

/* GuC logging structures */

enum guc_log_buffer_type {
	GUC_DEBUG_LOG_BUFFER,
	GUC_CRASH_DUMP_LOG_BUFFER,
	GUC_CAPTURE_LOG_BUFFER,
	GUC_MAX_LOG_BUFFER
};

/*
 * struct guc_log_buffer_state - GuC log buffer state
 *
 * This state structure coordinates the retrieval of GuC firmware logs.
 * Separate state is maintained for each log buffer type.
 *
 * read_ptr points to the location in the log buffer that i915 last read and
 * is read-only for the GuC firmware. write_ptr is incremented by the GuC by
 * the number of bytes written for each log entry and is read-only for i915.
 *
 * When any log buffer type becomes half full, the GuC sends a flush
 * interrupt. While the GuC writes to the second half of the buffer, it
 * expects the Host to consume the first half and then send a flush-completed
 * acknowledgment, so that the GuC never overwrites unread data and loses
 * logs. So when the buffer is half filled and i915 has requested an
 * interrupt, the GuC sets the flush_to_file field, sets sampled_write_ptr to
 * the value of write_ptr and raises the interrupt.
 *
 * On receiving the interrupt, i915 should read the buffer, clear the
 * flush_to_file field and update read_ptr with the value of
 * sampled_write_ptr before sending an acknowledgment to the GuC.
 *
 * The marker and version fields are for internal use by the GuC and are
 * opaque to i915. The buffer_full_cnt field is incremented every time the
 * GuC detects a log buffer overflow.
 */
struct guc_log_buffer_state {
	u32 marker[2];
	u32 read_ptr;
	u32 write_ptr;
	u32 size;
	u32 sampled_write_ptr;
	u32 wrap_offset;
	union {
		struct {
			u32 flush_to_file:1;
			u32 buffer_full_cnt:4;
			u32 reserved:27;
		};
		u32 flags;
	};
	u32 version;
} __packed;
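
/*
 * A sketch of the host side of the flush handshake described above, run from
 * the log-flush event handler; read_out_log_chunk() is a hypothetical helper
 * standing in for the actual copy out of the buffer:
 *
 *	u32 sampled = READ_ONCE(state->sampled_write_ptr);
 *
 *	read_out_log_chunk(log, state->read_ptr, sampled);
 *	state->read_ptr = sampled;
 *	state->flush_to_file = 0;
 *	...then send the flush-complete acknowledgment to the GuC.
 */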

/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
	INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
};

#endif