/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_cmd.h"
#include "dmub_dcn30.h"
#include "dmub_dcn301.h"
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "dmub_dcn31.h"
#include "os_types.h"
/*
 * Note: the DMUB service is standalone. No additional headers should be
 * added below or above this line unless they reside within the DMUB
 * folder.
 */

/* Alignment for framebuffer memory. */
#define DMUB_FB_ALIGNMENT (1024 * 1024)

/* Stack size. */
#define DMUB_STACK_SIZE (128 * 1024)

/* Context size. */
#define DMUB_CONTEXT_SIZE (512 * 1024)

/* Mailbox size: ring buffers are required for both inbox and outbox */
#define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))

/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)

/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */

#define DMUB_CW0_BASE (0x60000000)
#define DMUB_CW1_BASE (0x61000000)
#define DMUB_CW3_BASE (0x63000000)
#define DMUB_CW4_BASE (0x64000000)
#define DMUB_CW5_BASE (0x65000000)
#define DMUB_CW6_BASE (0x66000000)

#define DMUB_REGION5_BASE (0xA0000000)

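/* Round val up to the nearest multiple of factor, e.g. dmub_align(100, 64) == 128. */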
static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}

void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
	uint8_t buf[64];
	uint32_t pos, end;

	/**
	 * Read 64-byte chunks since we don't want to store a
	 * large temporary buffer for this purpose.
	 */
	end = fb->size / sizeof(buf) * sizeof(buf);

	for (pos = 0; pos < end; pos += sizeof(buf))
		dmub_memcpy(buf, base + pos, sizeof(buf));

	/* Read anything leftover into the buffer. */
	if (end < fb->size)
		dmub_memcpy(buf, base + pos, fb->size - end);
}

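/*
 * Locate the firmware metadata: either near the end of the legacy BSS data
 * region (offset back by DMUB_FW_META_OFFSET) or at the very end of the
 * combined inst/const blob. Returns NULL if the magic value does not match.
 */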
static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
	const union dmub_fw_meta *meta;
	const uint8_t *blob = NULL;
	uint32_t blob_size = 0;
	uint32_t meta_offset = 0;

	if (params->fw_bss_data && params->bss_data_size) {
		/* Legacy metadata region. */
		blob = params->fw_bss_data;
		blob_size = params->bss_data_size;
		meta_offset = DMUB_FW_META_OFFSET;
	} else if (params->fw_inst_const && params->inst_const_size) {
		/* Combined metadata region. */
		blob = params->fw_inst_const;
		blob_size = params->inst_const_size;
		meta_offset = 0;
	}

	if (!blob || !blob_size)
		return NULL;

	if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
		return NULL;

	meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
					    sizeof(union dmub_fw_meta));

	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
		return NULL;

	return &meta->info;
}

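/*
 * Populate the register map and hardware function table for the given ASIC.
 * Returns false if the ASIC is not supported by the DMUB service.
 */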
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;

	switch (asic) {
	case DMUB_ASIC_DCN20:
	case DMUB_ASIC_DCN21:
	case DMUB_ASIC_DCN30:
	case DMUB_ASIC_DCN301:
	case DMUB_ASIC_DCN302:
	case DMUB_ASIC_DCN303:
		dmub->regs = &dmub_srv_dcn20_regs;

		funcs->reset = dmub_dcn20_reset;
		funcs->reset_release = dmub_dcn20_reset_release;
		funcs->backdoor_load = dmub_dcn20_backdoor_load;
		funcs->setup_windows = dmub_dcn20_setup_windows;
		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
		funcs->is_supported = dmub_dcn20_is_supported;
		funcs->is_hw_init = dmub_dcn20_is_hw_init;
		funcs->set_gpint = dmub_dcn20_set_gpint;
		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
		funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
		funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
		funcs->get_current_time = dmub_dcn20_get_current_time;

		// Out mailbox register access functions for RN and above
		funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
		funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
		funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;

		//outbox0 call stacks
		funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;

		funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;

		if (asic == DMUB_ASIC_DCN21) {
			dmub->regs = &dmub_srv_dcn21_regs;

			funcs->is_phy_init = dmub_dcn21_is_phy_init;
		}
		if (asic == DMUB_ASIC_DCN30) {
			dmub->regs = &dmub_srv_dcn30_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN301) {
			dmub->regs = &dmub_srv_dcn301_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN302) {
			dmub->regs = &dmub_srv_dcn302_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN303) {
			dmub->regs = &dmub_srv_dcn303_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		break;

	case DMUB_ASIC_DCN31:
		dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
		funcs->reset = dmub_dcn31_reset;
		funcs->reset_release = dmub_dcn31_reset_release;
		funcs->backdoor_load = dmub_dcn31_backdoor_load;
		funcs->setup_windows = dmub_dcn31_setup_windows;
		funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
		funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
		funcs->get_outbox1_wptr = dmub_dcn31_get_outbox1_wptr;
		funcs->set_outbox1_rptr = dmub_dcn31_set_outbox1_rptr;
		funcs->is_supported = dmub_dcn31_is_supported;
		funcs->is_hw_init = dmub_dcn31_is_hw_init;
		funcs->set_gpint = dmub_dcn31_set_gpint;
		funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
		funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
		funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
		//outbox0 call stacks
		funcs->setup_outbox0 = dmub_dcn31_setup_outbox0;
		funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;

		funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;

		funcs->get_current_time = dmub_dcn31_get_current_time;

		break;

	default:
		return false;
	}

	return true;
}

enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params)
{
	enum dmub_status status = DMUB_STATUS_OK;

	dmub_memset(dmub, 0, sizeof(*dmub));

	dmub->funcs = params->funcs;
	dmub->user_ctx = params->user_ctx;
	dmub->asic = params->asic;
	dmub->fw_version = params->fw_version;
	dmub->is_virtual = params->is_virtual;

	/* Setup asic dependent hardware funcs. */
	if (!dmub_srv_hw_setup(dmub, params->asic)) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

	/* Override (some) hardware funcs based on user params. */
	if (params->hw_funcs) {
		if (params->hw_funcs->emul_get_inbox1_rptr)
			dmub->hw_funcs.emul_get_inbox1_rptr =
				params->hw_funcs->emul_get_inbox1_rptr;

		if (params->hw_funcs->emul_set_inbox1_wptr)
			dmub->hw_funcs.emul_set_inbox1_wptr =
				params->hw_funcs->emul_set_inbox1_wptr;

		if (params->hw_funcs->is_supported)
			dmub->hw_funcs.is_supported =
				params->hw_funcs->is_supported;
	}

	/* Sanity checks for required hw func pointers. */
	if (!dmub->hw_funcs.get_inbox1_rptr ||
	    !dmub->hw_funcs.set_inbox1_wptr) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

cleanup:
	if (status == DMUB_STATUS_OK)
		dmub->sw_init = true;
	else
		dmub_srv_destroy(dmub);

	return status;
}

void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}

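/*
 * Lay out the firmware regions (inst/const, data, stack, VBIOS, mailbox,
 * trace buffer, firmware state, scratch memory) back to back, each base
 * aligned to 256 bytes, and report the total framebuffer size rounded up
 * to 4KB.
 */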
enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv *dmub,
			  const struct dmub_srv_region_params *params,
			  struct dmub_srv_region_info *out)
{
	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
	struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
	const struct dmub_fw_meta_info *fw_info;
	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	out->num_regions = DMUB_NUM_WINDOWS;

	inst->base = 0x0;
	inst->top = inst->base + params->inst_const_size;

	data->base = dmub_align(inst->top, 256);
	data->top = data->base + params->bss_data_size;

	/*
	 * All cache windows below should be aligned to the size
	 * of the DMCUB cache line, 64 bytes.
	 */

	stack->base = dmub_align(data->top, 256);
	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;

	bios->base = dmub_align(stack->top, 256);
	bios->top = bios->base + params->vbios_size;

	mail->base = dmub_align(bios->top, 256);
	mail->top = mail->base + DMUB_MAILBOX_SIZE;

	fw_info = dmub_get_fw_meta_info(params);

	if (fw_info) {
		fw_state_size = fw_info->fw_region_size;
		trace_buffer_size = fw_info->trace_buffer_size;

		/**
		 * If DM didn't fill in a version, then fill it in based on
		 * the firmware meta now that we have it.
		 *
		 * TODO: Make it easier for driver to extract this out to
		 * pass during creation.
		 */
		if (dmub->fw_version == 0)
			dmub->fw_version = fw_info->fw_version;
	}

	trace_buff->base = dmub_align(mail->top, 256);
	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

	fw_state->base = dmub_align(trace_buff->top, 256);
	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);

	scratch_mem->base = dmub_align(fw_state->top, 256);
	scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);

	out->fb_size = dmub_align(scratch_mem->top, 4096);

	return DMUB_STATUS_OK;
}

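/*
 * Translate the computed region offsets into per-window CPU and GPU
 * addresses within the caller-provided framebuffer allocation.
 */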
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
				       const struct dmub_srv_fb_params *params,
				       struct dmub_srv_fb_info *out)
{
	uint8_t *cpu_base;
	uint64_t gpu_base;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
		return DMUB_STATUS_INVALID;

	cpu_base = (uint8_t *)params->cpu_addr;
	gpu_base = params->gpu_addr;

	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
		const struct dmub_region *reg =
			&params->region_info->regions[i];

		out->fb[i].cpu_addr = cpu_base + reg->base;
		out->fb[i].gpu_addr = gpu_base + reg->base;
		out->fb[i].size = reg->top - reg->base;
	}

	out->num_fb = DMUB_NUM_WINDOWS;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported)
{
	*is_supported = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.is_supported)
		*is_supported = dmub->hw_funcs.is_supported(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
{
	*is_hw_init = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_init)
		return DMUB_STATUS_OK;

	if (dmub->hw_funcs.is_hw_init)
		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);

	return DMUB_STATUS_OK;
}

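/*
 * Program the DMCUB cache windows (CW0-CW6), set up the inbox/outbox
 * mailboxes and their ring buffers, report the driver-supported boot
 * options, and finally release the controller from reset.
 */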
enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params)
{
	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];

	struct dmub_rb_init_params rb_params, outbox0_rb_params;
	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
	struct dmub_region inbox1, outbox1, outbox0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
		!tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
		ASSERT(0);
		return DMUB_STATUS_INVALID;
	}

	dmub->fb_base = params->fb_base;
	dmub->fb_offset = params->fb_offset;
	dmub->psp_version = params->psp_version;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	cw0.offset.quad_part = inst_fb->gpu_addr;
	cw0.region.base = DMUB_CW0_BASE;
	cw0.region.top = cw0.region.base + inst_fb->size - 1;

	cw1.offset.quad_part = stack_fb->gpu_addr;
	cw1.region.base = DMUB_CW1_BASE;
	cw1.region.top = cw1.region.base + stack_fb->size - 1;

	if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
		/**
		 * Read back all the instruction memory so we don't hang the
		 * DMCUB when backdoor loading if the write from x86 hasn't been
		 * flushed yet. This only occurs in backdoor loading.
		 */
		dmub_flush_buffer_mem(inst_fb);
		dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
	}

	cw2.offset.quad_part = data_fb->gpu_addr;
	cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
	cw2.region.top = cw2.region.base + data_fb->size;

	cw3.offset.quad_part = bios_fb->gpu_addr;
	cw3.region.base = DMUB_CW3_BASE;
	cw3.region.top = cw3.region.base + bios_fb->size;

	cw4.offset.quad_part = mail_fb->gpu_addr;
	cw4.region.base = DMUB_CW4_BASE;
	cw4.region.top = cw4.region.base + mail_fb->size;

	/**
	 * The mailbox region is doubled to accommodate both the inbox and
	 * outbox. Note: the total mailbox size is currently 16KB, split
	 * equally into 8KB for the inbox and 8KB for the outbox. If this
	 * configuration changes, the uncached base address of outbox1 must
	 * also be updated in funcs->setup_out_mailbox.
	 */
	inbox1.base = cw4.region.base;
	inbox1.top = cw4.region.base + DMUB_RB_SIZE;
	outbox1.base = inbox1.top;
	outbox1.top = cw4.region.top;

	cw5.offset.quad_part = tracebuff_fb->gpu_addr;
	cw5.region.base = DMUB_CW5_BASE;
	cw5.region.top = cw5.region.base + tracebuff_fb->size;

	outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
	outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;

	cw6.offset.quad_part = fw_state_fb->gpu_addr;
	cw6.region.base = DMUB_CW6_BASE;
	cw6.region.top = cw6.region.base + fw_state_fb->size;

	dmub->fw_state = fw_state_fb->cpu_addr;

	dmub->scratch_mem_fb = *scratch_mem_fb;

	if (dmub->hw_funcs.setup_windows)
		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);

	if (dmub->hw_funcs.setup_outbox0)
		dmub->hw_funcs.setup_outbox0(dmub, &outbox0);

	if (dmub->hw_funcs.setup_mailbox)
		dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
	if (dmub->hw_funcs.setup_out_mailbox)
		dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);

	dmub_memset(&rb_params, 0, sizeof(rb_params));
	rb_params.ctx = dmub;
	rb_params.base_address = mail_fb->cpu_addr;
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->inbox1_rb, &rb_params);

	// Initialize outbox1 ring buffer
	rb_params.ctx = dmub;
	rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->outbox1_rb, &rb_params);

	dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
	outbox0_rb_params.ctx = dmub;
	outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
	outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
	dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);

	/* Report to DMUB which features the current driver supports. */
	if (dmub->hw_funcs.enable_dmub_boot_options)
		dmub->hw_funcs.enable_dmub_boot_options(dmub, params);

	if (dmub->hw_funcs.reset_release)
		dmub->hw_funcs.reset_release(dmub);

	dmub->hw_init = true;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	dmub->hw_init = false;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const union dmub_rb_cmd *cmd)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
		return DMUB_STATUS_OK;

	return DMUB_STATUS_QUEUE_FULL;
}

enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	/**
	 * Read back all the queued commands to ensure that they've
	 * been flushed to framebuffer memory. Otherwise DMCUB might
	 * read back stale, fully invalid or partially invalid data.
	 */
	dmub_rb_flush_pending(&dmub->inbox1_rb);

	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
	return DMUB_STATUS_OK;
}

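/*
 * Poll the firmware boot status in 100us steps until the DAL firmware and
 * mailbox report ready, or the timeout expires.
 */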
enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
					     uint32_t timeout_us)
{
	uint32_t i;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; i += 100) {
		union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);

		if (status.bits.dal_fw && status.bits.mailbox_rdy)
			return DMUB_STATUS_OK;

		udelay(100);
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
					    uint32_t timeout_us)
{
	uint32_t i = 0;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_phy_init)
		return DMUB_STATUS_OK;

	for (i = 0; i <= timeout_us; i += 10) {
		if (dmub->hw_funcs.is_phy_init(dmub))
			return DMUB_STATUS_OK;

		udelay(10);
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
					uint32_t timeout_us)
{
	uint32_t i;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; ++i) {
		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
		if (dmub_rb_empty(&dmub->inbox1_rb))
			return DMUB_STATUS_OK;

		udelay(1);
	}

	return DMUB_STATUS_TIMEOUT;
}

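/*
 * Issue a GPINT command to the firmware and busy-wait, in 1us steps, for
 * the firmware to acknowledge it.
 */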
enum dmub_status
dmub_srv_send_gpint_command(struct dmub_srv *dmub,
			    enum dmub_gpint_command command_code,
			    uint16_t param, uint32_t timeout_us)
{
	union dmub_gpint_data_register reg;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.set_gpint)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_gpint_acked)
		return DMUB_STATUS_INVALID;

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	dmub->hw_funcs.set_gpint(dmub, reg);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
					     uint32_t *response)
{
	*response = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.get_gpint_response)
		return DMUB_STATUS_INVALID;

	*response = dmub->hw_funcs.get_gpint_response(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
					     union dmub_fw_boot_status *status)
{
	status->all = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.get_fw_status)
		*status = dmub->hw_funcs.get_fw_status(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
					      union dmub_rb_cmd *cmd)
{
	enum dmub_status status = DMUB_STATUS_OK;

	// Queue command
	status = dmub_srv_cmd_queue(dmub, cmd);

	if (status != DMUB_STATUS_OK)
		return status;

	// Execute command
	status = dmub_srv_cmd_execute(dmub);

	if (status != DMUB_STATUS_OK)
		return status;

	// Wait for DMUB to process command
	status = dmub_srv_wait_for_idle(dmub, 100000);

	if (status != DMUB_STATUS_OK)
		return status;

	// Copy data back from ring buffer into command
	dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);

	return status;
}

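/*
 * Pop a single trace buffer entry from the front of the outbox0 ring buffer,
 * copying it out in 64-bit chunks and wrapping the read pointer at the ring
 * capacity. Returns false if the ring is empty.
 */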
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
				 void *entry)
{
	const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
	uint64_t *dst = (uint64_t *)entry;
	uint8_t i;
	uint8_t loop_count;

	if (rb->rptr == rb->wrpt)
		return false;

	loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
	// copying data
	for (i = 0; i < loop_count; i++)
		*dst++ = *src++;

	rb->rptr += sizeof(struct dmcub_trace_buf_entry);

	rb->rptr %= rb->capacity;

	return true;
}

bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
{
	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);

	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}

bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
		return false;
	dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
	return true;
}