/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status;

	do {
		status = dmub_srv_wait_for_idle(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
		enum dm_dmub_wait_type wait_type,
		union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}
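
/*
 * Usage sketch (illustrative only, not part of the driver): callers build a
 * zero-initialized union dmub_rb_cmd, fill in the header and payload, then
 * hand it to dc_dmub_srv_cmd_run(). The DRR helpers further down follow the
 * same pattern through the dc_wake_and_execute_dmub_cmd() wrapper:
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 *	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
 *	cmd.drr_update.header.payload_bytes =
 *		sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 *
 *	dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */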

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			dmub_srv_wait_for_idle(dmub, 100000);

			/* Requeue the command. */
			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (status != DMUB_STATUS_OK);
		} else
			status = dmub_srv_wait_for_idle(dmub, 100000);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
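
/*
 * Note (descriptive comment, added for clarity): commands in this file
 * consistently size their payload as sizeof(the command struct) minus
 * sizeof(its header), i.e. the number of payload bytes that follow the
 * header in the ring buffer entry.
 */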

static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}
	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign vactive stretch margin in this case.
			 */
			if (!pipe->stream->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy feature caps to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
		cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
			&cmd.visual_confirm_color.visual_confirm_color_data,
			sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @context: [in] DC state for access to phantom stream
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}
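
/*
 * Worked example for the DRR math above (illustrative numbers only): for a
 * DRR timing with h_total = 2200, v_total = 1125 and pix_clk_100hz = 1485000
 * (148.5 MHz, a 1080p60-like mode):
 *
 *	drr_frame_us = 1125 * 2200 * 1000000 / (1485000 * 100) = 16666 us
 *
 * min_vtotal_supported then inverts the same relation, converting
 * (drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US) back into a line
 * count at the same pixel clock and h_total.
 */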

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for SubVP + VBLANK case is stored in
 * the pipe_data (subvp_data and vblank_data).  Also check if the VBLANK pipe
 * is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
							vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));
	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}
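
/*
 * Example (hypothetical numbers): with subvp0_prefetch_us = 500 and
 * subvp1_prefetch_us = 400, pipe 1 gets prefetch_delta_us = 100 added on top
 * of dc->caps.subvp_prefetch_end_to_mall_start_us before the round-up
 * conversion to lines, so both MALL regions start together.
 */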

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
			main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
			phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true enables the pipes population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For SubVP pipe count, only count the top most (ODM / MPC) pipe */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_is_pipe_type(pipe, DPP_PIPE) &&
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2) {
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
		}
		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
		return false;

	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_diagnostic_data diag_data = {0};

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", diag_data.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", diag_data.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", diag_data.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", diag_data.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", diag_data.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", diag_data.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", diag_data.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", diag_data.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", diag_data.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", diag_data.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", diag_data.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", diag_data.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", diag_data.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", diag_data.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", diag_data.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", diag_data.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", diag_data.scratch[15]);
	DC_LOG_DEBUG("    pc                 : %08x", diag_data.pc);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", diag_data.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", diag_data.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", diag_data.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", diag_data.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", diag_data.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", diag_data.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", diag_data.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", diag_data.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", diag_data.inbox0_size);
	DC_LOG_DEBUG("    is_enabled         : %d", diag_data.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", diag_data.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", diag_data.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", diag_data.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", diag_data.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", diag_data.is_cw6_enabled);
}

static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe, *split_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	struct rect r1 = scl_data->recout, r2, r2_half;
	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;

	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
			continue;

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;
		split_pipe = test_pipe;

		/**
		 * There is another half plane on same layer because of
		 * pipe-split, merge together per same height.
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
		     split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;
				break;
			}

		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
			return false;

		if (dc_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
		pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information and passes it into
 * dmub
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
					&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for dmub command, the 2nd command is
	 * only assigned to store cursor attributes info.
	 * The 1st command can be viewed as 2 parts: the 1st is for PSR/Replay
	 * data, the other is to store cursor position info.
	 *
	 * Command header type must be the same type if using multi_cmd_pending.
	 * Besides, while processing the 2nd command in DMU, the sub type is
	 * useless. So it's meaningless to pass the sub type header with a
	 * different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Combine the 2nd update_cursor_info cmd and send both to DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;

	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);

		memset(&new_signals, 0, sizeof(new_signals));

		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		}

		ips_driver->signals = new_signals;
	}

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}

static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv;

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	if (dc->clk_mgr->funcs->exit_low_power_state) {
		volatile const struct dmub_shared_state_ips_fw *ips_fw =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;

		ips_driver->signals.all = 0;

		if (prev_driver_signals.bits.allow_ips2) {
			udelay(dc->debug.ips2_eval_delay_us);

			if (ips_fw->signals.bits.ips2_commit) {
				// Tell PMFW to exit low power state
				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				// Wait for IPS2 entry upper bound
				udelay(dc->debug.ips2_entry_delay_us);

				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				while (ips_fw->signals.bits.ips2_commit)
					udelay(1);

				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
					ASSERT(0);

				dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
			}
		}

		dc_dmub_srv_notify_idle(dc, false);
		if (prev_driver_signals.bits.allow_ips1) {
			while (ips_fw->signals.bits.ips1_commit)
				udelay(1);
		}
	}

	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
		ASSERT(0);
}

void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
{
	struct dmub_srv *dmub;

	if (!dc_dmub_srv)
		return;

	dmub = dc_dmub_srv->dmub;

	if (powerState == DC_ACPI_CM_POWER_STATE_D0)
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
	else
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}

void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	if (dc_dmub_srv->idle_allowed == allow_idle)
		return;

	/*
	 * Entering a low power state requires a driver notification.
	 * Powering up the hardware requires notifying PMFW and DMCUB.
	 * Clearing the driver idle allow requires a DMCUB command.
	 * DMCUB commands requires the DMCUB to be powered up and restored.
	 *
	 * Exit out early to prevent an infinite loop of DMCUB commands
	 * triggering exit low power - use software state to track this.
	 */
	dc_dmub_srv->idle_allowed = allow_idle;

	if (!allow_idle)
		dc_dmub_srv_exit_low_power_state(dc);
	else
		dc_dmub_srv_notify_idle(dc, allow_idle);
}

1355bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
1356				  enum dm_dmub_wait_type wait_type)
1357{
1358	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
1359}
1360
1361bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
1362				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
1363{
1364	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1365	bool result = false, reallow_idle = false;
1366
1367	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1368		return false;
1369
1370	if (count == 0)
1371		return true;
1372
1373	if (dc_dmub_srv->idle_allowed) {
1374		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1375		reallow_idle = true;
1376	}
1377
1378	/*
1379	 * These may have different implementations in DM, so ensure
1380	 * that we guide it to the expected helper.
1381	 */
1382	if (count > 1)
1383		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
1384	else
1385		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
1386
1387	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
1388		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1389
1390	return result;
1391}
1392
1393static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1394				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1395{
1396	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1397	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
1398	enum dmub_status status;
1399
1400	if (response)
1401		*response = 0;
1402
1403	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1404		return false;
1405
1406	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
1407	if (status != DMUB_STATUS_OK) {
1408		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
1409			return true;
1410
1411		return false;
1412	}
1413
1414	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
1415		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
1416
1417	return true;
1418}
1419
1420bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1421			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1422{
1423	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1424	bool result = false, reallow_idle = false;
1425
1426	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1427		return false;
1428
1429	if (dc_dmub_srv->idle_allowed) {
1430		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1431		reallow_idle = true;
1432	}
1433
1434	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
1435
1436	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
1437		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1438
1439	return result;
1440}
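/*
 * Editorial sketch, not part of the original file: a typical GPINT
 * round-trip using the wake wrapper above. DMUB_GPINT__GET_FW_VERSION is
 * used only as an example command code; the helper name is hypothetical.
 */
static inline bool example_read_gpint_value(const struct dc_context *ctx,
					    uint32_t *value)
{
	/* WAIT_WITH_REPLY polls for completion and then copies the GPINT
	 * response register into *value.
	 */
	return dc_wake_and_execute_gpint(ctx, DMUB_GPINT__GET_FW_VERSION,
					 0, value, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}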
v6.13.7
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 *
  24 */
  25
  26#include "dm_services.h"
  27#include "dc.h"
  28#include "dc_dmub_srv.h"
  29#include "../dmub/dmub_srv.h"
  30#include "dm_helpers.h"
  31#include "dc_hw_types.h"
  32#include "core_types.h"
  33#include "../basics/conversion.h"
  34#include "cursor_reg_cache.h"
  35#include "resource.h"
  36#include "clk_mgr.h"
  37#include "dc_state_priv.h"
  38#include "dc_plane_priv.h"
  39
  40#define CTX dc_dmub_srv->ctx
  41#define DC_LOGGER CTX->logger
  42
  43static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
  44				  struct dmub_srv *dmub)
  45{
  46	dc_srv->dmub = dmub;
  47	dc_srv->ctx = dc->ctx;
  48}
  49
  50struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
  51{
  52	struct dc_dmub_srv *dc_srv =
  53		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);
  54
  55	if (dc_srv == NULL) {
  56		BREAK_TO_DEBUGGER();
  57		return NULL;
  58	}
  59
  60	dc_dmub_srv_construct(dc_srv, dc, dmub);
  61
  62	return dc_srv;
  63}
  64
  65void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
  66{
  67	if (*dmub_srv) {
  68		kfree(*dmub_srv);
  69		*dmub_srv = NULL;
  70	}
  71}
  72
  73void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
  74{
  75	struct dmub_srv *dmub = dc_dmub_srv->dmub;
  76	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
  77	enum dmub_status status;
  78
  79	do {
  80		status = dmub_srv_wait_for_idle(dmub, 100000);
  81	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
  82
  83	if (status != DMUB_STATUS_OK) {
  84		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
  85		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
  86	}
  87}
  88
  89void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
  90{
  91	struct dmub_srv *dmub = dc_dmub_srv->dmub;
  92	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
  93	enum dmub_status status = DMUB_STATUS_OK;
  94
  95	status = dmub_srv_clear_inbox0_ack(dmub);
  96	if (status != DMUB_STATUS_OK) {
  97		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
  98		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
  99	}
 100}
 101
 102void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
 103{
 104	struct dmub_srv *dmub = dc_dmub_srv->dmub;
 105	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
 106	enum dmub_status status = DMUB_STATUS_OK;
 107
 108	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
 109	if (status != DMUB_STATUS_OK) {
 110		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
 111		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 112	}
 113}
 114
 115void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
 116				 union dmub_inbox0_data_register data)
 117{
 118	struct dmub_srv *dmub = dc_dmub_srv->dmub;
 119	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
 120	enum dmub_status status = DMUB_STATUS_OK;
 121
 122	status = dmub_srv_send_inbox0_cmd(dmub, data);
 123	if (status != DMUB_STATUS_OK) {
 124		DC_ERROR("Error sending INBOX0 cmd\n");
 125		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 126	}
 127}
 128
 129bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
 130		unsigned int count,
 131		union dmub_rb_cmd *cmd_list)
 132{
 133	struct dc_context *dc_ctx;
 134	struct dmub_srv *dmub;
 135	enum dmub_status status;
 136	int i;
 137
 138	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 139		return false;
 140
 141	dc_ctx = dc_dmub_srv->ctx;
 142	dmub = dc_dmub_srv->dmub;
 143
 144	for (i = 0 ; i < count; i++) {
 145		// Queue command
 146		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
 147
 148		if (status == DMUB_STATUS_QUEUE_FULL) {
 149			/* Execute and wait for queue to become empty again. */
 150			status = dmub_srv_cmd_execute(dmub);
 151			if (status == DMUB_STATUS_POWER_STATE_D3)
 152				return false;
 153
 154			do {
 155				status = dmub_srv_wait_for_idle(dmub, 100000);
 156			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 157
 158			/* Requeue the command. */
 159			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
 160		}
 161
 162		if (status != DMUB_STATUS_OK) {
 163			if (status != DMUB_STATUS_POWER_STATE_D3) {
 164				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
 165				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 166			}
 167			return false;
 168		}
 169	}
 170
 171	status = dmub_srv_cmd_execute(dmub);
 172	if (status != DMUB_STATUS_OK) {
 173		if (status != DMUB_STATUS_POWER_STATE_D3) {
 174			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
 175			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 176		}
 177		return false;
 178	}
 179
 180	return true;
 181}
 182
 183bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
 184		enum dm_dmub_wait_type wait_type,
 185		union dmub_rb_cmd *cmd_list)
 186{
 187	struct dmub_srv *dmub;
 188	enum dmub_status status;
 189
 190	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 191		return false;
 192
 193	dmub = dc_dmub_srv->dmub;
 194
 195	// Wait for DMUB to process command
 196	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
 197		do {
 198			status = dmub_srv_wait_for_idle(dmub, 100000);
 199		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 200
 201		if (status != DMUB_STATUS_OK) {
 202			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
 203			if (!dmub->debug.timeout_occured) {
 204				dmub->debug.timeout_occured = true;
 205				dmub->debug.timeout_cmd = *cmd_list;
 206				dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
 207			}
 208			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 209			return false;
 210		}
 211
 212		// Copy data back from ring buffer into command
 213		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
 214			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
 215	}
 216
 217	return true;
 218}
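/*
 * Editorial sketch, not part of the original file: the queue/execute and
 * wait steps above are split so that batches can be queued before blocking.
 * example_send_and_wait() is a hypothetical caller tying them together.
 */
static inline bool example_send_and_wait(struct dc_dmub_srv *dc_dmub_srv,
					 union dmub_rb_cmd *cmd)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, 1, cmd))
		return false;

	/* Blocks until DMUB is idle; with DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY
	 * it would also copy the ring-buffer return data back into *cmd.
	 */
	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, cmd);
}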
 219
 220bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
 221{
 222	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
 223}
 224
 225bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
 226{
 227	struct dc_context *dc_ctx;
 228	struct dmub_srv *dmub;
 229	enum dmub_status status;
 230	int i;
 231
 232	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 233		return false;
 234
 235	dc_ctx = dc_dmub_srv->ctx;
 236	dmub = dc_dmub_srv->dmub;
 237
 238	for (i = 0 ; i < count; i++) {
 239		// Queue command
 240		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
 241
 242		if (status == DMUB_STATUS_QUEUE_FULL) {
 243			/* Execute and wait for queue to become empty again. */
 244			status = dmub_srv_cmd_execute(dmub);
 245			if (status == DMUB_STATUS_POWER_STATE_D3)
 246				return false;
 247
 248			status = dmub_srv_wait_for_idle(dmub, 100000);
 249			if (status != DMUB_STATUS_OK)
 250				return false;
 251
 252			/* Requeue the command. */
 253			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
 254		}
 255
 256		if (status != DMUB_STATUS_OK) {
 257			if (status != DMUB_STATUS_POWER_STATE_D3) {
 258				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
 259				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 260			}
 261			return false;
 262		}
 263	}
 264
 265	status = dmub_srv_cmd_execute(dmub);
 266	if (status != DMUB_STATUS_OK) {
 267		if (status != DMUB_STATUS_POWER_STATE_D3) {
 268			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
 269			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 270		}
 271		return false;
 272	}
 273
 274	// Wait for DMUB to process command
 275	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
 276		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
 277			do {
 278				status = dmub_srv_wait_for_idle(dmub, 100000);
 279			} while (status != DMUB_STATUS_OK);
 280		} else
 281			status = dmub_srv_wait_for_idle(dmub, 100000);
 282
 283		if (status != DMUB_STATUS_OK) {
 284			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
 285			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 286			return false;
 287		}
 288
 289		// Copy data back from ring buffer into command
 290		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
 291			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
 292	}
 293
 294	return true;
 295}
 296
 297bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
 298{
 299	struct dmub_srv *dmub;
 300	struct dc_context *dc_ctx;
 301	union dmub_fw_boot_status boot_status;
 302	enum dmub_status status;
 303
 304	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 305		return false;
 306
 307	dmub = dc_dmub_srv->dmub;
 308	dc_ctx = dc_dmub_srv->ctx;
 309
 310	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
 311	if (status != DMUB_STATUS_OK) {
 312		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
 313		return false;
 314	}
 315
 316	return boot_status.bits.optimized_init_done;
 317}
 318
 319bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
 320				    unsigned int stream_mask)
 321{
 322	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 323		return false;
 324
 325	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
 326					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
 327}
 328
 329bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
 330{
 331	struct dmub_srv *dmub;
 332	struct dc_context *dc_ctx;
 333	union dmub_fw_boot_status boot_status;
 334	enum dmub_status status;
 335
 336	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 337		return false;
 338
 339	dmub = dc_dmub_srv->dmub;
 340	dc_ctx = dc_dmub_srv->ctx;
 341
 342	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
 343	if (status != DMUB_STATUS_OK) {
 344		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
 345		return false;
 346	}
 347
 348	return boot_status.bits.restore_required;
 349}
 350
 351bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
 352{
 353	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;
 354	return dmub_srv_get_outbox0_msg(dmub, entry);
 355}
 356
 357void dc_dmub_trace_event_control(struct dc *dc, bool enable)
 358{
 359	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
 360}
 361
 362void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
 363{
 364	union dmub_rb_cmd cmd = { 0 };
 365
 366	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 367	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
 368	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
 369	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
 370	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
 371
 372	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 373
 374	// Send the command to the DMCUB.
 375	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 376}
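/*
 * Editorial sketch, not part of the original file: deriving the vtotal_min/
 * vtotal_max arguments above from a refresh-rate range. The helper and the
 * numbers in the comment below are hypothetical;
 * vtotal = pix_clk / (refresh_hz * htotal).
 */
static inline uint32_t example_vtotal_for_refresh(uint64_t pix_clk_100hz,
						  uint32_t htotal,
						  uint32_t refresh_hz)
{
	/* pix_clk_100hz is in units of 100 Hz, hence the multiply by 100. */
	return (uint32_t)div64_u64(pix_clk_100hz * 100,
				   (uint64_t)refresh_hz * htotal);
}
/*
 * e.g. a 594 MHz pixel clock (pix_clk_100hz = 5940000) with htotal 4400:
 * 120 Hz gives vtotal 1125 and 48 Hz gives vtotal 2812, so a caller would
 * pass vtotal_min = 1125 and vtotal_max = 2812.
 */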
 377
 378void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
 379{
 380	union dmub_rb_cmd cmd = { 0 };
 381
 382	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 383	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
 384	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
 385
 386	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 387
 388	// Send the command to the DMCUB.
 389	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 390}
 391
 392static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
 393{
 394	uint8_t pipes = 0;
 395	int i = 0;
 396
 397	for (i = 0; i < MAX_PIPES; i++) {
 398		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 399
 400		if (pipe->stream == stream && pipe->stream_res.tg)
 401			pipes = i;
 402	}
 403	return pipes;
 404}
 405
 406static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
 407		struct pipe_ctx *head_pipe,
 408		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
 409{
 410	int j;
 411	int pipe_idx = 0;
 412
 413	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
 414	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 415		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];
 416
 417		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
 418			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
 419		}
 420	}
 421	fams_pipe_data->pipe_count = pipe_idx;
 422}
 423
 424bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
 425{
 426	union dmub_rb_cmd cmd = { 0 };
 427	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
 428	int i = 0, k = 0;
 429	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
 430	uint8_t visual_confirm_enabled;
 431	int pipe_idx = 0;
 432	struct dc_stream_status *stream_status = NULL;
 433
 434	if (dc == NULL)
 435		return false;
 436
 437	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
 438
 439	// Format command.
 440	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 441	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
 442	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
 443	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
 444
 445	if (should_manage_pstate) {
 446		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 447			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 448
 449			if (!pipe->stream)
 450				continue;
 451
 452			/* If FAMS is being used to support P-State and there is a stream
 453			 * that does not use FAMS, we are in an FPO + VActive scenario.
 454			 * Assign vactive stretch margin in this case.
 455			 */
 456			stream_status = dc_state_get_stream_status(context, pipe->stream);
 457			if (stream_status && !stream_status->fpo_in_use) {
 458				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
 459				break;
 460			}
 461			pipe_idx++;
 462		}
 463	}
 464
 465	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
 466		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 467
 468		if (!resource_is_pipe_type(pipe, OTG_MASTER))
 469			continue;
 470
 471		stream_status = dc_state_get_stream_status(context, pipe->stream);
 472		if (stream_status && stream_status->fpo_in_use) {
 473			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 474			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
 475
 476			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
 477			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
 478			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
 479			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
 480			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
 481			k++;
 482		}
 483	}
 484	cmd.fw_assisted_mclk_switch.header.payload_bytes =
 485		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
 486
 487	// Send the command to the DMCUB.
 488	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 489
 490	return true;
 491}
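/*
 * Editorial sketch, not part of the original file: the
 * "(min_refresh_in_uhz + 999999) / 1000000" expression above is a ceiling
 * conversion from micro-hertz to hertz. The helper name is hypothetical.
 */
static inline uint8_t example_uhz_to_hz_round_up(uint32_t refresh_uhz)
{
	/* e.g. 47500000 uHz (47.5 Hz) -> 48 Hz; plain division would give 47. */
	return (refresh_uhz + 999999) / 1000000;
}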
 492
 493void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
 494{
 495	union dmub_rb_cmd cmd = { 0 };
 496
 497	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
 498		return;
 499
 500	memset(&cmd, 0, sizeof(cmd));
 501
 502	/* Prepare fw command */
 503	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
 504	cmd.query_feature_caps.header.sub_type = 0;
 505	cmd.query_feature_caps.header.ret_status = 1;
 506	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
 507
 508	/* If command was processed, copy feature caps to dmub srv */
 509	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
 510	    cmd.query_feature_caps.header.ret_status == 0) {
 511		memcpy(&dc_dmub_srv->dmub->feature_caps,
 512		       &cmd.query_feature_caps.query_feature_caps_data,
 513		       sizeof(struct dmub_feature_caps));
 514	}
 515}
 516
 517void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
 518{
 519	union dmub_rb_cmd cmd = { 0 };
 520	unsigned int panel_inst = 0;
 521
 522	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
 523			dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
 524		return;
 525
 526	memset(&cmd, 0, sizeof(cmd));
 527
 528	// Prepare fw command
 529	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
 530	cmd.visual_confirm_color.header.sub_type = 0;
 531	cmd.visual_confirm_color.header.ret_status = 1;
 532	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
 533	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
 534
 535	// If command was processed, copy the visual confirm color to dmub srv
 536	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
 537		cmd.visual_confirm_color.header.ret_status == 0) {
 538		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
 539			&cmd.visual_confirm_color.visual_confirm_color_data,
 540			sizeof(struct dmub_visual_confirm_color));
 541	}
 542}
 543
 544/**
 545 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 546 *
 547 * @dc: [in] pointer to dc object
 548 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 549 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 550 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 551 * @context: [in] DC state for access to phantom stream
 552 *
 553 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 554 * required for calculating the SubVP + DRR microschedule is populated here.
 555 *
 556 * High level algorithm:
 557 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 558 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 559 * 3. Populate the drr_info with the min and max supported vtotal values
 560 */
 561static void populate_subvp_cmd_drr_info(struct dc *dc,
 562		struct dc_state *context,
 563		struct pipe_ctx *subvp_pipe,
 564		struct pipe_ctx *vblank_pipe,
 565		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
 566{
 567	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
 568	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
 569	struct dc_crtc_timing *phantom_timing;
 570	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
 571	uint16_t drr_frame_us = 0;
 572	uint16_t min_drr_supported_us = 0;
 573	uint16_t max_drr_supported_us = 0;
 574	uint16_t max_drr_vblank_us = 0;
 575	uint16_t max_drr_mallregion_us = 0;
 576	uint16_t mall_region_us = 0;
 577	uint16_t prefetch_us = 0;
 578	uint16_t subvp_active_us = 0;
 579	uint16_t drr_active_us = 0;
 580	uint16_t min_vtotal_supported = 0;
 581	uint16_t max_vtotal_supported = 0;
 582
 583	if (!phantom_stream)
 584		return;
 585
 586	phantom_timing = &phantom_stream->timing;
 587
 588	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
 589	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
 590	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
 591
 592	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
 593			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
 594	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
 595	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
 596			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
 597	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
 598	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
 599			(((uint64_t)drr_timing->h_total * 1000000)));
 600
 601	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
 602			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
 603	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
 604			(((uint64_t)main_timing->pix_clk_100hz * 100)));
 605	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
 606			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
 607	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
 608			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
 609	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
 610	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
 611	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
 612			(((uint64_t)drr_timing->h_total * 1000000)));
 613
 614	/* When calculating the max vtotal supported for SubVP + DRR cases, add
 615	 * margin due to possible rounding errors (being off by 1 line in the
 616	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
 617	 * longer).
 618	 */
 619	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;
 620
 621	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
 622	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
 623	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
 624}
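/*
 * Editorial sketch, not part of the original file: the two unit conversions
 * that populate_subvp_cmd_drr_info() repeats inline, written out once. Both
 * helpers are hypothetical and mirror the div64_u64 expressions above.
 */
static inline uint64_t example_frame_time_us(uint64_t v_total, uint64_t h_total,
					     uint64_t pix_clk_100hz)
{
	/* frame_us = vtotal * htotal / pix_clk, with pix_clk in Hz. */
	return div64_u64(v_total * h_total * 1000000, pix_clk_100hz * 100);
}

static inline uint64_t example_vtotal_for_us(uint64_t time_us, uint64_t h_total,
					     uint64_t pix_clk_100hz)
{
	/* Inverse conversion: how many lines fit in time_us. */
	return div64_u64(pix_clk_100hz * 100 * time_us, h_total * 1000000);
}
/*
 * min_vtotal_supported above is then example_vtotal_for_us(drr_frame_us +
 * mall_region_us + SUBVP_DRR_MARGIN_US, ...), and max_vtotal_supported is
 * the same conversion applied to max_drr_supported_us.
 */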
 625
 626/**
 627 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 628 *
 629 * @dc: [in] current dc state
 630 * @context: [in] new dc state
 631 * @cmd: [in] DMUB cmd to be populated with SubVP info
 632 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 633 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 634 *
 635 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 636 * required to calculate the microschedule for SubVP + VBLANK case is stored in
 637 * the pipe_data (subvp_data and vblank_data).  Also check if the VBLANK pipe
 638 * is a DRR display -- if it is make a call to populate drr_info.
 639 */
 640static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
 641		struct dc_state *context,
 642		union dmub_rb_cmd *cmd,
 643		struct pipe_ctx *vblank_pipe,
 644		uint8_t cmd_pipe_index)
 645{
 646	uint32_t i;
 647	struct pipe_ctx *pipe = NULL;
 648	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
 649			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
 650
 651	// Find the SubVP pipe
 652	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 653		pipe = &context->res_ctx.pipe_ctx[i];
 654
 655		// We check for master pipe, but it shouldn't matter since we only need
 656		// the pipe for timing info (stream should be same for any pipe splits)
 657		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
 658				!resource_is_pipe_type(pipe, DPP_PIPE))
 659			continue;
 660
 661		// Find the SubVP pipe
 662		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
 663			break;
 664	}
 665
 666	pipe_data->mode = VBLANK;
 667	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
 668	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
 669							vblank_pipe->stream->timing.v_front_porch;
 670	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
 671	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
 672	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
 673	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
 674	pipe_data->pipe_config.vblank_data.vblank_end =
 675			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;
 676
 677	if (vblank_pipe->stream->ignore_msa_timing_param &&
 678		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
 679		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
 680}
 681
 682/**
 683 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 684 *
 685 * @dc: [in] current dc state
 686 * @context: [in] new dc state
 687 * @cmd: [in] DMUB cmd to be populated with SubVP info
 688 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 689 *
 690 * For SubVP + SubVP, we use a single vertical interrupt to start the
 691 * microschedule for both SubVP pipes. In order for this to work correctly, the
 692 * MALL REGION of both SubVP pipes must start at the same time. This function
 693 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 694 * the shorter prefetch so that both MALL REGION's will start at the same time.
 695 */
 696static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
 697		struct dc_state *context,
 698		union dmub_rb_cmd *cmd,
 699		struct pipe_ctx *subvp_pipes[])
 700{
 701	uint32_t subvp0_prefetch_us = 0;
 702	uint32_t subvp1_prefetch_us = 0;
 703	uint32_t prefetch_delta_us = 0;
 704	struct dc_stream_state *phantom_stream0 = NULL;
 705	struct dc_stream_state *phantom_stream1 = NULL;
 706	struct dc_crtc_timing *phantom_timing0 = NULL;
 707	struct dc_crtc_timing *phantom_timing1 = NULL;
 708	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
 709
 710	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
 711	if (!phantom_stream0)
 712		return;
 713
 714	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
 715	if (!phantom_stream1)
 716		return;
 717
 718	phantom_timing0 = &phantom_stream0->timing;
 719	phantom_timing1 = &phantom_stream1->timing;
 720
 721	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
 722			(uint64_t)phantom_timing0->h_total * 1000000),
 723			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
 724	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
 725			(uint64_t)phantom_timing1->h_total * 1000000),
 726			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
 727
 728	// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
 729	// should increase its prefetch time to match the other
 730	if (subvp0_prefetch_us > subvp1_prefetch_us) {
 731		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
 732		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
 733		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
 734				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
 735					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
 736					((uint64_t)phantom_timing1->h_total * 1000000));
 737
 738	} else if (subvp1_prefetch_us >  subvp0_prefetch_us) {
 739		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
 740		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
 741		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
 742				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
 743					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
 744					((uint64_t)phantom_timing0->h_total * 1000000));
 745	}
 746}
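/*
 * Editorial sketch, not part of the original file: the round-up conversion
 * used above to turn the equalized prefetch budget into lines. If subvp0
 * prefetches in 600 us and subvp1 in 500 us, prefetch_delta_us = 100 us is
 * added to pipe 1's budget before this conversion, so both MALL regions
 * start on the same tick. The helper name is hypothetical.
 */
static inline uint64_t example_us_to_lines_round_up(uint64_t time_us,
						    uint64_t pix_clk_100hz,
						    uint64_t h_total)
{
	/* lines = ceil(time_us * pix_clk / (htotal * 1000000)) */
	return div64_u64(time_us * pix_clk_100hz * 100 + h_total * 1000000 - 1,
			 h_total * 1000000);
}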
 747
 748/**
 749 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 750 *
 751 * @dc: [in] current dc state
 752 * @context: [in] new dc state
 753 * @cmd: [in] DMUB cmd to be populated with SubVP info
 754 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 755 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 756 *
 757 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 758 * required to calculate the microschedule for the SubVP pipe is stored in the
 759 * pipe_data of the DMCUB SubVP command.
 760 */
 761static void populate_subvp_cmd_pipe_info(struct dc *dc,
 762		struct dc_state *context,
 763		union dmub_rb_cmd *cmd,
 764		struct pipe_ctx *subvp_pipe,
 765		uint8_t cmd_pipe_index)
 766{
 767	uint32_t j;
 768	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
 769			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
 770	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
 771	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
 772	struct dc_crtc_timing *phantom_timing;
 773	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
 774
 775	if (!phantom_stream)
 776		return;
 777
 778	phantom_timing = &phantom_stream->timing;
 779
 780	pipe_data->mode = SUBVP;
 781	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
 782	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
 783	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
 784	pipe_data->pipe_config.subvp_data.main_vblank_start =
 785			main_timing->v_total - main_timing->v_front_porch;
 786	pipe_data->pipe_config.subvp_data.main_vblank_end =
 787			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
 788	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
 789	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
 790	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
 791		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);
 792
 793	/* Calculate the scaling factor from the src and dst height.
 794	 * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
 795	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
 796	 *
 797	 * Make sure to combine stream and plane scaling together.
 798	 */
 799	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
 800			&out_num_stream, &out_den_stream);
 801	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
 802			&out_num_plane, &out_den_plane);
 803	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
 804	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
 805	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
 806
 807	// Prefetch lines is equal to VACTIVE + BP + VSYNC
 808	pipe_data->pipe_config.subvp_data.prefetch_lines =
 809			phantom_timing->v_total - phantom_timing->v_front_porch;
 810
 811	// Round up
 812	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
 813			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
 814					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
 815	pipe_data->pipe_config.subvp_data.processing_delay_lines =
 816			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
 817					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
 818
 819	if (subvp_pipe->bottom_pipe) {
 820		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
 821	} else if (subvp_pipe->next_odm_pipe) {
 822		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
 823	} else {
 824		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
 825	}
 826
 827	// Find phantom pipe index based on phantom stream
 828	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 829		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
 830
 831		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
 832				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
 833			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
 834			if (phantom_pipe->bottom_pipe) {
 835				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
 836			} else if (phantom_pipe->next_odm_pipe) {
 837				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
 838			} else {
 839				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
 840			}
 841			break;
 842		}
 843	}
 844}
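/*
 * Editorial sketch, not part of the original file: what the reduce_fraction()
 * calls above compute. example_gcd()/example_reduce() are hypothetical
 * stand-ins for the helper provided by ../basics/conversion.h.
 */
static inline uint32_t example_gcd(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static inline void example_reduce(uint32_t num, uint32_t den,
				  uint32_t *out_num, uint32_t *out_den)
{
	uint32_t g = example_gcd(num, den);

	*out_num = num / g;
	*out_den = den / g;
}
/*
 * e.g. a stream scaled 2160 -> 1080 reduces to 2/1 and an unscaled plane to
 * 1/1; combining them, example_reduce(2 * 1, 1 * 1, ...) yields 2/1.
 */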
 845
 846/**
 847 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 848 *
 849 * @dc: [in] current dc state
 850 * @context: [in] new dc state
 851 * @enable: [in] if true enables the pipes population
 852 *
 853 * This function loops through each pipe and populates the DMUB SubVP CMD info
 854 * based on the pipe (e.g. SubVP, VBLANK).
 855 */
 856void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 857		struct dc_state *context,
 858		bool enable)
 859{
 860	uint8_t cmd_pipe_index = 0;
 861	uint32_t i, pipe_idx;
 862	uint8_t subvp_count = 0;
 863	union dmub_rb_cmd cmd;
 864	struct pipe_ctx *subvp_pipes[2];
 865	uint32_t wm_val_refclk = 0;
 866	enum mall_stream_type pipe_mall_type;
 867
 868	memset(&cmd, 0, sizeof(cmd));
 869	// FW command for SUBVP
 870	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 871	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
 872	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
 873			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);
 874
 875	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 876		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 877
 878		/* For SubVP pipe count, only count the top most (ODM / MPC) pipe
 879		 */
 880		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 881				resource_is_pipe_type(pipe, DPP_PIPE) &&
 882				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
 883			subvp_pipes[subvp_count++] = pipe;
 884	}
 885
 886	if (enable) {
 887		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
 888		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 889			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 890			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 891
 892			if (!pipe->stream)
 893				continue;
 894
 895			/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
 896			 * Any ODM or MPC splits being used in SubVP will be handled internally in
 897			 * populate_subvp_cmd_pipe_info
 898			 */
 899			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 900					resource_is_pipe_type(pipe, DPP_PIPE) &&
 901					pipe_mall_type == SUBVP_MAIN) {
 902				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
 903			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 904					resource_is_pipe_type(pipe, DPP_PIPE) &&
 905					pipe_mall_type == SUBVP_NONE) {
 906				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
 907				// we run through DML without calculating "natural" P-state support
 908				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
 909
 910			}
 911			pipe_idx++;
 912		}
 913		if (subvp_count == 2) {
 914			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
 915		}
 916		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
 917		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;
 918
 919		// Store the original watermark value for this SubVP config so we can lower it when the
 920		// MCLK switch starts
 921		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
 922				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
 923
 924		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
 925	}
 926
 927	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 928}
 929
 930bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
 931{
 932	if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
 933		return false;
 934	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
 935}
 936
 937void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
 938{
 939	struct dmub_diagnostic_data diag_data = {0};
 940	uint32_t i;
 941
 942	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
 943		DC_LOG_ERROR("%s: invalid parameters.", __func__);
 944		return;
 945	}
 946
 947	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
 948
 949	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
 950		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
 951		return;
 952	}
 953
 954	DC_LOG_DEBUG("DMCUB STATE:");
 955	DC_LOG_DEBUG("    dmcub_version      : %08x", diag_data.dmcub_version);
 956	DC_LOG_DEBUG("    scratch  [0]       : %08x", diag_data.scratch[0]);
 957	DC_LOG_DEBUG("    scratch  [1]       : %08x", diag_data.scratch[1]);
 958	DC_LOG_DEBUG("    scratch  [2]       : %08x", diag_data.scratch[2]);
 959	DC_LOG_DEBUG("    scratch  [3]       : %08x", diag_data.scratch[3]);
 960	DC_LOG_DEBUG("    scratch  [4]       : %08x", diag_data.scratch[4]);
 961	DC_LOG_DEBUG("    scratch  [5]       : %08x", diag_data.scratch[5]);
 962	DC_LOG_DEBUG("    scratch  [6]       : %08x", diag_data.scratch[6]);
 963	DC_LOG_DEBUG("    scratch  [7]       : %08x", diag_data.scratch[7]);
 964	DC_LOG_DEBUG("    scratch  [8]       : %08x", diag_data.scratch[8]);
 965	DC_LOG_DEBUG("    scratch  [9]       : %08x", diag_data.scratch[9]);
 966	DC_LOG_DEBUG("    scratch [10]       : %08x", diag_data.scratch[10]);
 967	DC_LOG_DEBUG("    scratch [11]       : %08x", diag_data.scratch[11]);
 968	DC_LOG_DEBUG("    scratch [12]       : %08x", diag_data.scratch[12]);
 969	DC_LOG_DEBUG("    scratch [13]       : %08x", diag_data.scratch[13]);
 970	DC_LOG_DEBUG("    scratch [14]       : %08x", diag_data.scratch[14]);
 971	DC_LOG_DEBUG("    scratch [15]       : %08x", diag_data.scratch[15]);
 972	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
 973		DC_LOG_DEBUG("    pc[%d]             : %08x", i, diag_data.pc[i]);
 974	DC_LOG_DEBUG("    unk_fault_addr     : %08x", diag_data.undefined_address_fault_addr);
 975	DC_LOG_DEBUG("    inst_fault_addr    : %08x", diag_data.inst_fetch_fault_addr);
 976	DC_LOG_DEBUG("    data_fault_addr    : %08x", diag_data.data_write_fault_addr);
 977	DC_LOG_DEBUG("    inbox1_rptr        : %08x", diag_data.inbox1_rptr);
 978	DC_LOG_DEBUG("    inbox1_wptr        : %08x", diag_data.inbox1_wptr);
 979	DC_LOG_DEBUG("    inbox1_size        : %08x", diag_data.inbox1_size);
 980	DC_LOG_DEBUG("    inbox0_rptr        : %08x", diag_data.inbox0_rptr);
 981	DC_LOG_DEBUG("    inbox0_wptr        : %08x", diag_data.inbox0_wptr);
 982	DC_LOG_DEBUG("    inbox0_size        : %08x", diag_data.inbox0_size);
 983	DC_LOG_DEBUG("    outbox1_rptr       : %08x", diag_data.outbox1_rptr);
 984	DC_LOG_DEBUG("    outbox1_wptr       : %08x", diag_data.outbox1_wptr);
 985	DC_LOG_DEBUG("    outbox1_size       : %08x", diag_data.outbox1_size);
 986	DC_LOG_DEBUG("    is_enabled         : %d", diag_data.is_dmcub_enabled);
 987	DC_LOG_DEBUG("    is_soft_reset      : %d", diag_data.is_dmcub_soft_reset);
 988	DC_LOG_DEBUG("    is_secure_reset    : %d", diag_data.is_dmcub_secure_reset);
 989	DC_LOG_DEBUG("    is_traceport_en    : %d", diag_data.is_traceport_en);
 990	DC_LOG_DEBUG("    is_cw0_en          : %d", diag_data.is_cw0_enabled);
 991	DC_LOG_DEBUG("    is_cw6_en          : %d", diag_data.is_cw6_enabled);
 992}
 993
 994static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 995{
 996	struct pipe_ctx *test_pipe, *split_pipe;
 997	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
 998	struct rect r1 = scl_data->recout, r2, r2_half;
 999	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
1000	int cur_layer = pipe_ctx->plane_state->layer_index;
1001
1002	/**
1003	 * Disable the cursor if there's another pipe above this with a
1004	 * plane that contains this pipe's viewport to prevent double cursor
1005	 * and incorrect scaling artifacts.
1006	 */
1007	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
1008	     test_pipe = test_pipe->top_pipe) {
1009		// Skip invisible layer and pipe-split plane on same layer
1010		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
1011			continue;
1012
1013		r2 = test_pipe->plane_res.scl_data.recout;
1014		r2_r = r2.x + r2.width;
1015		r2_b = r2.y + r2.height;
1016
1017		/**
1018		 * There is another half of the plane on the same layer because of
1019		 * pipe-split; merge them together since they share the same height.
1020		 */
1021		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
1022		     split_pipe = split_pipe->top_pipe)
1023			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
1024				r2_half = split_pipe->plane_res.scl_data.recout;
1025				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
1026				r2.width = r2.width + r2_half.width;
1027				r2_r = r2.x + r2.width;
1028				break;
1029			}
1030
1031		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
1032			return true;
1033	}
1034
1035	return false;
1036}
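/*
 * Editorial sketch, not part of the original file: the containment test at
 * the end of dc_can_pipe_disable_cursor(), as a hypothetical standalone
 * helper. r1 is the current pipe's recout, r2 the (possibly merged) recout
 * of a plane above it.
 */
static inline bool example_rect_contains(const struct rect *r2,
					 const struct rect *r1)
{
	/* r1 lies fully inside r2 when all four of its edges do. */
	return r1->x >= r2->x && r1->y >= r2->y &&
	       r1->x + r1->width <= r2->x + r2->width &&
	       r1->y + r1->height <= r2->y + r2->height;
}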
1037
1038static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
1039{
1040	if (pipe_ctx->plane_state != NULL) {
1041		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
1042			return false;
1043
1044		if (dc_can_pipe_disable_cursor(pipe_ctx))
1045			return false;
1046	}
1047
1048	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
1049		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
1050		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
1051		return true;
1052
1053	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
1054		return true;
1055
1056	return false;
1057}
1058
1059static void dc_build_cursor_update_payload0(
1060		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
1061		struct dmub_cmd_update_cursor_payload0 *payload)
1062{
1063	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1064	unsigned int panel_inst = 0;
1065
1066	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
1067		pipe_ctx->stream->link, &panel_inst))
1068		return;
1069
1070	/* Payload: Cursor Rect is built from position & attribute
1071	 * x & y are obtained from position
1072	 */
1073	payload->cursor_rect.x = hubp->cur_rect.x;
1074	payload->cursor_rect.y = hubp->cur_rect.y;
1075	/* w & h are obtained from attribute */
1076	payload->cursor_rect.width  = hubp->cur_rect.w;
1077	payload->cursor_rect.height = hubp->cur_rect.h;
1078
1079	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
1080	payload->pipe_idx    = p_idx;
1081	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
1082	payload->panel_inst  = panel_inst;
1083}
1084
1085static void dc_build_cursor_position_update_payload0(
1086		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
1087		const struct hubp *hubp, const struct dpp *dpp)
1088{
1089	/* Hubp */
1090	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
1091	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
1092	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
1093	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
1094
1095	/* dpp */
1096	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
1097	pl->position_cfg.pipe_idx = p_idx;
1098}
1099
1100static void dc_build_cursor_attribute_update_payload1(
1101		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
1102		const struct hubp *hubp, const struct dpp *dpp)
1103{
1104	/* Hubp */
1105	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
1106	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
1107	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
1108	pl_A->aHubp.size.raw     = hubp->att.size.raw;
1109	pl_A->aHubp.settings.raw = hubp->att.settings.raw;
1110
1111	/* dpp */
1112	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
1113}
1114
1115/**
1116 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
1117 *
1118 * @pCtx: [in] pipe context
1119 * @pipe_idx: [in] pipe index
1120 *
1121 * This function stores the cursor-related information and passes it to
1122 * DMUB.
1123 */
1124void dc_send_update_cursor_info_to_dmu(
1125		struct pipe_ctx *pCtx, uint8_t pipe_idx)
1126{
1127	union dmub_rb_cmd cmd[2];
1128	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
1129					&cmd[0].update_cursor_info.update_cursor_info_data;
1130
1131	memset(cmd, 0, sizeof(cmd));
1132
1133	if (!dc_dmub_should_update_cursor_data(pCtx))
1134		return;
1135	/*
1136	 * Since we use multi_cmd_pending for the dmub command, the 2nd command
1137	 * is only used to store the cursor attributes info.
1138	 * The 1st command can be viewed as 2 parts: one for PSR/Replay data,
1139	 * the other for the cursor position info.
1140	 *
1141	 * The command header type must be the same when using multi_cmd_pending.
1142	 * Besides, while processing the 2nd command in DMU the sub type is unused,
1143	 * so it is meaningless to pass a sub type header with a different type.
1144	 */
1145
1146	{
1147		/* Build Payload#0 Header */
1148		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
1149		cmd[0].update_cursor_info.header.payload_bytes =
1150				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
1151		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd
1152
1153		/* Prepare Payload */
1154		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);
1155
1156		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
1157				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
1158	}
1159	{
1160		/* Build Payload#1 Header */
1161		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
1162		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
1163		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.
1164
1165		dc_build_cursor_attribute_update_payload1(
1166				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
1167				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
1168
1169		/* Combine the two update_cursor_info cmds and send them to the DMU */
1170		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
1171	}
1172}
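/*
 * Editorial sketch, not part of the original file: the multi_cmd_pending
 * chaining used above, reduced to its essentials. Every command in a chain
 * shares one header type; only the last clears multi_cmd_pending so DMUB
 * knows where the batch ends. The helper name is hypothetical.
 */
static inline void example_chain_two_cmds(const struct dc_context *ctx,
					  union dmub_rb_cmd cmd[2],
					  enum dmub_cmd_type type)
{
	cmd[0].cmd_common.header.type = type;
	cmd[0].cmd_common.header.multi_cmd_pending = 1;	/* more to come */

	cmd[1].cmd_common.header.type = type;
	cmd[1].cmd_common.header.multi_cmd_pending = 0;	/* last command */

	dc_wake_and_execute_dmub_cmd_list(ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}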
1173
1174bool dc_dmub_check_min_version(struct dmub_srv *srv)
1175{
1176	if (!srv->hw_funcs.is_psrsu_supported)
1177		return true;
1178	return srv->hw_funcs.is_psrsu_supported(srv);
1179}
1180
1181void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
1182{
1183	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1184
1185	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
1186		DC_LOG_ERROR("%s: invalid parameters.", __func__);
1187		return;
1188	}
1189
1190	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
1191				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
1192		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
1193		return;
1194	}
1195
1196	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
1197				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
1198		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
1199		return;
1200	}
1201
1202	DC_LOG_DEBUG("Enabled DPIA trace\n");
1203}
1204
1205void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
1206{
1207	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
1208}
1209
1210bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
1211{
1212	struct dc_context *dc_ctx;
1213	enum dmub_status status;
1214
1215	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1216		return true;
1217
1218	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
1219		return true;
1220
1221	dc_ctx = dc_dmub_srv->ctx;
1222
1223	if (wait) {
1224		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
1225			do {
1226				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
1227			} while (status != DMUB_STATUS_OK);
1228		} else {
1229			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
1230			if (status != DMUB_STATUS_OK) {
1231				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
1232				return false;
1233			}
1234		}
1235	} else
1236		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
1237
1238	return true;
1239}
1240
1241static int count_active_streams(const struct dc *dc)
1242{
1243	int i, count = 0;
1244
1245	for (i = 0; i < dc->current_state->stream_count; ++i) {
1246		struct dc_stream_state *stream = dc->current_state->streams[i];
1247
1248		if (stream && !stream->dpms_off)
1249			count += 1;
1250	}
1251
1252	return count;
1253}
1254
1255static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
1256{
1257	volatile const struct dmub_shared_state_ips_fw *ips_fw;
1258	struct dc_dmub_srv *dc_dmub_srv;
1259	union dmub_rb_cmd cmd = {0};
1260
1261	if (dc->debug.dmcub_emulation)
1262		return;
1263
1264	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1265		return;
1266
1267	dc_dmub_srv = dc->ctx->dmub_srv;
1268	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1269
1270	memset(&cmd, 0, sizeof(cmd));
1271	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
1272	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
1273	cmd.idle_opt_notify_idle.header.payload_bytes =
1274		sizeof(cmd.idle_opt_notify_idle) -
1275		sizeof(cmd.idle_opt_notify_idle.header);
1276
1277	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
1278
1279	if (dc->work_arounds.skip_psr_ips_crtc_disable)
1280		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;
1281
1282	if (allow_idle) {
1283		volatile struct dmub_shared_state_ips_driver *ips_driver =
1284			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1285		union dmub_shared_state_ips_driver_signals new_signals;
1286
1287		DC_LOG_IPS(
1288			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
1289			__func__,
1290			ips_fw->signals.bits.ips1_commit,
1291			ips_fw->signals.bits.ips2_commit);
1292
1293		dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
1294
1295		memset(&new_signals, 0, sizeof(new_signals));
1296
1297		new_signals.bits.allow_idle = 1; /* always set */
1298
1299		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
1300		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
1301			new_signals.bits.allow_pg = 1;
1302			new_signals.bits.allow_ips1 = 1;
1303			new_signals.bits.allow_ips2 = 1;
1304			new_signals.bits.allow_z10 = 1;
1305		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
1306			new_signals.bits.allow_ips1 = 1;
1307		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
1308			new_signals.bits.allow_pg = 1;
1309			new_signals.bits.allow_ips1 = 1;
1310		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
1311			new_signals.bits.allow_pg = 1;
1312			new_signals.bits.allow_ips1 = 1;
1313			new_signals.bits.allow_ips2 = 1;
1314		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
1315			/* TODO: Move this logic out to hwseq */
1316			if (count_active_streams(dc) == 0) {
1317				/* IPS2 - Display off */
1318				new_signals.bits.allow_pg = 1;
1319				new_signals.bits.allow_ips1 = 1;
1320				new_signals.bits.allow_ips2 = 1;
1321				new_signals.bits.allow_z10 = 1;
1322			} else {
1323				/* RCG only */
1324				new_signals.bits.allow_pg = 0;
1325				new_signals.bits.allow_ips1 = 1;
1326				new_signals.bits.allow_ips2 = 0;
1327				new_signals.bits.allow_z10 = 0;
1328			}
1329		}
1330
1331		ips_driver->signals = new_signals;
1332		dc_dmub_srv->driver_signals = ips_driver->signals;
1333	}
1334
1335	DC_LOG_IPS(
1336		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
1337		__func__,
1338		allow_idle,
1339		ips_fw->signals.bits.ips1_commit,
1340		ips_fw->signals.bits.ips2_commit);
1341
1342	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
1343	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
1344	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
1345
1346	/* Register access should stop at this point. */
1347	if (allow_idle)
1348		dc_dmub_srv->needs_idle_wake = true;
1349}
1350
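/*
 * Bring the hardware out of RCG/IPS1/IPS2. Clears the driver allow
 * signals, messages PMFW to exit IPS2 if it was committed, waits for the
 * firmware commit bits to clear, and resynchronizes inbox1 before normal
 * command submission resumes.
 */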
1351static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
1352{
1353	struct dc_dmub_srv *dc_dmub_srv;
1354	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
1355
1356	if (dc->debug.dmcub_emulation)
1357		return;
1358
1359	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1360		return;
1361
1362	dc_dmub_srv = dc->ctx->dmub_srv;
1363
1364	if (dc->clk_mgr->funcs->exit_low_power_state) {
1365		volatile const struct dmub_shared_state_ips_fw *ips_fw =
1366			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1367		volatile struct dmub_shared_state_ips_driver *ips_driver =
1368			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1369		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
1370
1371		rcg_exit_count = ips_fw->rcg_exit_count;
1372		ips1_exit_count = ips_fw->ips1_exit_count;
1373		ips2_exit_count = ips_fw->ips2_exit_count;
1374
1375		ips_driver->signals.all = 0;
1376		dc_dmub_srv->driver_signals = ips_driver->signals;
1377
1378		DC_LOG_IPS(
1379			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
1380			__func__,
1381			ips_driver->signals.bits.allow_ips1,
1382			ips_driver->signals.bits.allow_ips2,
1383			ips_fw->signals.bits.ips1_commit,
1384			ips_fw->signals.bits.ips2_commit,
1385			ips_fw->rcg_entry_count,
1386			ips_fw->ips1_entry_count,
1387			ips_fw->ips2_entry_count);
1388
1389		/* Note: register access has technically not resumed for DCN here, but we
1390		 * need to message PMFW through our standard register interface.
1391		 */
1392		dc_dmub_srv->needs_idle_wake = false;
1393
1394		if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
1395		    (!dc->debug.optimize_ips_handshake ||
1396		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
1397			DC_LOG_IPS(
1398				"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
1399				ips_fw->signals.bits.ips1_commit,
1400				ips_fw->signals.bits.ips2_commit);
1401
1402			if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
1403				udelay(dc->debug.ips2_eval_delay_us);
1404
1405			if (ips_fw->signals.bits.ips2_commit) {
1406				DC_LOG_IPS(
1407					"exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
1408					ips_fw->signals.bits.ips1_commit,
1409					ips_fw->signals.bits.ips2_commit);
1410
1411				// Tell PMFW to exit low power state
1412				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1413
1414				DC_LOG_IPS(
1415					"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
1416					ips_fw->signals.bits.ips1_commit,
1417					ips_fw->signals.bits.ips2_commit);
1418
1419				// Wait for IPS2 entry upper bound
1420				udelay(dc->debug.ips2_entry_delay_us);
1421
1422				DC_LOG_IPS(
1423					"exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
1424					ips_fw->signals.bits.ips1_commit,
1425					ips_fw->signals.bits.ips2_commit);
1426
1427				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1428
1429				DC_LOG_IPS(
1430					"wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
1431					ips_fw->signals.bits.ips1_commit,
1432					ips_fw->signals.bits.ips2_commit);
1433
1434				while (ips_fw->signals.bits.ips2_commit)
1435					udelay(1);
1436
1437				DC_LOG_IPS(
1438					"wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
1439					ips_fw->signals.bits.ips1_commit,
1440					ips_fw->signals.bits.ips2_commit);
1441
1442				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1443					ASSERT(0);
1444
1445				DC_LOG_IPS(
1446					"resync inbox1 (ips1_commit=%u ips2_commit=%u)",
1447					ips_fw->signals.bits.ips1_commit,
1448					ips_fw->signals.bits.ips2_commit);
1449
1450				dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
1451			}
1452		}
1453
1454		dc_dmub_srv_notify_idle(dc, false);
1455		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
1456			DC_LOG_IPS(
1457				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
1458				ips_fw->signals.bits.ips1_commit,
1459				ips_fw->signals.bits.ips2_commit);
1460
1461			while (ips_fw->signals.bits.ips1_commit)
1462				udelay(1);
1463
1464			DC_LOG_IPS(
1465				"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
1466				ips_fw->signals.bits.ips1_commit,
1467				ips_fw->signals.bits.ips2_commit);
1468		}
1469	}
1470
1471	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1472		ASSERT(0);
1473
1474	DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
1475		__func__,
1476		rcg_exit_count,
1477		ips1_exit_count,
1478		ips2_exit_count);
1479}
1480
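/* Mirror the ACPI power state (D0 or D3) into the DMUB service layer. */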
1481void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
1482{
1483	struct dmub_srv *dmub;
1484
1485	if (!dc_dmub_srv)
1486		return;
1487
1488	dmub = dc_dmub_srv->dmub;
1489
1490	if (power_state == DC_ACPI_CM_POWER_STATE_D0)
1491		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
1492	else
1493		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
1494}
1495
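/*
 * Report the DC power state to firmware through the IDLE_OPT
 * set-dc-power-state command; anything other than D0 or D3 is sent as
 * UNKNOWN.
 */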
1496void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
1497					  enum dc_acpi_cm_power_state power_state)
1498{
1499	union dmub_rb_cmd cmd;
1500
1501	if (!dc_dmub_srv)
1502		return;
1503
1504	memset(&cmd, 0, sizeof(cmd));
1505
1506	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
1507	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
1508	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
1509		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
1510
1511	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
1512		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
1513	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
1514		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
1515	} else {
1516		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
1517	}
1518
1519	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1520}
1521
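/*
 * Check whether firmware is requesting link redetection. The shared
 * state flag is preferred when the firmware advertises
 * shared_state_link_detection; otherwise fall back to a scratch register
 * query, which requires a temporary exit from idle.
 */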
1522bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
1523{
1524	volatile const struct dmub_shared_state_ips_fw *ips_fw;
1525	bool reallow_idle = false, should_detect = false;
1526
1527	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1528		return false;
1529
1530	if (dc_dmub_srv->dmub->shared_state &&
1531	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
1532		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1533		return ips_fw->signals.bits.detection_required;
1534	}
1535
1536	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
1537	if (dc_dmub_srv->idle_allowed) {
1538		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
1539		reallow_idle = true;
1540	}
1541
1542	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);
1543
1544	/* Re-enter idle if we're not about to immediately redetect links. */
1545	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1546	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
1547		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);
1548
1549	return should_detect;
1550}
1551
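/*
 * Single entry point for driver idle transitions. Exiting idle runs the
 * full low power exit sequence before clearing idle_allowed; entering
 * idle marks idle_allowed first so that concurrent callers do not notify
 * twice.
 */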
1552void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
1553{
1554	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1555
1556	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1557		return;
1558
1559	allow_idle &= (!dc->debug.ips_disallow_entry);
1560
1561	if (dc_dmub_srv->idle_allowed == allow_idle)
1562		return;
1563
1564	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
1565
1566	/*
1567	 * Entering a low power state requires a driver notification.
1568	 * Powering up the hardware requires notifying PMFW and DMCUB.
1569	 * Clearing the driver idle allow requires a DMCUB command.
1570	 * DMCUB commands require the DMCUB to be powered up and restored.
1571	 */
1572
1573	if (!allow_idle) {
1574		dc_dmub_srv->idle_exit_counter += 1;
1575
1576		dc_dmub_srv_exit_low_power_state(dc);
1577		/*
1578		 * Idle is considered fully exited only after the sequence above
1579		 * fully completes. If we have a race of two threads exiting
1580		 * at the same time then it's safe to perform the sequence
1581		 * twice, as long as we're not re-entering.
1582		 *
1583		 * Infinite command submission is avoided by using the
1584		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
1585		 */
1586		dc_dmub_srv->idle_allowed = false;
1587
1588		dc_dmub_srv->idle_exit_counter -= 1;
1589		if (dc_dmub_srv->idle_exit_counter < 0) {
1590			ASSERT(0);
1591			dc_dmub_srv->idle_exit_counter = 0;
1592		}
1593	} else {
1594		/* Consider idle as notified prior to the actual submission to
1595		 * prevent multiple entries. */
1596		dc_dmub_srv->idle_allowed = true;
1597
1598		dc_dmub_srv_notify_idle(dc, allow_idle);
1599	}
1600}
1601
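/*
 * Wake-and-execute wrappers for DMUB ring buffer commands: exit idle if
 * needed, submit through the DM helpers, then re-enter idle when it is
 * safe (no concurrent idle exits, reallow not disabled). Illustrative
 * usage, as in dc_dmub_srv_notify_fw_dc_power_state() above:
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
 *	cmd.idle_opt_set_dc_power_state.header.sub_type =
 *		DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
 *	dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */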
1602bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
1603				  enum dm_dmub_wait_type wait_type)
1604{
1605	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
1606}
1607
1608bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
1609				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
1610{
1611	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1612	bool result = false, reallow_idle = false;
1613
1614	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1615		return false;
1616
1617	if (count == 0)
1618		return true;
1619
1620	if (dc_dmub_srv->idle_allowed) {
1621		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1622		reallow_idle = true;
1623	}
1624
1625	/*
1626	 * These may have different implementations in DM, so ensure
1627	 * that we guide it to the expected helper.
1628	 */
1629	if (count > 1)
1630		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
1631	else
1632		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
1633
1634	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1635	    !ctx->dc->debug.disable_dmub_reallow_idle)
1636		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1637
1638	return result;
1639}
1640
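/*
 * Send a GPINT command to DMCUB. A timeout is treated as success for
 * fire-and-forget (NO_WAIT) submissions, and the response register is
 * only read back for WAIT_WITH_REPLY.
 */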
1641static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1642				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1643{
1644	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1645	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
1646	enum dmub_status status;
1647
1648	if (response)
1649		*response = 0;
1650
1651	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1652		return false;
1653
1654	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
1655	if (status != DMUB_STATUS_OK) {
1656		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
1657			return true;
1658
1659		return false;
1660	}
1661
1662	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
1663		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
1664
1665	return true;
1666}
1667
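/*
 * GPINT variant of the wake-and-execute helper above: exit idle if
 * needed, send the command, then re-enter idle when safe. Illustrative
 * usage, as in dc_dmub_srv_ips_residency_cntl() below:
 *
 *	dc_wake_and_execute_gpint(ctx, DMUB_GPINT__IPS_RESIDENCY,
 *				  start_measurement, NULL,
 *				  DM_DMUB_WAIT_TYPE_WAIT);
 */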
1668bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1669			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1670{
1671	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1672	bool result = false, reallow_idle = false;
1673
1674	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1675		return false;
1676
1677	if (dc_dmub_srv->idle_allowed) {
1678		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1679		reallow_idle = true;
1680	}
1681
1682	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
1683
1684	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1685	    !ctx->dc->debug.disable_dmub_reallow_idle)
1686		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1687
1688	return result;
1689}
1690
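/*
 * Send the FAMS2 configuration to firmware as a chained command list: one
 * global config command followed by one command per stream, with
 * multi_cmd_pending set on all but the last so firmware applies the whole
 * set atomically. When disabling, only the global command is sent.
 */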
1691void dc_dmub_srv_fams2_update_config(struct dc *dc,
1692		struct dc_state *context,
1693		bool enable)
1694{
1695	uint8_t num_cmds = 1;
1696	uint32_t i;
1697	union dmub_rb_cmd cmd[MAX_STREAMS + 1];
1698	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
1699
1700	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
1701	/* fill in generic command header */
1702	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1703	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1704	global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1705
1706	if (enable) {
1707		/* send global configuration parameters */
1708		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
1709
1710		/* copy static feature configuration overrides */
1711		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
1712		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
1713		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
1714
1715		/* construct per-stream configs */
1716		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
1717			struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
1718
1719			/* configure command header */
1720			stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1721			stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1722			stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1723			stream_cmd->header.multi_cmd_pending = 1;
1724			/* copy stream static state */
1725			memcpy(&stream_cmd->config.stream,
1726					&context->bw_ctx.bw.dcn.fams2_stream_params[i],
1727					sizeof(struct dmub_fams2_stream_static_state));
1728		}
1729	}
1730
1731	/* apply feature configuration based on current driver state */
1732	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
1733	global_cmd->config.global.features.bits.enable = enable;
1734
1735	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
1736		/* set multi pending for global, and unset for last stream cmd */
1737		global_cmd->header.multi_cmd_pending = 1;
1738		cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
1739		num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
1740	}
1741
1742	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
1743}
1744
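/* Request an OTG V_TOTAL min/mid/max (DRR) update through firmware. */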
1745void dc_dmub_srv_fams2_drr_update(struct dc *dc,
1746		uint32_t tg_inst,
1747		uint32_t vtotal_min,
1748		uint32_t vtotal_max,
1749		uint32_t vtotal_mid,
1750		uint32_t vtotal_mid_frame_num,
1751		bool program_manual_trigger)
1752{
1753	union dmub_rb_cmd cmd = { 0 };
1754
1755	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1756	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
1757	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
1758	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
1759	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
1760	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
1761	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
1762	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
1763
1764	cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
1765
1766	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1767}
1768
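/*
 * Build one FAMS2 flip command per surface that carries an address
 * update and submit them as a single chained list, so firmware flips all
 * planes of the stream together.
 */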
1769void dc_dmub_srv_fams2_passthrough_flip(
1770		struct dc *dc,
1771		struct dc_state *state,
1772		struct dc_stream_state *stream,
1773		struct dc_surface_update *srf_updates,
1774		int surface_count)
1775{
1776	int plane_index;
1777	union dmub_rb_cmd cmds[MAX_PLANES];
1778	struct dc_plane_address *address;
1779	struct dc_plane_state *plane_state;
1780	int num_cmds = 0;
1781	struct dc_stream_status *stream_status = dc_stream_get_status(stream);
1782
1783	if (surface_count <= 0 || stream_status == NULL)
1784		return;
1785
1786	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);
1787
1788	/* build command for each surface update */
1789	for (plane_index = 0; plane_index < surface_count; plane_index++) {
1790		plane_state = srf_updates[plane_index].surface;
1791		address = &plane_state->address;
1792
1793		/* skip if there is no address update for plane */
1794		if (!srf_updates[plane_index].flip_addr)
1795			continue;
1796
1797		/* build command header */
1798		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1799		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
1800		cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
1801
1802		/* for chaining multiple commands, all but the last command set multi_cmd_pending to 1 */
1803		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
1804
1805		/* set topology info */
1806		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
1807		if (stream_status)
1808			cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;
1809
1810		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;
1811
1812		/* build address info for command */
1813		switch (address->type) {
1814		case PLN_ADDR_TYPE_GRAPHICS:
1815			if (address->grph.addr.quad_part == 0) {
1816				BREAK_TO_DEBUGGER();
1817				break;
1818			}
1819
1820			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
1821					address->grph.meta_addr.low_part;
1822			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
1823					(uint16_t)address->grph.meta_addr.high_part;
1824			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
1825					address->grph.addr.low_part;
1826			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
1827					(uint16_t)address->grph.addr.high_part;
1828			break;
1829		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
1830			if (address->video_progressive.luma_addr.quad_part == 0 ||
1831				address->video_progressive.chroma_addr.quad_part == 0) {
1832				BREAK_TO_DEBUGGER();
1833				break;
1834			}
1835
1836			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
1837					address->video_progressive.luma_meta_addr.low_part;
1838			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
1839					(uint16_t)address->video_progressive.luma_meta_addr.high_part;
1840			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
1841					address->video_progressive.chroma_meta_addr.low_part;
1842			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
1843					(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
1844			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
1845					address->video_progressive.luma_addr.low_part;
1846			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
1847					(uint16_t)address->video_progressive.luma_addr.high_part;
1848			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
1849					address->video_progressive.chroma_addr.low_part;
1850			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
1851					(uint16_t)address->video_progressive.chroma_addr.high_part;
1852			break;
1853		default:
1854			// Should never be hit
1855			BREAK_TO_DEBUGGER();
1856			break;
1857		}
1858
1859		num_cmds++;
1860	}
1861
1862	if (num_cmds > 0) {
1863		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
1864		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
1865	}
1866}
1867
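/* Start or stop IPS residency measurement in firmware via GPINT. */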
1868bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
1869{
1870	bool result;
1871
1872	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1873		return false;
1874
1875	result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
1876					   start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
1877
1878	return result;
1879}
1880
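/*
 * Read back IPS residency telemetry for output->ips_mode: residency
 * percentage, entry counter, active and inactive durations (returned as
 * low/high 32-bit words), and the 16-bucket entry histogram. Fields whose
 * GPINT query fails are zeroed.
 */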
1881void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
1882{
1883	uint32_t i;
1884	enum dmub_gpint_command command_code;
1885
1886	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1887		return;
1888
1889	switch (output->ips_mode) {
1890	case DMUB_IPS_MODE_IPS1_MAX:
1891		command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
1892		break;
1893	case DMUB_IPS_MODE_IPS2:
1894		command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
1895		break;
1896	case DMUB_IPS_MODE_IPS1_RCG:
1897		command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
1898		break;
1899	case DMUB_IPS_MODE_IPS1_ONO2_ON:
1900		command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
1901		break;
1902	default:
1903		command_code = DMUB_GPINT__INVALID_COMMAND;
1904		break;
1905	}
1906
1907	if (command_code == DMUB_GPINT__INVALID_COMMAND)
1908		return;
1909
1910	// send gpint commands and wait for ack
1911	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
1912				      (uint16_t)(output->ips_mode),
1913				       &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1914		output->residency_percent = 0;
1915
1916	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
1917				      (uint16_t)(output->ips_mode),
1918				       &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1919		output->entry_counter = 0;
1920
1921	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
1922				      (uint16_t)(output->ips_mode),
1923				       &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1924		output->total_active_time_us[0] = 0;
1925	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
1926				      (uint16_t)(output->ips_mode),
1927				       &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1928		output->total_active_time_us[1] = 0;
1929
1930	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
1931				      (uint16_t)(output->ips_mode),
1932				       &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1933		output->total_inactive_time_us[0] = 0;
1934	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
1935				      (uint16_t)(output->ips_mode),
1936				       &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1937		output->total_inactive_time_us[1] = 0;
1938
1939	// NUM_IPS_HISTOGRAM_BUCKETS = 16
1940	for (i = 0; i < 16; i++) {
1941		if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
1942					       DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1943			output->histogram[i] = 0;
	}
1944}