   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 *
  24 */
  25
  26#include <linux/delay.h>
  27#include "dm_services.h"
  28#include "basics/dc_common.h"
  29#include "core_types.h"
  30#include "resource.h"
  31#include "custom_float.h"
  32#include "dcn10_hw_sequencer.h"
  33#include "dcn10_hw_sequencer_debug.h"
  34#include "dce/dce_hwseq.h"
  35#include "abm.h"
  36#include "dmcu.h"
  37#include "dcn10_optc.h"
  38#include "dcn10_dpp.h"
  39#include "dcn10_mpc.h"
  40#include "timing_generator.h"
  41#include "opp.h"
  42#include "ipp.h"
  43#include "mpc.h"
  44#include "reg_helper.h"
  45#include "dcn10_hubp.h"
  46#include "dcn10_hubbub.h"
  47#include "dcn10_cm_common.h"
  48#include "dc_link_dp.h"
  49#include "dccg.h"
  50#include "clk_mgr.h"
  51#include "link_hwss.h"
  52#include "dpcd_defs.h"
  53#include "dsc.h"
  54#include "dce/dmub_hw_lock_mgr.h"
  55
  56#define DC_LOGGER_INIT(logger)
  57
  58#define CTX \
  59	hws->ctx
  60#define REG(reg)\
  61	hws->regs->reg
  62
  63#undef FN
  64#define FN(reg_name, field_name) \
  65	hws->shifts->field_name, hws->masks->field_name
  66
   67/* printed value is 17 characters wide; the first two characters are spaces */
  68#define DTN_INFO_MICRO_SEC(ref_cycle) \
  69	print_microsec(dc_ctx, log_ctx, ref_cycle)
  70
  71#define GAMMA_HW_POINTS_NUM 256
  72
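/*
 * Convert a DCHUB reference clock cycle count to microseconds and print it
 * as a fixed-width DTN log column with three fractional digits
 * (us = ref_cycle / ref_clk_mhz, printed via "  %11d.%03d").
 */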
  73void print_microsec(struct dc_context *dc_ctx,
  74	struct dc_log_buffer_ctx *log_ctx,
  75	uint32_t ref_cycle)
  76{
  77	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
  78	static const unsigned int frac = 1000;
  79	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
  80
  81	DTN_INFO("  %11d.%03d",
  82			us_x10 / frac,
  83			us_x10 % frac);
  84}
  85
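/*
 * Acquire or release the pipe control lock for every enabled top pipe in
 * @context.  Locking goes through the top pipe's timing generator only, so
 * bottom pipes and disabled pipes are skipped to avoid redundant (un)locking.
 * Illustrative use is to bracket a multi-pipe update:
 *   dcn10_lock_all_pipes(dc, context, true);
 *   ... program the pipes ...
 *   dcn10_lock_all_pipes(dc, context, false);
 */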
  86void dcn10_lock_all_pipes(struct dc *dc,
  87	struct dc_state *context,
  88	bool lock)
  89{
  90	struct pipe_ctx *pipe_ctx;
  91	struct timing_generator *tg;
  92	int i;
  93
  94	for (i = 0; i < dc->res_pool->pipe_count; i++) {
  95		pipe_ctx = &context->res_ctx.pipe_ctx[i];
  96		tg = pipe_ctx->stream_res.tg;
  97
  98		/*
  99		 * Only lock the top pipe's tg to prevent redundant
 100		 * (un)locking. Also skip if pipe is disabled.
 101		 */
 102		if (pipe_ctx->top_pipe ||
 103		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
 104		    !tg->funcs->is_tg_enabled(tg))
 105			continue;
 106
  107		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
 111	}
 112}
 113
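/* Dump the MPC and DPP CRC result registers when they exist on this ASIC. */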
 114static void log_mpc_crc(struct dc *dc,
 115	struct dc_log_buffer_ctx *log_ctx)
 116{
 117	struct dc_context *dc_ctx = dc->ctx;
 118	struct dce_hwseq *hws = dc->hwseq;
 119
 120	if (REG(MPC_CRC_RESULT_GB))
 121		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
 122		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
 123	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
 124		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
 125		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
 126}
 127
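/*
 * Log the four HUBBUB watermark sets (data/pte-meta urgent, self-refresh
 * enter/exit and DRAM clock change) in microseconds.
 */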
 128void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
 129{
 130	struct dc_context *dc_ctx = dc->ctx;
 131	struct dcn_hubbub_wm wm;
 132	int i;
 133
 134	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
 135	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
 136
 137	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
 138			"         sr_enter          sr_exit  dram_clk_change\n");
 139
 140	for (i = 0; i < 4; i++) {
 141		struct dcn_hubbub_wm_set *s;
 142
 143		s = &wm.sets[i];
 144		DTN_INFO("WM_Set[%d]:", s->wm_set);
 145		DTN_INFO_MICRO_SEC(s->data_urgent);
 146		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
 147		DTN_INFO_MICRO_SEC(s->sr_enter);
 148		DTN_INFO_MICRO_SEC(s->sr_exit);
  149		DTN_INFO_MICRO_SEC(s->dram_clk_chanage); /* spelling matches the struct field name */
 150		DTN_INFO("\n");
 151	}
 152
 153	DTN_INFO("\n");
 154}
 155
 156static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
 157{
 158	struct dc_context *dc_ctx = dc->ctx;
 159	struct resource_pool *pool = dc->res_pool;
 160	int i;
 161
 162	DTN_INFO(
 163		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
 164	for (i = 0; i < pool->pipe_count; i++) {
 165		struct hubp *hubp = pool->hubps[i];
 166		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
 167
 168		hubp->funcs->hubp_read_state(hubp);
 169
 170		if (!s->blank_en) {
 171			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
 172					hubp->inst,
 173					s->pixel_format,
 174					s->inuse_addr_hi,
 175					s->viewport_width,
 176					s->viewport_height,
 177					s->rotation_angle,
 178					s->h_mirror_en,
 179					s->sw_mode,
 180					s->dcc_en,
 181					s->blank_en,
 182					s->clock_en,
 183					s->ttu_disable,
 184					s->underflow_status);
 185			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
 186			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
 187			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
 188			DTN_INFO("\n");
 189		}
 190	}
 191
 192	DTN_INFO("\n=========RQ========\n");
 193	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
 194		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
 195		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
 196	for (i = 0; i < pool->pipe_count; i++) {
 197		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 198		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
 199
 200		if (!s->blank_en)
 201			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
 202				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
 203				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
 204				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
 205				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
 206				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
 207				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
 208				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
 209				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
 210				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
 211	}
 212
 213	DTN_INFO("========DLG========\n");
 214	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
 215			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
 216			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
 217			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
 218			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
 219			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
 220			"  x_rp_dlay  x_rr_sfl\n");
 221	for (i = 0; i < pool->pipe_count; i++) {
 222		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 223		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
 224
 225		if (!s->blank_en)
 226			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
  227				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
 228				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
 229				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
 230				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
 231				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
 232				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
 233				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
 234				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
 235				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
 236				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
 237				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
 238				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
 239				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
 240				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
 241				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
 242				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
 243				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
 244				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
 245				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
 246				dlg_regs->xfc_reg_remote_surface_flip_latency);
 247	}
 248
 249	DTN_INFO("========TTU========\n");
 250	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
 251			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
 252			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
 253	for (i = 0; i < pool->pipe_count; i++) {
 254		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 255		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
 256
 257		if (!s->blank_en)
 258			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
 259				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
 260				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
 261				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
 262				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
 263				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
 264				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
 265				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
 266	}
 267	DTN_INFO("\n");
 268}
 269
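/*
 * Capture a snapshot of the DCN hardware state (HUBBUB watermarks, HUBP,
 * DPP, MPCC, OTG, DSC, stream/link encoders and the calculated clocks)
 * into the DTN log buffer for debugging.
 */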
 270void dcn10_log_hw_state(struct dc *dc,
 271	struct dc_log_buffer_ctx *log_ctx)
 272{
 273	struct dc_context *dc_ctx = dc->ctx;
 274	struct resource_pool *pool = dc->res_pool;
 275	int i;
 276
 277	DTN_INFO_BEGIN();
 278
 279	dcn10_log_hubbub_state(dc, log_ctx);
 280
 281	dcn10_log_hubp_states(dc, log_ctx);
 282
 283	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
 284			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
 285			"C31 C32   C33 C34\n");
 286	for (i = 0; i < pool->pipe_count; i++) {
 287		struct dpp *dpp = pool->dpps[i];
 288		struct dcn_dpp_state s = {0};
 289
 290		dpp->funcs->dpp_read_state(dpp, &s);
 291
 292		if (!s.is_enabled)
 293			continue;
 294
 295		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
 296				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
 297				dpp->inst,
 298				s.igam_input_format,
 299				(s.igam_lut_mode == 0) ? "BypassFixed" :
 300					((s.igam_lut_mode == 1) ? "BypassFloat" :
 301					((s.igam_lut_mode == 2) ? "RAM" :
 302					((s.igam_lut_mode == 3) ? "RAM" :
 303								 "Unknown"))),
 304				(s.dgam_lut_mode == 0) ? "Bypass" :
 305					((s.dgam_lut_mode == 1) ? "sRGB" :
 306					((s.dgam_lut_mode == 2) ? "Ycc" :
 307					((s.dgam_lut_mode == 3) ? "RAM" :
 308					((s.dgam_lut_mode == 4) ? "RAM" :
 309								 "Unknown")))),
 310				(s.rgam_lut_mode == 0) ? "Bypass" :
 311					((s.rgam_lut_mode == 1) ? "sRGB" :
 312					((s.rgam_lut_mode == 2) ? "Ycc" :
 313					((s.rgam_lut_mode == 3) ? "RAM" :
 314					((s.rgam_lut_mode == 4) ? "RAM" :
 315								 "Unknown")))),
 316				s.gamut_remap_mode,
 317				s.gamut_remap_c11_c12,
 318				s.gamut_remap_c13_c14,
 319				s.gamut_remap_c21_c22,
 320				s.gamut_remap_c23_c24,
 321				s.gamut_remap_c31_c32,
 322				s.gamut_remap_c33_c34);
 323		DTN_INFO("\n");
 324	}
 325	DTN_INFO("\n");
 326
 327	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
 328	for (i = 0; i < pool->pipe_count; i++) {
 329		struct mpcc_state s = {0};
 330
 331		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
 332		if (s.opp_id != 0xf)
 333			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
 334				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
 335				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
 336				s.idle);
 337	}
 338	DTN_INFO("\n");
 339
 340	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
 341
 342	for (i = 0; i < pool->timing_generator_count; i++) {
 343		struct timing_generator *tg = pool->timing_generators[i];
 344		struct dcn_otg_state s = {0};
 345		/* Read shared OTG state registers for all DCNx */
 346		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
 347
 348		/*
 349		 * For DCN2 and greater, a register on the OPP is used to
 350		 * determine if the CRTC is blanked instead of the OTG. So use
 351		 * dpg_is_blanked() if exists, otherwise fallback on otg.
 352		 *
 353		 * TODO: Implement DCN-specific read_otg_state hooks.
 354		 */
 355		if (pool->opps[i]->funcs->dpg_is_blanked)
 356			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
 357		else
 358			s.blank_enabled = tg->funcs->is_blanked(tg);
 359
 360		//only print if OTG master is enabled
 361		if ((s.otg_enabled & 1) == 0)
 362			continue;
 363
 364		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
 365				tg->inst,
 366				s.v_blank_start,
 367				s.v_blank_end,
 368				s.v_sync_a_start,
 369				s.v_sync_a_end,
 370				s.v_sync_a_pol,
 371				s.v_total_max,
 372				s.v_total_min,
 373				s.v_total_max_sel,
 374				s.v_total_min_sel,
 375				s.h_blank_start,
 376				s.h_blank_end,
 377				s.h_sync_a_start,
 378				s.h_sync_a_end,
 379				s.h_sync_a_pol,
 380				s.h_total,
 381				s.v_total,
 382				s.underflow_occurred_status,
 383				s.blank_enabled);
 384
 385		// Clear underflow for debug purposes
 386		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
 387		// This function is called only from Windows or Diags test environment, hence it's safe to clear
 388		// it from here without affecting the original intent.
 389		tg->funcs->clear_optc_underflow(tg);
 390	}
 391	DTN_INFO("\n");
 392
 393	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
 394	// TODO: Update golden log header to reflect this name change
 395	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
 396	for (i = 0; i < pool->res_cap->num_dsc; i++) {
 397		struct display_stream_compressor *dsc = pool->dscs[i];
 398		struct dcn_dsc_state s = {0};
 399
 400		dsc->funcs->dsc_read_state(dsc, &s);
 401		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
  402			dsc->inst,
 403			s.dsc_clock_en,
 404			s.dsc_slice_width,
 405			s.dsc_bits_per_pixel);
 406		DTN_INFO("\n");
 407	}
 408	DTN_INFO("\n");
 409
 410	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
 411			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
 412	for (i = 0; i < pool->stream_enc_count; i++) {
 413		struct stream_encoder *enc = pool->stream_enc[i];
 414		struct enc_state s = {0};
 415
 416		if (enc->funcs->enc_read_state) {
 417			enc->funcs->enc_read_state(enc, &s);
 418			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
 419				enc->id,
 420				s.dsc_mode,
 421				s.sec_gsp_pps_line_num,
 422				s.vbid6_line_reference,
 423				s.vbid6_line_num,
 424				s.sec_gsp_pps_enable,
 425				s.sec_stream_enable);
 426			DTN_INFO("\n");
 427		}
 428	}
 429	DTN_INFO("\n");
 430
 431	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
 432	for (i = 0; i < dc->link_count; i++) {
 433		struct link_encoder *lenc = dc->links[i]->link_enc;
 434
 435		struct link_enc_state s = {0};
 436
 437		if (lenc->funcs->read_state) {
 438			lenc->funcs->read_state(lenc, &s);
 439			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
 440				i,
 441				s.dphy_fec_en,
 442				s.dphy_fec_ready_shadow,
 443				s.dphy_fec_active_status,
 444				s.dp_link_training_complete);
 445			DTN_INFO("\n");
 446		}
 447	}
 448	DTN_INFO("\n");
 449
 450	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
 451		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
 452			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
 453			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
 454			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
 455			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
 456			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
 457			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
 458			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
 459
 460	log_mpc_crc(dc, log_ctx);
 461
 462	DTN_INFO_END();
 463}
 464
 465bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
 466{
 467	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 468	struct timing_generator *tg = pipe_ctx->stream_res.tg;
 469
 470	if (tg->funcs->is_optc_underflow_occurred(tg)) {
 471		tg->funcs->clear_optc_underflow(tg);
 472		return true;
 473	}
 474
 475	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
 476		hubp->funcs->hubp_clear_underflow(hubp);
 477		return true;
 478	}
 479	return false;
 480}
 481
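/*
 * Allow or forbid power gating of the HUBP and DPP power domains.  When
 * @enable is false every domain is forced on, i.e. power gating is disabled.
 */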
 482void dcn10_enable_power_gating_plane(
 483	struct dce_hwseq *hws,
 484	bool enable)
 485{
 486	bool force_on = true; /* disable power gating */
 487
 488	if (enable)
 489		force_on = false;
 490
 491	/* DCHUBP0/1/2/3 */
 492	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
 493	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
 494	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
 495	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
 496
 497	/* DPP0/1/2/3 */
 498	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
 499	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
 500	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
 501	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
 502}
 503
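/*
 * Take the display controllers out of legacy VGA mode.  If any DxVGA mode
 * is still enabled, clear it and kick off a VGA test render so the DCHUBP
 * timing is updated correctly (see the HW engineer's note below).
 */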
 504void dcn10_disable_vga(
 505	struct dce_hwseq *hws)
 506{
 507	unsigned int in_vga1_mode = 0;
 508	unsigned int in_vga2_mode = 0;
 509	unsigned int in_vga3_mode = 0;
 510	unsigned int in_vga4_mode = 0;
 511
 512	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
 513	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
 514	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
 515	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
 516
 517	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
 518			in_vga3_mode == 0 && in_vga4_mode == 0)
 519		return;
 520
 521	REG_WRITE(D1VGA_CONTROL, 0);
 522	REG_WRITE(D2VGA_CONTROL, 0);
 523	REG_WRITE(D3VGA_CONTROL, 0);
 524	REG_WRITE(D4VGA_CONTROL, 0);
 525
  526	/* HW Engineer's Notes:
  527	 *  During the switch from VGA to extended mode, setting VGA_TEST_ENABLE and
  528	 *  then hitting VGA_TEST_RENDER_START makes the DCHUBP timing update correctly.
  529	 *
  530	 *  The VBIOS then polls for VGA_TEST_RENDER_DONE and clears VGA_TEST_ENABLE,
  531	 *  leaving the state as it was before.
  532	 */
 533	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
 534	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
 535}
 536
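/*
 * Power gate or un-gate a single DPP instance and wait for its PGFSM to
 * report the requested power state.  No-op if DPP power gating is disabled
 * via debug options or the domain registers are not present.
 */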
 537void dcn10_dpp_pg_control(
 538		struct dce_hwseq *hws,
 539		unsigned int dpp_inst,
 540		bool power_on)
 541{
 542	uint32_t power_gate = power_on ? 0 : 1;
 543	uint32_t pwr_status = power_on ? 0 : 2;
 544
 545	if (hws->ctx->dc->debug.disable_dpp_power_gate)
 546		return;
 547	if (REG(DOMAIN1_PG_CONFIG) == 0)
 548		return;
 549
 550	switch (dpp_inst) {
 551	case 0: /* DPP0 */
 552		REG_UPDATE(DOMAIN1_PG_CONFIG,
 553				DOMAIN1_POWER_GATE, power_gate);
 554
 555		REG_WAIT(DOMAIN1_PG_STATUS,
 556				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
 557				1, 1000);
 558		break;
 559	case 1: /* DPP1 */
 560		REG_UPDATE(DOMAIN3_PG_CONFIG,
 561				DOMAIN3_POWER_GATE, power_gate);
 562
 563		REG_WAIT(DOMAIN3_PG_STATUS,
 564				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
 565				1, 1000);
 566		break;
 567	case 2: /* DPP2 */
 568		REG_UPDATE(DOMAIN5_PG_CONFIG,
 569				DOMAIN5_POWER_GATE, power_gate);
 570
 571		REG_WAIT(DOMAIN5_PG_STATUS,
 572				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
 573				1, 1000);
 574		break;
 575	case 3: /* DPP3 */
 576		REG_UPDATE(DOMAIN7_PG_CONFIG,
 577				DOMAIN7_POWER_GATE, power_gate);
 578
 579		REG_WAIT(DOMAIN7_PG_STATUS,
 580				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
 581				1, 1000);
 582		break;
 583	default:
 584		BREAK_TO_DEBUGGER();
 585		break;
 586	}
 587}
 588
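/*
 * Power gate or un-gate a single DCHUBP instance, mirroring
 * dcn10_dpp_pg_control() for the HUBP power domains.
 */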
 589void dcn10_hubp_pg_control(
 590		struct dce_hwseq *hws,
 591		unsigned int hubp_inst,
 592		bool power_on)
 593{
 594	uint32_t power_gate = power_on ? 0 : 1;
 595	uint32_t pwr_status = power_on ? 0 : 2;
 596
 597	if (hws->ctx->dc->debug.disable_hubp_power_gate)
 598		return;
 599	if (REG(DOMAIN0_PG_CONFIG) == 0)
 600		return;
 601
 602	switch (hubp_inst) {
 603	case 0: /* DCHUBP0 */
 604		REG_UPDATE(DOMAIN0_PG_CONFIG,
 605				DOMAIN0_POWER_GATE, power_gate);
 606
 607		REG_WAIT(DOMAIN0_PG_STATUS,
 608				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
 609				1, 1000);
 610		break;
 611	case 1: /* DCHUBP1 */
 612		REG_UPDATE(DOMAIN2_PG_CONFIG,
 613				DOMAIN2_POWER_GATE, power_gate);
 614
 615		REG_WAIT(DOMAIN2_PG_STATUS,
 616				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
 617				1, 1000);
 618		break;
 619	case 2: /* DCHUBP2 */
 620		REG_UPDATE(DOMAIN4_PG_CONFIG,
 621				DOMAIN4_POWER_GATE, power_gate);
 622
 623		REG_WAIT(DOMAIN4_PG_STATUS,
 624				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
 625				1, 1000);
 626		break;
 627	case 3: /* DCHUBP3 */
 628		REG_UPDATE(DOMAIN6_PG_CONFIG,
 629				DOMAIN6_POWER_GATE, power_gate);
 630
 631		REG_WAIT(DOMAIN6_PG_STATUS,
 632				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
 633				1, 1000);
 634		break;
 635	default:
 636		BREAK_TO_DEBUGGER();
 637		break;
 638	}
 639}
 640
 641static void power_on_plane(
 642	struct dce_hwseq *hws,
 643	int plane_id)
 644{
 645	DC_LOGGER_INIT(hws->ctx->logger);
 646	if (REG(DC_IP_REQUEST_CNTL)) {
 647		REG_SET(DC_IP_REQUEST_CNTL, 0,
 648				IP_REQUEST_EN, 1);
 649		hws->funcs.dpp_pg_control(hws, plane_id, true);
 650		hws->funcs.hubp_pg_control(hws, plane_id, true);
 651		REG_SET(DC_IP_REQUEST_CNTL, 0,
 652				IP_REQUEST_EN, 0);
 653		DC_LOG_DEBUG(
 654				"Un-gated front end for pipe %d\n", plane_id);
 655	}
 656}
 657
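/*
 * DEGVIDCN10_253 workaround: once every pipe is power gated, HUBP0 is
 * un-gated again (apply_DEGVIDCN10_253_wa) so that memory stutter can still
 * be enabled; undo_DEGVIDCN10_253_wa() blanks and re-gates HUBP0 before the
 * pipes are used again.
 */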
 658static void undo_DEGVIDCN10_253_wa(struct dc *dc)
 659{
 660	struct dce_hwseq *hws = dc->hwseq;
 661	struct hubp *hubp = dc->res_pool->hubps[0];
 662
 663	if (!hws->wa_state.DEGVIDCN10_253_applied)
 664		return;
 665
 666	hubp->funcs->set_blank(hubp, true);
 667
 668	REG_SET(DC_IP_REQUEST_CNTL, 0,
 669			IP_REQUEST_EN, 1);
 670
 671	hws->funcs.hubp_pg_control(hws, 0, false);
 672	REG_SET(DC_IP_REQUEST_CNTL, 0,
 673			IP_REQUEST_EN, 0);
 674
 675	hws->wa_state.DEGVIDCN10_253_applied = false;
 676}
 677
 678static void apply_DEGVIDCN10_253_wa(struct dc *dc)
 679{
 680	struct dce_hwseq *hws = dc->hwseq;
 681	struct hubp *hubp = dc->res_pool->hubps[0];
 682	int i;
 683
 684	if (dc->debug.disable_stutter)
 685		return;
 686
 687	if (!hws->wa.DEGVIDCN10_253)
 688		return;
 689
 690	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 691		if (!dc->res_pool->hubps[i]->power_gated)
 692			return;
 693	}
 694
 695	/* all pipe power gated, apply work around to enable stutter. */
 696
 697	REG_SET(DC_IP_REQUEST_CNTL, 0,
 698			IP_REQUEST_EN, 1);
 699
 700	hws->funcs.hubp_pg_control(hws, 0, true);
 701	REG_SET(DC_IP_REQUEST_CNTL, 0,
 702			IP_REQUEST_EN, 0);
 703
 704	hubp->funcs->set_hubp_blank_en(hubp, false);
 705	hws->wa_state.DEGVIDCN10_253_applied = true;
 706}
 707
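/*
 * Run the VBIOS golden init sequence: global DCN init followed by a
 * per-pipe disable, then restore the self-refresh allow setting if the
 * command table changed it (S0i3 DF-sleep workaround, see below).
 */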
 708void dcn10_bios_golden_init(struct dc *dc)
 709{
 710	struct dce_hwseq *hws = dc->hwseq;
 711	struct dc_bios *bp = dc->ctx->dc_bios;
 712	int i;
  713	bool allow_self_refresh_force_enable = true;
 714
 715	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
 716		return;
 717
 718	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
  719		allow_self_refresh_force_enable =
 720				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
 721
 722
  723	/* WA for making DF sleep when idle after resume from S0i3.
  724	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
  725	 * command table; if the bit was 0 before the command table call
  726	 * and changed to 1 afterwards, it should be set back to 0 (see
  727	 * the check after the per-pipe init below).
  728	 */
 729
 730	/* initialize dcn global */
 731	bp->funcs->enable_disp_power_gating(bp,
 732			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
 733
 734	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 735		/* initialize dcn per pipe */
 736		bp->funcs->enable_disp_power_gating(bp,
 737				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
 738	}
 739
 740	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
  741		if (allow_self_refresh_force_enable == false &&
 742				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
 743			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
 744										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
 745
 746}
 747
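/*
 * Workaround for a false OPTC underflow that can be raised while MPCC
 * disconnects are still pending: wait for the disconnects on this stream,
 * re-enable blank-data double buffering, and clear an underflow that was
 * not already latched before the wait.
 */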
 748static void false_optc_underflow_wa(
 749		struct dc *dc,
 750		const struct dc_stream_state *stream,
 751		struct timing_generator *tg)
 752{
 753	int i;
 754	bool underflow;
 755
 756	if (!dc->hwseq->wa.false_optc_underflow)
 757		return;
 758
 759	underflow = tg->funcs->is_optc_underflow_occurred(tg);
 760
 761	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 762		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
 763
 764		if (old_pipe_ctx->stream != stream)
 765			continue;
 766
 767		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
 768	}
 769
 770	if (tg->funcs->set_blank_data_double_buffer)
 771		tg->funcs->set_blank_data_double_buffer(tg, true);
 772
 773	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
 774		tg->funcs->clear_optc_underflow(tg);
 775}
 776
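/*
 * Program and start the OTG for the stream on this pipe: enable the OPTC
 * clock, program the pixel clock and timing, set the blank color, blank
 * the CRTC if needed, then enable it.  Child pipes share the parent's back
 * end and return early.
 */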
 777enum dc_status dcn10_enable_stream_timing(
 778		struct pipe_ctx *pipe_ctx,
 779		struct dc_state *context,
 780		struct dc *dc)
 781{
 782	struct dc_stream_state *stream = pipe_ctx->stream;
 783	enum dc_color_space color_space;
 784	struct tg_color black_color = {0};
 785
  786	/* The calling loop handles pipe0, the parent pipe, first; the back end
  787	 * is set up for pipe0 and shared by its child pipes, so no programming
  788	 * is needed for the children.
  789	 */
 790	if (pipe_ctx->top_pipe != NULL)
 791		return DC_OK;
 792
 793	/* TODO check if timing_changed, disable stream if timing changed */
 794
  795	/* The HW programming guide assumes the display was already disabled by
  796	 * the unplug sequence and that the OTG is stopped.
  797	 */
 798	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
 799
 800	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
 801			pipe_ctx->clock_source,
 802			&pipe_ctx->stream_res.pix_clk_params,
 803			&pipe_ctx->pll_settings)) {
 804		BREAK_TO_DEBUGGER();
 805		return DC_ERROR_UNEXPECTED;
 806	}
 807
 808	pipe_ctx->stream_res.tg->funcs->program_timing(
 809			pipe_ctx->stream_res.tg,
 810			&stream->timing,
 811			pipe_ctx->pipe_dlg_param.vready_offset,
 812			pipe_ctx->pipe_dlg_param.vstartup_start,
 813			pipe_ctx->pipe_dlg_param.vupdate_offset,
 814			pipe_ctx->pipe_dlg_param.vupdate_width,
 815			pipe_ctx->stream->signal,
 816			true);
 817
 818#if 0 /* move to after enable_crtc */
 819	/* TODO: OPP FMT, ABM. etc. should be done here. */
 820	/* or FPGA now. instance 0 only. TODO: move to opp.c */
 821
 822	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
 823
 824	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
 825				pipe_ctx->stream_res.opp,
 826				&stream->bit_depth_params,
 827				&stream->clamping);
 828#endif
 829	/* program otg blank color */
 830	color_space = stream->output_color_space;
 831	color_space_to_black_color(dc, color_space, &black_color);
 832
 833	/*
 834	 * The way 420 is packed, 2 channels carry Y component, 1 channel
 835	 * alternate between Cb and Cr, so both channels need the pixel
 836	 * value for Y
 837	 */
 838	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
 839		black_color.color_r_cr = black_color.color_g_y;
 840
 841	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
 842		pipe_ctx->stream_res.tg->funcs->set_blank_color(
 843				pipe_ctx->stream_res.tg,
 844				&black_color);
 845
 846	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
 847			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
 848		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
 849		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
 850		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
 851	}
 852
  853	/* VTG is within the DCHUB command block; DCFCLK is always on. */
 854	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
 855		BREAK_TO_DEBUGGER();
 856		return DC_ERROR_UNEXPECTED;
 857	}
 858
 859	/* TODO program crtc source select for non-virtual signal*/
 860	/* TODO program FMT */
 861	/* TODO setup link_enc */
 862	/* TODO set stream attributes */
 863	/* TODO program audio */
 864	/* TODO enable stream if timing changed */
 865	/* TODO unblank stream if DP */
 866
 867	return DC_OK;
 868}
 869
 870static void dcn10_reset_back_end_for_pipe(
 871		struct dc *dc,
 872		struct pipe_ctx *pipe_ctx,
 873		struct dc_state *context)
 874{
 875	int i;
 876	struct dc_link *link;
 877	DC_LOGGER_INIT(dc->ctx->logger);
 878	if (pipe_ctx->stream_res.stream_enc == NULL) {
 879		pipe_ctx->stream = NULL;
 880		return;
 881	}
 882
 883	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 884		link = pipe_ctx->stream->link;
  885		/* DPMS may already be disabled, or the dpms_off status may be
  886		 * incorrect due to the fastboot feature. When the system
  887		 * resumes from S4 with only a second screen attached, dpms_off
  888		 * is true but the VBIOS has lit up eDP, so check the link
  889		 * status too.
  890		 */
 891		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
 892			core_link_disable_stream(pipe_ctx);
 893		else if (pipe_ctx->stream_res.audio)
 894			dc->hwss.disable_audio_stream(pipe_ctx);
 895
 896		if (pipe_ctx->stream_res.audio) {
  897			/* disable the az_endpoint */
  898			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
  899
  900			/* free the audio resource */
  901			if (dc->caps.dynamic_audio == true) {
  902				/* audio endpoints are arbitrated dynamically; free the
  903				 * resource and reset is_audio_acquired */
 904				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
 905						pipe_ctx->stream_res.audio, false);
 906				pipe_ctx->stream_res.audio = NULL;
 907			}
 908		}
 909	}
 910
  911	/* The calling loop resets the parent pipe, pipe0, last. The back end is
  912	 * shared by all pipes and is disabled only when the parent pipe is
  913	 * disabled.
  914	 */
 915	if (pipe_ctx->top_pipe == NULL) {
 916
 917		if (pipe_ctx->stream_res.abm)
 918			dc->hwss.set_abm_immediate_disable(pipe_ctx);
 919
 920		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 921
 922		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
 923		if (pipe_ctx->stream_res.tg->funcs->set_drr)
 924			pipe_ctx->stream_res.tg->funcs->set_drr(
 925					pipe_ctx->stream_res.tg, NULL);
 926	}
 927
 928	for (i = 0; i < dc->res_pool->pipe_count; i++)
 929		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
 930			break;
 931
 932	if (i == dc->res_pool->pipe_count)
 933		return;
 934
 935	pipe_ctx->stream = NULL;
 936	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
 937					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
 938}
 939
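/*
 * Debug-only underflow recovery: blank every HUBP, soft reset DCHUBBUB,
 * toggle HUBP_DISABLE and unblank again, following the register sequence
 * documented in the block comment inside the function.  Returns true when
 * the recovery sequence was executed.
 */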
 940static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 941{
  942	struct hubp *hubp;
 943	unsigned int i;
  944	bool need_recover = false; /* set when any HUBP reports underflow */
 945
 946	if (!dc->debug.recovery_enabled)
 947		return false;
 948
 949	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 950		struct pipe_ctx *pipe_ctx =
 951			&dc->current_state->res_ctx.pipe_ctx[i];
 952		if (pipe_ctx != NULL) {
 953			hubp = pipe_ctx->plane_res.hubp;
 954			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
 955				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
 956					/* one pipe underflow, we will reset all the pipes*/
 957					need_recover = true;
 958				}
 959			}
 960		}
 961	}
 962	if (!need_recover)
 963		return false;
 964	/*
 965	DCHUBP_CNTL:HUBP_BLANK_EN=1
 966	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
 967	DCHUBP_CNTL:HUBP_DISABLE=1
 968	DCHUBP_CNTL:HUBP_DISABLE=0
 969	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
 970	DCSURF_PRIMARY_SURFACE_ADDRESS
 971	DCHUBP_CNTL:HUBP_BLANK_EN=0
 972	*/
 973
 974	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 975		struct pipe_ctx *pipe_ctx =
 976			&dc->current_state->res_ctx.pipe_ctx[i];
 977		if (pipe_ctx != NULL) {
 978			hubp = pipe_ctx->plane_res.hubp;
 979			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
 980			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 981				hubp->funcs->set_hubp_blank_en(hubp, true);
 982		}
 983	}
 984	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
 985	hubbub1_soft_reset(dc->res_pool->hubbub, true);
 986
 987	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 988		struct pipe_ctx *pipe_ctx =
 989			&dc->current_state->res_ctx.pipe_ctx[i];
 990		if (pipe_ctx != NULL) {
 991			hubp = pipe_ctx->plane_res.hubp;
 992			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
 993			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 994				hubp->funcs->hubp_disable_control(hubp, true);
 995		}
 996	}
 997	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 998		struct pipe_ctx *pipe_ctx =
 999			&dc->current_state->res_ctx.pipe_ctx[i];
1000		if (pipe_ctx != NULL) {
1001			hubp = pipe_ctx->plane_res.hubp;
1002			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1003			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 1004				hubp->funcs->hubp_disable_control(hubp, false);
1005		}
1006	}
1007	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1008	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1009	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1010		struct pipe_ctx *pipe_ctx =
1011			&dc->current_state->res_ctx.pipe_ctx[i];
1012		if (pipe_ctx != NULL) {
1013			hubp = pipe_ctx->plane_res.hubp;
1014			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1015			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 1016				hubp->funcs->set_hubp_blank_en(hubp, false);
1017		}
1018	}
1019	return true;
1020
1021}
1022
1023
1024void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1025{
1026	static bool should_log_hw_state; /* prevent hw state log by default */
1027
1028	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1029		if (should_log_hw_state) {
1030			dcn10_log_hw_state(dc, NULL);
1031		}
1032		BREAK_TO_DEBUGGER();
1033		if (dcn10_hw_wa_force_recovery(dc)) {
1034		/*check again*/
1035			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1036				BREAK_TO_DEBUGGER();
1037		}
1038	}
1039}
1040
1041/* trigger HW to start disconnect plane from stream on the next vsync */
1042void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1043{
1044	struct dce_hwseq *hws = dc->hwseq;
1045	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1046	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1047	struct mpc *mpc = dc->res_pool->mpc;
1048	struct mpc_tree *mpc_tree_params;
1049	struct mpcc *mpcc_to_remove = NULL;
1050	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1051
1052	mpc_tree_params = &(opp->mpc_tree_params);
1053	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1054
1055	/*Already reset*/
1056	if (mpcc_to_remove == NULL)
1057		return;
1058
1059	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1060	if (opp != NULL)
1061		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1062
1063	dc->optimized_required = true;
1064
1065	if (hubp->funcs->hubp_disconnect)
1066		hubp->funcs->hubp_disconnect(hubp);
1067
1068	if (dc->debug.sanity_checks)
1069		hws->funcs.verify_allow_pstate_change_high(dc);
1070}
1071
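/*
 * Power gate the DPP and HUBP that backed a plane and reset the DPP, using
 * the DC_IP_REQUEST_CNTL handshake required for front-end power gating.
 */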
1072void dcn10_plane_atomic_power_down(struct dc *dc,
1073		struct dpp *dpp,
1074		struct hubp *hubp)
1075{
1076	struct dce_hwseq *hws = dc->hwseq;
1077	DC_LOGGER_INIT(dc->ctx->logger);
1078
1079	if (REG(DC_IP_REQUEST_CNTL)) {
1080		REG_SET(DC_IP_REQUEST_CNTL, 0,
1081				IP_REQUEST_EN, 1);
1082		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1083		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1084		dpp->funcs->dpp_reset(dpp);
1085		REG_SET(DC_IP_REQUEST_CNTL, 0,
1086				IP_REQUEST_EN, 0);
1087		DC_LOG_DEBUG(
1088				"Power gated front end %d\n", hubp->inst);
1089	}
1090}
1091
 1092/* Disable the HW used by the plane.
 1093 * Note: cannot disable until the disconnect is complete.
 1094 */
1095void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1096{
1097	struct dce_hwseq *hws = dc->hwseq;
1098	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1099	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1100	int opp_id = hubp->opp_id;
1101
1102	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1103
1104	hubp->funcs->hubp_clk_cntl(hubp, false);
1105
1106	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1107
1108	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1109		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1110				pipe_ctx->stream_res.opp,
1111				false);
1112
1113	hubp->power_gated = true;
1114	dc->optimized_required = false; /* We're powering off, no need to optimize */
1115
1116	hws->funcs.plane_atomic_power_down(dc,
1117			pipe_ctx->plane_res.dpp,
1118			pipe_ctx->plane_res.hubp);
1119
1120	pipe_ctx->stream = NULL;
1121	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1122	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1123	pipe_ctx->top_pipe = NULL;
1124	pipe_ctx->bottom_pipe = NULL;
1125	pipe_ctx->plane_state = NULL;
1126}
1127
1128void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1129{
1130	struct dce_hwseq *hws = dc->hwseq;
1131	DC_LOGGER_INIT(dc->ctx->logger);
1132
1133	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1134		return;
1135
1136	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1137
1138	apply_DEGVIDCN10_253_wa(dc);
1139
1140	DC_LOG_DC("Power down front end %d\n",
1141					pipe_ctx->pipe_idx);
1142}
1143
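/*
 * Bring the pipes into a known state at init: blank and lock every enabled
 * timing generator, reset the MPC muxes, then disconnect and disable every
 * front end that is not carrying a seamless-boot stream.
 */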
1144void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1145{
1146	int i;
1147	struct dce_hwseq *hws = dc->hwseq;
1148	bool can_apply_seamless_boot = false;
1149
1150	for (i = 0; i < context->stream_count; i++) {
1151		if (context->streams[i]->apply_seamless_boot_optimization) {
1152			can_apply_seamless_boot = true;
1153			break;
1154		}
1155	}
1156
1157	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1158		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1159		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1160
 1161		/* We assume pipe_ctx is not mapped irregularly to a non-preferred
 1162		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
 1163		 * used, so don't disable it.
 1164		 */
1165		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1166			continue;
1167
1168		/* Blank controller using driver code instead of
1169		 * command table.
1170		 */
1171		if (tg->funcs->is_tg_enabled(tg)) {
1172			if (hws->funcs.init_blank != NULL) {
1173				hws->funcs.init_blank(dc, tg);
1174				tg->funcs->lock(tg);
1175			} else {
1176				tg->funcs->lock(tg);
1177				tg->funcs->set_blank(tg, true);
1178				hwss_wait_for_blank_complete(tg);
1179			}
1180		}
1181	}
1182
1183	/* num_opp will be equal to number of mpcc */
1184	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1185		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1186
1187		/* Cannot reset the MPC mux if seamless boot */
1188		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1189			continue;
1190
1191		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1192				dc->res_pool->mpc, i);
1193	}
1194
1195	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1196		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1197		struct hubp *hubp = dc->res_pool->hubps[i];
1198		struct dpp *dpp = dc->res_pool->dpps[i];
1199		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1200
 1201		/* We assume pipe_ctx is not mapped irregularly to a non-preferred
 1202		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
 1203		 * used, so don't disable it.
 1204		 */
1205		if (can_apply_seamless_boot &&
1206			pipe_ctx->stream != NULL &&
1207			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1208				pipe_ctx->stream_res.tg)) {
1209			// Enable double buffering for OTG_BLANK no matter if
1210			// seamless boot is enabled or not to suppress global sync
1211			// signals when OTG blanked. This is to prevent pipe from
1212			// requesting data while in PSR.
1213			tg->funcs->tg_init(tg);
1214			continue;
1215		}
1216
1217		/* Disable on the current state so the new one isn't cleared. */
1218		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1219
1220		dpp->funcs->dpp_reset(dpp);
1221
1222		pipe_ctx->stream_res.tg = tg;
1223		pipe_ctx->pipe_idx = i;
1224
1225		pipe_ctx->plane_res.hubp = hubp;
1226		pipe_ctx->plane_res.dpp = dpp;
1227		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1228		hubp->mpcc_id = dpp->inst;
1229		hubp->opp_id = OPP_ID_INVALID;
1230		hubp->power_gated = false;
1231
1232		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1233		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1234		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1235		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1236
1237		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1238
1239		if (tg->funcs->is_tg_enabled(tg))
1240			tg->funcs->unlock(tg);
1241
1242		dc->hwss.disable_plane(dc, pipe_ctx);
1243
1244		pipe_ctx->stream_res.tg = NULL;
1245		pipe_ctx->plane_res.hubp = NULL;
1246
1247		tg->funcs->tg_init(tg);
1248	}
1249}
1250
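/*
 * One-time hardware init for DCN10: initialize clocks and the DCCG, run the
 * VBIOS golden init, derive the reference clocks, init link encoders,
 * audio, backlight, ABM and DMCU, power gate unused DSCs, optionally power
 * down DP displays lit by the VBIOS, and enable clock/power gating.
 */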
1251void dcn10_init_hw(struct dc *dc)
1252{
1253	int i, j;
1254	struct abm *abm = dc->res_pool->abm;
1255	struct dmcu *dmcu = dc->res_pool->dmcu;
1256	struct dce_hwseq *hws = dc->hwseq;
1257	struct dc_bios *dcb = dc->ctx->dc_bios;
1258	struct resource_pool *res_pool = dc->res_pool;
1259	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1260	bool   is_optimized_init_done = false;
1261
1262	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1263		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1264
1265	// Initialize the dccg
1266	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1267		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1268
1269	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1270
1271		REG_WRITE(REFCLK_CNTL, 0);
1272		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1273		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1274
1275		if (!dc->debug.disable_clock_gate) {
1276			/* enable all DCN clock gating */
1277			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1278
1279			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1280
1281			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1282		}
1283
1284		//Enable ability to power gate / don't force power on permanently
1285		if (hws->funcs.enable_power_gating_plane)
1286			hws->funcs.enable_power_gating_plane(hws, true);
1287
1288		return;
1289	}
1290
1291	if (!dcb->funcs->is_accelerated_mode(dcb))
1292		hws->funcs.disable_vga(dc->hwseq);
1293
1294	hws->funcs.bios_golden_init(dc);
1295
1296	if (dc->ctx->dc_bios->fw_info_valid) {
1297		res_pool->ref_clocks.xtalin_clock_inKhz =
1298				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1299
1300		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1301			if (res_pool->dccg && res_pool->hubbub) {
1302
1303				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1304						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1305						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1306
1307				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1308						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1309						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1310			} else {
1311				// Not all ASICs have DCCG sw component
1312				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1313						res_pool->ref_clocks.xtalin_clock_inKhz;
1314				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1315						res_pool->ref_clocks.xtalin_clock_inKhz;
1316			}
1317		}
1318	} else
1319		ASSERT_CRITICAL(false);
1320
1321	for (i = 0; i < dc->link_count; i++) {
1322		/* Power up AND update implementation according to the
1323		 * required signal (which may be different from the
1324		 * default signal on connector).
1325		 */
1326		struct dc_link *link = dc->links[i];
1327
1328		if (!is_optimized_init_done)
1329			link->link_enc->funcs->hw_init(link->link_enc);
1330
1331		/* Check for enabled DIG to identify enabled display */
1332		if (link->link_enc->funcs->is_dig_enabled &&
1333			link->link_enc->funcs->is_dig_enabled(link->link_enc))
1334			link->link_status.link_active = true;
1335	}
1336
1337	/* Power gate DSCs */
1338	if (!is_optimized_init_done) {
1339		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1340			if (hws->funcs.dsc_pg_control != NULL)
1341				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1342	}
1343
1344	/* we want to turn off all dp displays before doing detection */
1345	if (dc->config.power_down_display_on_boot) {
1346		uint8_t dpcd_power_state = '\0';
1347		enum dc_status status = DC_ERROR_UNEXPECTED;
1348
1349		for (i = 0; i < dc->link_count; i++) {
1350			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1351				continue;
1352
1353			/*
1354			 * If any of the displays are lit up turn them off.
1355			 * The reason is that some MST hubs cannot be turned off
1356			 * completely until we tell them to do so.
1357			 * If not turned off, then displays connected to MST hub
1358			 * won't light up.
1359			 */
1360			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1361							&dpcd_power_state, sizeof(dpcd_power_state));
1362			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1363				/* blank dp stream before power off receiver*/
1364				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1365					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
1366
1367					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1368						if (fe == dc->res_pool->stream_enc[j]->id) {
1369							dc->res_pool->stream_enc[j]->funcs->dp_blank(
1370										dc->res_pool->stream_enc[j]);
1371							break;
1372						}
1373					}
1374				}
1375				dp_receiver_power_ctrl(dc->links[i], false);
1376			}
1377		}
1378	}
1379
1380	/* If taking control over from VBIOS, we may want to optimize our first
1381	 * mode set, so we need to skip powering down pipes until we know which
1382	 * pipes we want to use.
1383	 * Otherwise, if taking control is not possible, we need to power
1384	 * everything down.
1385	 */
1386	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1387		if (!is_optimized_init_done) {
1388			hws->funcs.init_pipes(dc, dc->current_state);
1389			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1390				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1391						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1392		}
1393	}
1394
1395	if (!is_optimized_init_done) {
1396
1397		for (i = 0; i < res_pool->audio_count; i++) {
1398			struct audio *audio = res_pool->audios[i];
1399
1400			audio->funcs->hw_init(audio);
1401		}
1402
1403		for (i = 0; i < dc->link_count; i++) {
1404			struct dc_link *link = dc->links[i];
1405
1406			if (link->panel_cntl)
1407				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1408		}
1409
1410		if (abm != NULL)
1411			abm->funcs->abm_init(abm, backlight);
1412
1413		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1414			dmcu->funcs->dmcu_init(dmcu);
1415	}
1416
1417	if (abm != NULL && dmcu != NULL)
1418		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1419
 1420	/* Power the AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1421	if (!is_optimized_init_done)
1422		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1423
1424	if (!dc->debug.disable_clock_gate) {
1425		/* enable all DCN clock gating */
1426		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1427
1428		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1429
1430		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1431	}
1432	if (hws->funcs.enable_power_gating_plane)
1433		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1434
1435	if (dc->clk_mgr->funcs->notify_wm_ranges)
1436		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1437
1438#ifdef CONFIG_DRM_AMD_DC_DCN3_0
1439	if (dc->clk_mgr->funcs->set_hard_max_memclk)
1440		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
1441#endif
1442
1443}
1444
 1445/* In headless boot cases, a DIG may be left turned on,
 1446 * which causes HW/SW discrepancies.
 1447 * To avoid this, power down the hardware on boot
 1448 * if a DIG is enabled and seamless boot is not enabled.
 1449 */
1450void dcn10_power_down_on_boot(struct dc *dc)
1451{
1452	int i = 0;
1453	struct dc_link *edp_link;
1454
1455	if (!dc->config.power_down_display_on_boot)
1456		return;
1457
1458	edp_link = get_edp_link(dc);
1459	if (edp_link &&
1460			edp_link->link_enc->funcs->is_dig_enabled &&
1461			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1462			dc->hwseq->funcs.edp_backlight_control &&
1463			dc->hwss.power_down &&
1464			dc->hwss.edp_power_control) {
1465		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1466		dc->hwss.power_down(dc);
1467		dc->hwss.edp_power_control(edp_link, false);
1468	} else {
1469		for (i = 0; i < dc->link_count; i++) {
1470			struct dc_link *link = dc->links[i];
1471
1472			if (link->link_enc->funcs->is_dig_enabled &&
1473					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1474					dc->hwss.power_down) {
1475				dc->hwss.power_down(dc);
1476				break;
1477			}
1478
1479		}
1480	}
1481
1482	/*
1483	 * Call update_clocks with empty context
1484	 * to send DISPLAY_OFF
1485	 * Otherwise DISPLAY_OFF may not be asserted
1486	 */
1487	if (dc->clk_mgr->funcs->set_low_power_state)
1488		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1489}
1490
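/*
 * Reset the back end of every top pipe whose stream is gone or needs
 * reprogramming in the new state, and power down its old clock source.
 */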
1491void dcn10_reset_hw_ctx_wrap(
1492		struct dc *dc,
1493		struct dc_state *context)
1494{
1495	int i;
1496	struct dce_hwseq *hws = dc->hwseq;
1497
1498	/* Reset Back End*/
1499	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1500		struct pipe_ctx *pipe_ctx_old =
1501			&dc->current_state->res_ctx.pipe_ctx[i];
1502		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1503
1504		if (!pipe_ctx_old->stream)
1505			continue;
1506
1507		if (pipe_ctx_old->top_pipe)
1508			continue;
1509
1510		if (!pipe_ctx->stream ||
1511				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1512			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1513
1514			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1515			if (hws->funcs.enable_stream_gating)
1516				hws->funcs.enable_stream_gating(dc, pipe_ctx);
1517			if (old_clk)
1518				old_clk->funcs->cs_power_down(old_clk);
1519		}
1520	}
1521}
1522
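/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right-eye surface: temporarily swap the left address for
 * the right one and return true so the caller can restore it afterwards.
 */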
1523static bool patch_address_for_sbs_tb_stereo(
1524		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1525{
1526	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1527	bool sec_split = pipe_ctx->top_pipe &&
1528			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1529	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1530		(pipe_ctx->stream->timing.timing_3d_format ==
1531		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1532		 pipe_ctx->stream->timing.timing_3d_format ==
1533		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1534		*addr = plane_state->address.grph_stereo.left_addr;
1535		plane_state->address.grph_stereo.left_addr =
1536		plane_state->address.grph_stereo.right_addr;
1537		return true;
1538	} else {
1539		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1540			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1541			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1542			plane_state->address.grph_stereo.right_addr =
1543			plane_state->address.grph_stereo.left_addr;
1544		}
1545	}
1546	return false;
1547}
1548
1549void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1550{
1551	bool addr_patched = false;
1552	PHYSICAL_ADDRESS_LOC addr;
1553	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1554
1555	if (plane_state == NULL)
1556		return;
1557
1558	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1559
1560	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1561			pipe_ctx->plane_res.hubp,
1562			&plane_state->address,
1563			plane_state->flip_immediate);
1564
1565	plane_state->status.requested_address = plane_state->address;
1566
1567	if (plane_state->flip_immediate)
1568		plane_state->status.current_address = plane_state->address;
1569
1570	if (addr_patched)
1571		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1572}
1573
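/*
 * Program the DPP degamma (input transfer function) and, when applicable,
 * the input LUT for a plane.  Predefined sRGB and BT.709 curves use the
 * hardware curves; PQ and arbitrary curves are translated into a PWL.
 */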
1574bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1575			const struct dc_plane_state *plane_state)
1576{
1577	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1578	const struct dc_transfer_func *tf = NULL;
1579	bool result = true;
1580
1581	if (dpp_base == NULL)
1582		return false;
1583
1584	if (plane_state->in_transfer_func)
1585		tf = plane_state->in_transfer_func;
1586
1587	if (plane_state->gamma_correction &&
1588		!dpp_base->ctx->dc->debug.always_use_regamma
1589		&& !plane_state->gamma_correction->is_identity
1590			&& dce_use_lut(plane_state->format))
1591		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1592
1593	if (tf == NULL)
1594		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1595	else if (tf->type == TF_TYPE_PREDEFINED) {
1596		switch (tf->tf) {
1597		case TRANSFER_FUNCTION_SRGB:
1598			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1599			break;
1600		case TRANSFER_FUNCTION_BT709:
1601			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1602			break;
1603		case TRANSFER_FUNCTION_LINEAR:
1604			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1605			break;
1606		case TRANSFER_FUNCTION_PQ:
1607			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1608			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1609			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1610			result = true;
1611			break;
1612		default:
1613			result = false;
1614			break;
1615		}
1616	} else if (tf->type == TF_TYPE_BYPASS) {
1617		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1618	} else {
1619		cm_helper_translate_curve_to_degamma_hw_format(tf,
1620					&dpp_base->degamma_params);
1621		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1622				&dpp_base->degamma_params);
1623		result = true;
1624	}
1625
1626	return result;
1627}
1628
1629#define MAX_NUM_HW_POINTS 0x200
1630
1631static void log_tf(struct dc_context *ctx,
1632				struct dc_transfer_func *tf, uint32_t hw_points_num)
1633{
1634	// DC_LOG_GAMMA is default logging of all hw points
1635	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1636	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1637	int i = 0;
1638
1639	DC_LOGGER_INIT(ctx->logger);
1640	DC_LOG_GAMMA("Gamma Correction TF");
1641	DC_LOG_ALL_GAMMA("Logging all tf points...");
1642	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1643
1644	for (i = 0; i < hw_points_num; i++) {
1645		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1646		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1647		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1648	}
1649
1650	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1651		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1652		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1653		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1654	}
1655}
1656
1657bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1658				const struct dc_stream_state *stream)
1659{
1660	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1661
1662	if (dpp == NULL)
1663		return false;
1664
1665	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1666
1667	if (stream->out_transfer_func &&
1668	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1669	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1670		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1671
1672	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1673	 * full update.
1674	 */
1675	else if (cm_helper_translate_curve_to_hw_format(
1676			stream->out_transfer_func,
1677			&dpp->regamma_params, false)) {
1678		dpp->funcs->dpp_program_regamma_pwl(
1679				dpp,
1680				&dpp->regamma_params, OPP_REGAMMA_USER);
1681	} else
1682		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1683
1684	if (stream != NULL && stream->ctx != NULL &&
1685			stream->out_transfer_func != NULL) {
1686		log_tf(stream->ctx,
1687				stream->out_transfer_func,
1688				dpp->regamma_params.hw_points_num);
1689	}
1690
1691	return true;
1692}
1693
1694void dcn10_pipe_control_lock(
1695	struct dc *dc,
1696	struct pipe_ctx *pipe,
1697	bool lock)
1698{
1699	struct dce_hwseq *hws = dc->hwseq;
1700
1701	/* use the TG master update lock to lock everything on the TG,
1702	 * therefore only the top pipe needs to take the lock
1703	 */
1704	if (!pipe || pipe->top_pipe)
1705		return;
1706
1707	if (dc->debug.sanity_checks)
1708		hws->funcs.verify_allow_pstate_change_high(dc);
1709
1710	if (lock)
1711		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1712	else
1713		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1714
1715	if (dc->debug.sanity_checks)
1716		hws->funcs.verify_allow_pstate_change_high(dc);
1717}
1718
1719/**
1720 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1721 *
1722 * Software keepout workaround to prevent cursor update locking from stalling
1723 * out cursor updates indefinitely, or from retaining old values in the case
1724 * where the viewport changes in the same frame as the cursor.
1725 *
1726 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1727 * too close to VUPDATE, then stall out until VUPDATE finishes.
1728 *
1729 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1730 *       to avoid the need for this workaround.
1731 */
1732static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1733{
1734	struct dc_stream_state *stream = pipe_ctx->stream;
1735	struct crtc_position position;
1736	uint32_t vupdate_start, vupdate_end;
1737	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1738	unsigned int us_per_line, us_vupdate;
1739
1740	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1741		return;
1742
1743	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1744		return;
1745
1746	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1747				       &vupdate_end);
1748
1749	dc->hwss.get_position(&pipe_ctx, 1, &position);
1750	vpos = position.vertical_count;
1751
1752	/* Avoid wraparound calculation issues */
1753	vupdate_start += stream->timing.v_total;
1754	vupdate_end += stream->timing.v_total;
1755	vpos += stream->timing.v_total;
1756
1757	if (vpos <= vupdate_start) {
1758		/* VPOS is in VACTIVE or back porch. */
1759		lines_to_vupdate = vupdate_start - vpos;
1760	} else if (vpos > vupdate_end) {
1761		/* VPOS is in the front porch. */
1762		return;
1763	} else {
1764		/* VPOS is in VUPDATE. */
1765		lines_to_vupdate = 0;
1766	}
1767
1768	/* Calculate time until VUPDATE in microseconds. */
1769	us_per_line =
1770		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1771	us_to_vupdate = lines_to_vupdate * us_per_line;
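	/*
	 * Illustrative numbers (not taken from this driver): h_total = 2200 and
	 * pix_clk_100hz = 1485000 (148.5 MHz) give 2200 * 10000 / 1485000,
	 * i.e. roughly 14 us per line.
	 */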
1772
1773	/* 70 us is a conservative estimate of cursor update time */
1774	if (us_to_vupdate > 70)
1775		return;
1776
1777	/* Stall out until the cursor update completes. */
1778	if (vupdate_end < vupdate_start)
1779		vupdate_end += stream->timing.v_total;
1780	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1781	udelay(us_to_vupdate + us_vupdate);
1782}
1783
1784void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1785{
1786	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1787	if (!pipe || pipe->top_pipe)
1788		return;
1789
1790	/* Prevent cursor lock from stalling out cursor updates. */
1791	if (lock)
1792		delay_cursor_until_vupdate(dc, pipe);
1793
1794	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1795		union dmub_hw_lock_flags hw_locks = { 0 };
1796		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1797
1798		hw_locks.bits.lock_cursor = 1;
1799		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1800
1801		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1802					lock,
1803					&hw_locks,
1804					&inst_flags);
1805	} else
1806		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1807				pipe->stream_res.opp->inst, lock);
1808}
1809
1810static bool wait_for_reset_trigger_to_occur(
1811	struct dc_context *dc_ctx,
1812	struct timing_generator *tg)
1813{
1814	bool rc = false;
1815
1816	/* To avoid an endless loop, we wait at most
1817	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1818	const uint32_t frames_to_wait_on_triggered_reset = 10;
1819	int i;
1820
1821	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1822
1823		if (!tg->funcs->is_counter_moving(tg)) {
1824			DC_ERROR("TG counter is not moving!\n");
1825			break;
1826		}
1827
1828		if (tg->funcs->did_triggered_reset_occur(tg)) {
1829			rc = true;
1830			/* usually occurs at i=1 */
1831			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1832					i);
1833			break;
1834		}
1835
1836		/* Wait for one frame. */
1837		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1838		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1839	}
1840
1841	if (!rc)
1842		DC_ERROR("GSL: Timeout on reset trigger!\n");
1843
1844	return rc;
1845}
1846
1847void dcn10_enable_timing_synchronization(
1848	struct dc *dc,
1849	int group_index,
1850	int group_size,
1851	struct pipe_ctx *grouped_pipes[])
1852{
1853	struct dc_context *dc_ctx = dc->ctx;
1854	int i;
1855
1856	DC_SYNC_INFO("Setting up OTG reset trigger\n");
1857
1858	for (i = 1; i < group_size; i++)
1859		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1860				grouped_pipes[i]->stream_res.tg,
1861				grouped_pipes[0]->stream_res.tg->inst);
1862
1863	DC_SYNC_INFO("Waiting for trigger\n");
1864
1865	/* Only one pipe needs to be checked for the reset, as all the others
1866	 * are synchronized to it. Look at the last pipe programmed to reset.
1867	 */
1868
1869	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1870	for (i = 1; i < group_size; i++)
1871		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1872				grouped_pipes[i]->stream_res.tg);
1873
1874	DC_SYNC_INFO("Sync complete\n");
1875}
1876
1877void dcn10_enable_per_frame_crtc_position_reset(
1878	struct dc *dc,
1879	int group_size,
1880	struct pipe_ctx *grouped_pipes[])
1881{
1882	struct dc_context *dc_ctx = dc->ctx;
1883	int i;
1884
1885	DC_SYNC_INFO("Setting up\n");
1886	for (i = 0; i < group_size; i++)
1887		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1888			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1889					grouped_pipes[i]->stream_res.tg,
1890					0,
1891					&grouped_pipes[i]->stream->triggered_crtc_reset);
1892
1893	DC_SYNC_INFO("Waiting for trigger\n");
1894
1895	for (i = 0; i < group_size; i++)
1896		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1897
1898	DC_SYNC_INFO("Multi-display sync is complete\n");
1899}
1900
1901/*static void print_rq_dlg_ttu(
1902		struct dc *dc,
1903		struct pipe_ctx *pipe_ctx)
1904{
1905	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1906			"\n============== DML TTU Output parameters [%d] ==============\n"
1907			"qos_level_low_wm: %d, \n"
1908			"qos_level_high_wm: %d, \n"
1909			"min_ttu_vblank: %d, \n"
1910			"qos_level_flip: %d, \n"
1911			"refcyc_per_req_delivery_l: %d, \n"
1912			"qos_level_fixed_l: %d, \n"
1913			"qos_ramp_disable_l: %d, \n"
1914			"refcyc_per_req_delivery_pre_l: %d, \n"
1915			"refcyc_per_req_delivery_c: %d, \n"
1916			"qos_level_fixed_c: %d, \n"
1917			"qos_ramp_disable_c: %d, \n"
1918			"refcyc_per_req_delivery_pre_c: %d\n"
1919			"=============================================================\n",
1920			pipe_ctx->pipe_idx,
1921			pipe_ctx->ttu_regs.qos_level_low_wm,
1922			pipe_ctx->ttu_regs.qos_level_high_wm,
1923			pipe_ctx->ttu_regs.min_ttu_vblank,
1924			pipe_ctx->ttu_regs.qos_level_flip,
1925			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1926			pipe_ctx->ttu_regs.qos_level_fixed_l,
1927			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1928			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1929			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1930			pipe_ctx->ttu_regs.qos_level_fixed_c,
1931			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1932			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1933			);
1934
1935	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1936			"\n============== DML DLG Output parameters [%d] ==============\n"
1937			"refcyc_h_blank_end: %d, \n"
1938			"dlg_vblank_end: %d, \n"
1939			"min_dst_y_next_start: %d, \n"
1940			"refcyc_per_htotal: %d, \n"
1941			"refcyc_x_after_scaler: %d, \n"
1942			"dst_y_after_scaler: %d, \n"
1943			"dst_y_prefetch: %d, \n"
1944			"dst_y_per_vm_vblank: %d, \n"
1945			"dst_y_per_row_vblank: %d, \n"
1946			"ref_freq_to_pix_freq: %d, \n"
1947			"vratio_prefetch: %d, \n"
1948			"refcyc_per_pte_group_vblank_l: %d, \n"
1949			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1950			"dst_y_per_pte_row_nom_l: %d, \n"
1951			"refcyc_per_pte_group_nom_l: %d, \n",
1952			pipe_ctx->pipe_idx,
1953			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1954			pipe_ctx->dlg_regs.dlg_vblank_end,
1955			pipe_ctx->dlg_regs.min_dst_y_next_start,
1956			pipe_ctx->dlg_regs.refcyc_per_htotal,
1957			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1958			pipe_ctx->dlg_regs.dst_y_after_scaler,
1959			pipe_ctx->dlg_regs.dst_y_prefetch,
1960			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1961			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1962			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1963			pipe_ctx->dlg_regs.vratio_prefetch,
1964			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1965			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1966			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1967			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1968			);
1969
1970	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1971			"\ndst_y_per_meta_row_nom_l: %d, \n"
1972			"refcyc_per_meta_chunk_nom_l: %d, \n"
1973			"refcyc_per_line_delivery_pre_l: %d, \n"
1974			"refcyc_per_line_delivery_l: %d, \n"
1975			"vratio_prefetch_c: %d, \n"
1976			"refcyc_per_pte_group_vblank_c: %d, \n"
1977			"refcyc_per_meta_chunk_vblank_c: %d, \n"
1978			"dst_y_per_pte_row_nom_c: %d, \n"
1979			"refcyc_per_pte_group_nom_c: %d, \n"
1980			"dst_y_per_meta_row_nom_c: %d, \n"
1981			"refcyc_per_meta_chunk_nom_c: %d, \n"
1982			"refcyc_per_line_delivery_pre_c: %d, \n"
1983			"refcyc_per_line_delivery_c: %d \n"
1984			"========================================================\n",
1985			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1986			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1987			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1988			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1989			pipe_ctx->dlg_regs.vratio_prefetch_c,
1990			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1991			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1992			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1993			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1994			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1995			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1996			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1997			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1998			);
1999
2000	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2001			"\n============== DML RQ Output parameters [%d] ==============\n"
2002			"chunk_size: %d \n"
2003			"min_chunk_size: %d \n"
2004			"meta_chunk_size: %d \n"
2005			"min_meta_chunk_size: %d \n"
2006			"dpte_group_size: %d \n"
2007			"mpte_group_size: %d \n"
2008			"swath_height: %d \n"
2009			"pte_row_height_linear: %d \n"
2010			"========================================================\n",
2011			pipe_ctx->pipe_idx,
2012			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2013			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2014			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2015			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2016			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2017			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2018			pipe_ctx->rq_regs.rq_regs_l.swath_height,
2019			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
2020			);
2021}
2022*/
2023
2024static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2025		struct vm_system_aperture_param *apt,
2026		struct dce_hwseq *hws)
2027{
2028	PHYSICAL_ADDRESS_LOC physical_page_number;
2029	uint32_t logical_addr_low;
2030	uint32_t logical_addr_high;
2031
2032	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2033			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2034	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2035			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2036
2037	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2038			LOGICAL_ADDR, &logical_addr_low);
2039
2040	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2041			LOGICAL_ADDR, &logical_addr_high);
2042
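	/*
	 * The registers hold page/segment numbers rather than byte addresses:
	 * judging by the shifts below, the default address is a 4 KB page
	 * number (<< 12) and the aperture low/high addresses are in 256 KB
	 * units (<< 18).
	 */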
2043	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2044	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2045	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2046}
2047
2048/* Temporarily read the settings from registers; in the future the values will come from the KMD directly */
2049static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2050		struct vm_context0_param *vm0,
2051		struct dce_hwseq *hws)
2052{
2053	PHYSICAL_ADDRESS_LOC fb_base;
2054	PHYSICAL_ADDRESS_LOC fb_offset;
2055	uint32_t fb_base_value;
2056	uint32_t fb_offset_value;
2057
2058	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2059	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2060
2061	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2062			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2063	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2064			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2065
2066	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2067			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2068	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2069			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2070
2071	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2072			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2073	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2074			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2075
2076	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2077			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2078	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2079			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2080
2081	/*
2082	 * The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2083	 * Therefore we need to do
2084	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2085	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2086	 */
2087	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2088	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2089	vm0->pte_base.quad_part += fb_base.quad_part;
2090	vm0->pte_base.quad_part -= fb_offset.quad_part;
2091}
2092
2093
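/*
 * Snapshot the current MMHUB system aperture and VM context 0 settings and
 * program the matching values into this HUBP (a temporary arrangement; see
 * the note above about getting these values from the KMD in the future).
 */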
2094void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2095{
2096	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2097	struct vm_system_aperture_param apt = { {{ 0 } } };
2098	struct vm_context0_param vm0 = { { { 0 } } };
2099
2100	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2101	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2102
2103	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2104	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2105}
2106
2107static void dcn10_enable_plane(
2108	struct dc *dc,
2109	struct pipe_ctx *pipe_ctx,
2110	struct dc_state *context)
2111{
2112	struct dce_hwseq *hws = dc->hwseq;
2113
2114	if (dc->debug.sanity_checks) {
2115		hws->funcs.verify_allow_pstate_change_high(dc);
2116	}
2117
2118	undo_DEGVIDCN10_253_wa(dc);
2119
2120	power_on_plane(dc->hwseq,
2121		pipe_ctx->plane_res.hubp->inst);
2122
2123	/* enable DCFCLK for the current DCHUB */
2124	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2125
2126	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2127	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2128			pipe_ctx->stream_res.opp,
2129			true);
2130
2131/* TODO: enable/disable in dm as per update type.
2132	if (plane_state) {
2133		DC_LOG_DC(dc->ctx->logger,
2134				"Pipe:%d 0x%x: addr hi:0x%x, "
2135				"addr low:0x%x, "
2136				"src: %d, %d, %d,"
2137				" %d; dst: %d, %d, %d, %d;\n",
2138				pipe_ctx->pipe_idx,
2139				plane_state,
2140				plane_state->address.grph.addr.high_part,
2141				plane_state->address.grph.addr.low_part,
2142				plane_state->src_rect.x,
2143				plane_state->src_rect.y,
2144				plane_state->src_rect.width,
2145				plane_state->src_rect.height,
2146				plane_state->dst_rect.x,
2147				plane_state->dst_rect.y,
2148				plane_state->dst_rect.width,
2149				plane_state->dst_rect.height);
2150
2151		DC_LOG_DC(dc->ctx->logger,
2152				"Pipe %d: width, height, x, y         format:%d\n"
2153				"viewport:%d, %d, %d, %d\n"
2154				"recout:  %d, %d, %d, %d\n",
2155				pipe_ctx->pipe_idx,
2156				plane_state->format,
2157				pipe_ctx->plane_res.scl_data.viewport.width,
2158				pipe_ctx->plane_res.scl_data.viewport.height,
2159				pipe_ctx->plane_res.scl_data.viewport.x,
2160				pipe_ctx->plane_res.scl_data.viewport.y,
2161				pipe_ctx->plane_res.scl_data.recout.width,
2162				pipe_ctx->plane_res.scl_data.recout.height,
2163				pipe_ctx->plane_res.scl_data.recout.x,
2164				pipe_ctx->plane_res.scl_data.recout.y);
2165		print_rq_dlg_ttu(dc, pipe_ctx);
2166	}
2167*/
2168	if (dc->config.gpu_vm_support)
2169		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2170
2171	if (dc->debug.sanity_checks) {
2172		hws->funcs.verify_allow_pstate_change_high(dc);
2173	}
2174}
2175
2176void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2177{
2178	int i = 0;
2179	struct dpp_grph_csc_adjustment adjust;
2180	memset(&adjust, 0, sizeof(adjust));
2181	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2182
2183
2184	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2185		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2186		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2187			adjust.temperature_matrix[i] =
2188				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2189	} else if (pipe_ctx->plane_state &&
2190		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2191		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2192		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2193			adjust.temperature_matrix[i] =
2194				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2195	}
2196
2197	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2198}
2199
2200
2201static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2202{
2203	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2204		if (pipe_ctx->top_pipe) {
2205			struct pipe_ctx *top = pipe_ctx->top_pipe;
2206
2207			while (top->top_pipe)
2208				top = top->top_pipe; // Traverse to top pipe_ctx
2209			if (top->plane_state && top->plane_state->layer_index == 0)
2210				return true; // Front MPO plane not hidden
2211		}
2212	}
2213	return false;
2214}
2215
2216static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2217{
2218	// Override rear plane RGB bias to fix MPO brightness
2219	uint16_t rgb_bias = matrix[3];
2220
2221	matrix[3] = 0;
2222	matrix[7] = 0;
2223	matrix[11] = 0;
2224	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2225	matrix[3] = rgb_bias;
2226	matrix[7] = rgb_bias;
2227	matrix[11] = rgb_bias;
2228}
2229
2230void dcn10_program_output_csc(struct dc *dc,
2231		struct pipe_ctx *pipe_ctx,
2232		enum dc_color_space colorspace,
2233		uint16_t *matrix,
2234		int opp_id)
2235{
2236	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2237		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2238
2239			/* MPO is broken with RGB colorspaces when the OCSC matrix
2240			 * brightness offset is >= 0 on DCN1, because OCSC runs before
2241			 * MPC: blending adds the front + rear offsets to the rear plane.
2242			 *
2243			 * The fix is to set the RGB bias to 0 on the rear plane; the top
2244			 * plane's black-value pixels then add the offset instead.
2245			 */
2246
2247			int16_t rgb_bias = matrix[3];
2248			// matrix[3/7/11] are all the same offset value
2249
2250			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2251				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2252			} else {
2253				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2254			}
2255		}
2256	} else {
2257		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2258			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2259	}
2260}
2261
2262void dcn10_get_surface_visual_confirm_color(
2263		const struct pipe_ctx *pipe_ctx,
2264		struct tg_color *color)
2265{
2266	uint32_t color_value = MAX_TG_COLOR_VALUE;
2267
2268	switch (pipe_ctx->plane_res.scl_data.format) {
2269	case PIXEL_FORMAT_ARGB8888:
2270		/* set border color to red */
2271		color->color_r_cr = color_value;
2272		break;
2273
2274	case PIXEL_FORMAT_ARGB2101010:
2275		/* set border color to blue */
2276		color->color_b_cb = color_value;
2277		break;
2278	case PIXEL_FORMAT_420BPP8:
2279		/* set border color to green */
2280		color->color_g_y = color_value;
2281		break;
2282	case PIXEL_FORMAT_420BPP10:
2283		/* set border color to yellow */
2284		color->color_g_y = color_value;
2285		color->color_r_cr = color_value;
2286		break;
2287	case PIXEL_FORMAT_FP16:
2288		/* set border color to white */
2289		color->color_r_cr = color_value;
2290		color->color_b_cb = color_value;
2291		color->color_g_y = color_value;
2292		break;
2293	default:
2294		break;
2295	}
2296}
2297
2298void dcn10_get_hdr_visual_confirm_color(
2299		struct pipe_ctx *pipe_ctx,
2300		struct tg_color *color)
2301{
2302	uint32_t color_value = MAX_TG_COLOR_VALUE;
2303
2304	// Determine the overscan color based on the top-most (desktop) plane's context
2305	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2306
2307	while (top_pipe_ctx->top_pipe != NULL)
2308		top_pipe_ctx = top_pipe_ctx->top_pipe;
2309
2310	switch (top_pipe_ctx->plane_res.scl_data.format) {
2311	case PIXEL_FORMAT_ARGB2101010:
2312		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2313			/* HDR10, ARGB2101010 - set border color to red */
2314			color->color_r_cr = color_value;
2315		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2316			/* FreeSync 2 ARGB2101010 - set border color to pink */
2317			color->color_r_cr = color_value;
2318			color->color_b_cb = color_value;
2319		}
2320		break;
2321	case PIXEL_FORMAT_FP16:
2322		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2323			/* HDR10, FP16 - set border color to blue */
2324			color->color_b_cb = color_value;
2325		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2326			/* FreeSync 2 HDR - set border color to green */
2327			color->color_g_y = color_value;
2328		}
2329		break;
2330	default:
2331		/* SDR - set border color to Gray */
2332		color->color_r_cr = color_value/2;
2333		color->color_b_cb = color_value/2;
2334		color->color_g_y = color_value/2;
2335		break;
2336	}
2337}
2338
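/*
 * Program the per-plane input path in the DPP: the input CSC / format setup,
 * followed by the bias and scale (prescale) registers.
 */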
2339static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2340{
2341	struct dc_bias_and_scale bns_params = {0};
2342
2343	// program the input csc
2344	dpp->funcs->dpp_setup(dpp,
2345			plane_state->format,
2346			EXPANSION_MODE_ZERO,
2347			plane_state->input_csc_color_matrix,
2348			plane_state->color_space,
2349			NULL);
2350
2351	//set scale and bias registers
2352	build_prescale_params(&bns_params, plane_state);
2353	if (dpp->funcs->dpp_program_bias_and_scale)
2354		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2355}
2356
2357void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2358{
2359	struct dce_hwseq *hws = dc->hwseq;
2360	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2361	struct mpcc_blnd_cfg blnd_cfg = {{0}};
2362	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2363	int mpcc_id;
2364	struct mpcc *new_mpcc;
2365	struct mpc *mpc = dc->res_pool->mpc;
2366	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2367
2368	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2369		hws->funcs.get_hdr_visual_confirm_color(
2370				pipe_ctx, &blnd_cfg.black_color);
2371	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2372		hws->funcs.get_surface_visual_confirm_color(
2373				pipe_ctx, &blnd_cfg.black_color);
2374	} else {
2375		color_space_to_black_color(
2376				dc, pipe_ctx->stream->output_color_space,
2377				&blnd_cfg.black_color);
2378	}
2379
2380	/*
2381	 * The way 420 is packed, 2 channels carry Y component, 1 channel
2382	 * alternate between Cb and Cr, so both channels need the pixel
2383	 * value for Y
2384	 */
2385	if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2386		blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
2387
2388	if (per_pixel_alpha)
2389		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2390	else
2391		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2392
2393	blnd_cfg.overlap_only = false;
2394	blnd_cfg.global_gain = 0xff;
2395
2396	if (pipe_ctx->plane_state->global_alpha)
2397		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2398	else
2399		blnd_cfg.global_alpha = 0xff;
2400
2401	/* DCN1.0 has the output CM before the MPC, which seems to interfere
2402	 * with pre-multiplied alpha.
2403	 */
2404	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2405			pipe_ctx->stream->output_color_space)
2406					&& per_pixel_alpha;
2407
2408
2409	/*
2410	 * TODO: remove hack
2411	 * Note: currently there is a bug in init_hw such that
2412	 * on resume from hibernate, BIOS sets up MPCC0, and
2413	 * we do mpcc_remove but the mpcc cannot go to idle
2414	 * after remove. This causes us to pick mpcc1 here,
2415	 * which causes a pstate hang for an as-yet-unknown reason.
2416	 */
2417	mpcc_id = hubp->inst;
2418
2419	/* If there is no full update, there is no need to touch the MPC tree */
2420	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2421		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2422		return;
2423	}
2424
2425	/* check if this MPCC is already being used */
2426	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2427	/* remove MPCC if being used */
2428	if (new_mpcc != NULL)
2429		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2430	else
2431		if (dc->debug.sanity_checks)
2432			mpc->funcs->assert_mpcc_idle_before_connect(
2433					dc->res_pool->mpc, mpcc_id);
2434
2435	/* Call MPC to insert new plane */
2436	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2437			mpc_tree_params,
2438			&blnd_cfg,
2439			NULL,
2440			NULL,
2441			hubp->inst,
2442			mpcc_id);
2443
2444	ASSERT(new_mpcc != NULL);
2445
2446	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2447	hubp->mpcc_id = mpcc_id;
2448}
2449
2450static void update_scaler(struct pipe_ctx *pipe_ctx)
2451{
2452	bool per_pixel_alpha =
2453			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2454
2455	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2456	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2457	/* scaler configuration */
2458	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2459			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2460}
2461
2462static void dcn10_update_dchubp_dpp(
2463	struct dc *dc,
2464	struct pipe_ctx *pipe_ctx,
2465	struct dc_state *context)
2466{
2467	struct dce_hwseq *hws = dc->hwseq;
2468	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2469	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2470	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2471	struct plane_size size = plane_state->plane_size;
2472	unsigned int compat_level = 0;
2473	bool should_divided_by_2 = false;
2474
2475	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2476	/* If the requested max DPP clock is lower than the current dispclk, there is
2477	 * no need to divide by 2.
2478	 */
2479	if (plane_state->update_flags.bits.full_update) {
2480
2481		/* The newly calculated dispclk and dppclk are stored in
2482		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
2483		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2484		 * dcn_validate_bandwidth computes the new dispclk and dppclk.
2485		 * dispclk is put in use after optimize_bandwidth, when
2486		 * ramp_up_dispclk_with_dpp is called.
2487		 * There are two places where dppclk is put in use. One is the
2488		 * same location as dispclk. The other is within
2489		 * update_dchubp_dpp, which happens between pre_bandwidth and
2490		 * optimize_bandwidth.
2491		 * A dppclk update within update_dchubp_dpp means the new
2492		 * dispclk and dppclk values are not put in use at the same
2493		 * time. When clocks are decreased, this may leave dppclk
2494		 * lower than the previous configuration and get the pipe stuck.
2495		 * For example, with eDP + external DP, change the resolution of
2496		 * the DP from 1920x1080x144Hz to 1280x960x60Hz:
2497		 * before the change: dispclk = 337889, dppclk = 337889
2498		 * after the mode change, dcn_validate_bandwidth calculates
2499		 *                dispclk = 143122, dppclk = 143122
2500		 * update_dchubp_dpp is executed before dispclk is updated, so
2501		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2502		 * 168944. This causes a pipe pstate warning.
2503		 * Solution: between pre_bandwidth and optimize_bandwidth, while
2504		 * dispclk is going to be decreased, keep dppclk = dispclk.
2505		 */
2506		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2507				dc->clk_mgr->clks.dispclk_khz)
2508			should_divided_by_2 = false;
2509		else
2510			should_divided_by_2 =
2511					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2512					dc->clk_mgr->clks.dispclk_khz / 2;
2513
2514		dpp->funcs->dpp_dppclk_control(
2515				dpp,
2516				should_divided_by_2,
2517				true);
2518
2519		if (dc->res_pool->dccg)
2520			dc->res_pool->dccg->funcs->update_dpp_dto(
2521					dc->res_pool->dccg,
2522					dpp->inst,
2523					pipe_ctx->plane_res.bw.dppclk_khz);
2524		else
2525			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2526						dc->clk_mgr->clks.dispclk_khz / 2 :
2527							dc->clk_mgr->clks.dispclk_khz;
2528	}
2529
2530	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe
2531	 * ties to. The VTG is within DCHUBBUB, a common block shared by each pipe
2532	 * HUBP. VTG maps 1:1 with OTG; each pipe HUBP selects which VTG to use.
2533	 */
2534	if (plane_state->update_flags.bits.full_update) {
2535		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2536
2537		hubp->funcs->hubp_setup(
2538			hubp,
2539			&pipe_ctx->dlg_regs,
2540			&pipe_ctx->ttu_regs,
2541			&pipe_ctx->rq_regs,
2542			&pipe_ctx->pipe_dlg_param);
2543		hubp->funcs->hubp_setup_interdependent(
2544			hubp,
2545			&pipe_ctx->dlg_regs,
2546			&pipe_ctx->ttu_regs);
2547	}
2548
2549	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2550
2551	if (plane_state->update_flags.bits.full_update ||
2552		plane_state->update_flags.bits.bpp_change)
2553		dcn10_update_dpp(dpp, plane_state);
2554
2555	if (plane_state->update_flags.bits.full_update ||
2556		plane_state->update_flags.bits.per_pixel_alpha_change ||
2557		plane_state->update_flags.bits.global_alpha_change)
2558		hws->funcs.update_mpcc(dc, pipe_ctx);
2559
2560	if (plane_state->update_flags.bits.full_update ||
2561		plane_state->update_flags.bits.per_pixel_alpha_change ||
2562		plane_state->update_flags.bits.global_alpha_change ||
2563		plane_state->update_flags.bits.scaling_change ||
2564		plane_state->update_flags.bits.position_change) {
2565		update_scaler(pipe_ctx);
2566	}
2567
2568	if (plane_state->update_flags.bits.full_update ||
2569		plane_state->update_flags.bits.scaling_change ||
2570		plane_state->update_flags.bits.position_change) {
2571		hubp->funcs->mem_program_viewport(
2572			hubp,
2573			&pipe_ctx->plane_res.scl_data.viewport,
2574			&pipe_ctx->plane_res.scl_data.viewport_c);
2575	}
2576
2577	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2578		dc->hwss.set_cursor_position(pipe_ctx);
2579		dc->hwss.set_cursor_attribute(pipe_ctx);
2580
2581		if (dc->hwss.set_cursor_sdr_white_level)
2582			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2583	}
2584
2585	if (plane_state->update_flags.bits.full_update) {
2586		/*gamut remap*/
2587		dc->hwss.program_gamut_remap(pipe_ctx);
2588
2589		dc->hwss.program_output_csc(dc,
2590				pipe_ctx,
2591				pipe_ctx->stream->output_color_space,
2592				pipe_ctx->stream->csc_color_matrix.matrix,
2593				pipe_ctx->stream_res.opp->inst);
2594	}
2595
2596	if (plane_state->update_flags.bits.full_update ||
2597		plane_state->update_flags.bits.pixel_format_change ||
2598		plane_state->update_flags.bits.horizontal_mirror_change ||
2599		plane_state->update_flags.bits.rotation_change ||
2600		plane_state->update_flags.bits.swizzle_change ||
2601		plane_state->update_flags.bits.dcc_change ||
2602		plane_state->update_flags.bits.bpp_change ||
2603		plane_state->update_flags.bits.scaling_change ||
2604		plane_state->update_flags.bits.plane_size_change) {
2605		hubp->funcs->hubp_program_surface_config(
2606			hubp,
2607			plane_state->format,
2608			&plane_state->tiling_info,
2609			&size,
2610			plane_state->rotation,
2611			&plane_state->dcc,
2612			plane_state->horizontal_mirror,
2613			compat_level);
2614	}
2615
2616	hubp->power_gated = false;
2617
2618	hws->funcs.update_plane_addr(dc, pipe_ctx);
2619
2620	if (is_pipe_tree_visible(pipe_ctx))
2621		hubp->funcs->set_blank(hubp, false);
2622}
2623
2624void dcn10_blank_pixel_data(
2625		struct dc *dc,
2626		struct pipe_ctx *pipe_ctx,
2627		bool blank)
2628{
2629	enum dc_color_space color_space;
2630	struct tg_color black_color = {0};
2631	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2632	struct dc_stream_state *stream = pipe_ctx->stream;
2633
2634	/* program otg blank color */
2635	color_space = stream->output_color_space;
2636	color_space_to_black_color(dc, color_space, &black_color);
2637
2638	/*
2639	 * The way 420 is packed, 2 channels carry Y component, 1 channel
2640	 * alternate between Cb and Cr, so both channels need the pixel
2641	 * value for Y
2642	 */
2643	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2644		black_color.color_r_cr = black_color.color_g_y;
2645
2646
2647	if (stream_res->tg->funcs->set_blank_color)
2648		stream_res->tg->funcs->set_blank_color(
2649				stream_res->tg,
2650				&black_color);
2651
2652	if (!blank) {
2653		if (stream_res->tg->funcs->set_blank)
2654			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2655		if (stream_res->abm) {
2656			dc->hwss.set_pipe(pipe_ctx);
2657			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2658		}
2659	} else if (blank) {
2660		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2661		if (stream_res->tg->funcs->set_blank) {
2662			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2663			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2664		}
2665	}
2666}
2667
2668void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2669{
2670	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2671	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2672	struct custom_float_format fmt;
2673
2674	fmt.exponenta_bits = 6;
2675	fmt.mantissa_bits = 12;
2676	fmt.sign = true;
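	/*
	 * 1 sign bit, 6 exponent bits, 12 mantissa bits: assuming the usual
	 * bias of 2^(6-1) - 1 = 31 (0x1F), 1.0 encodes as 0x1F << 12 = 0x1F000,
	 * which matches the default multiplier above.
	 */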
2677
2678
2679	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2680		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2681
2682	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2683			pipe_ctx->plane_res.dpp, hw_mult);
2684}
2685
2686void dcn10_program_pipe(
2687		struct dc *dc,
2688		struct pipe_ctx *pipe_ctx,
2689		struct dc_state *context)
2690{
2691	struct dce_hwseq *hws = dc->hwseq;
2692
2693	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2694		dcn10_enable_plane(dc, pipe_ctx, context);
2695
2696	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2697
2698	hws->funcs.set_hdr_multiplier(pipe_ctx);
2699
2700	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2701			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2702			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2703		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2704
2705	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2706	 * so only do gamma programming on a full update.
2707	 * TODO: This can be further optimized/cleaned up.
2708	 * Always call this for now, since it does a memcmp internally before
2709	 * doing the heavy calculation and programming.
2710	 */
2711	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2712		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2713}
2714
2715static void dcn10_program_all_pipe_in_tree(
2716		struct dc *dc,
2717		struct pipe_ctx *pipe_ctx,
2718		struct dc_state *context)
2719{
2720	struct dce_hwseq *hws = dc->hwseq;
2721
2722	if (pipe_ctx->top_pipe == NULL) {
2723		bool blank = !is_pipe_tree_visible(pipe_ctx);
2724
2725		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2726				pipe_ctx->stream_res.tg,
2727				pipe_ctx->pipe_dlg_param.vready_offset,
2728				pipe_ctx->pipe_dlg_param.vstartup_start,
2729				pipe_ctx->pipe_dlg_param.vupdate_offset,
2730				pipe_ctx->pipe_dlg_param.vupdate_width);
2731
2732		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2733				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2734
2735		if (hws->funcs.setup_vupdate_interrupt)
2736			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2737
2738		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2739	}
2740
2741	if (pipe_ctx->plane_state != NULL)
2742		hws->funcs.program_pipe(dc, pipe_ctx, context);
2743
2744	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2745		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2746}
2747
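/*
 * Find the top pipe (no top_pipe, no prev_odm_pipe) in the new context that
 * drives the given stream; pipes with no plane in both the old and the new
 * state are skipped. Returns NULL if no such pipe exists.
 */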
2748static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2749		struct dc *dc,
2750		struct dc_state *context,
2751		const struct dc_stream_state *stream)
2752{
2753	int i;
2754
2755	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2756		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2757		struct pipe_ctx *old_pipe_ctx =
2758				&dc->current_state->res_ctx.pipe_ctx[i];
2759
2760		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2761			continue;
2762
2763		if (pipe_ctx->stream != stream)
2764			continue;
2765
2766		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2767			return pipe_ctx;
2768	}
2769	return NULL;
2770}
2771
2772void dcn10_apply_ctx_for_surface(
2773		struct dc *dc,
2774		const struct dc_stream_state *stream,
2775		int num_planes,
2776		struct dc_state *context)
2777{
2778	struct dce_hwseq *hws = dc->hwseq;
2779	int i;
2780	struct timing_generator *tg;
2781	uint32_t underflow_check_delay_us;
2782	bool interdependent_update = false;
2783	struct pipe_ctx *top_pipe_to_program =
2784			dcn10_find_top_pipe_for_stream(dc, context, stream);
2785	DC_LOGGER_INIT(dc->ctx->logger);
2786
2787	// Clear pipe_ctx flag
2788	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2789		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2790		pipe_ctx->update_flags.raw = 0;
2791	}
2792
2793	if (!top_pipe_to_program)
2794		return;
2795
2796	tg = top_pipe_to_program->stream_res.tg;
2797
2798	interdependent_update = top_pipe_to_program->plane_state &&
2799		top_pipe_to_program->plane_state->update_flags.bits.full_update;
2800
2801	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2802
2803	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2804		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2805
2806	if (underflow_check_delay_us != 0xFFFFFFFF)
2807		udelay(underflow_check_delay_us);
2808
2809	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2810		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2811
2812	if (num_planes == 0) {
2813		/* OTG blank before removing all front ends */
2814		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
2815	}
2816
2817	/* Disconnect unused mpcc */
2818	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2819		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2820		struct pipe_ctx *old_pipe_ctx =
2821				&dc->current_state->res_ctx.pipe_ctx[i];
2822
2823		if ((!pipe_ctx->plane_state ||
2824		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2825		    old_pipe_ctx->plane_state &&
2826		    old_pipe_ctx->stream_res.tg == tg) {
2827
2828			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2829			pipe_ctx->update_flags.bits.disable = 1;
2830
2831			DC_LOG_DC("Reset mpcc for pipe %d\n",
2832					old_pipe_ctx->pipe_idx);
2833		}
2834	}
2835
2836	if (num_planes > 0)
2837		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2838
2839	/* Program secondary blending tree and writeback pipes */
2840	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
2841		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
2842	if (interdependent_update)
2843		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2844			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2845			/* Skip inactive pipes and ones already updated */
2846			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2847			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2848				continue;
2849
2850			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2851				pipe_ctx->plane_res.hubp,
2852				&pipe_ctx->dlg_regs,
2853				&pipe_ctx->ttu_regs);
2854		}
2855}
2856
2857void dcn10_post_unlock_program_front_end(
2858		struct dc *dc,
2859		struct dc_state *context)
2860{
2861	int i;
2862
2863	DC_LOGGER_INIT(dc->ctx->logger);
2864
2865	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2866		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2867
2868		if (!pipe_ctx->top_pipe &&
2869			!pipe_ctx->prev_odm_pipe &&
2870			pipe_ctx->stream) {
2871			struct timing_generator *tg = pipe_ctx->stream_res.tg;
2872
2873			if (context->stream_status[i].plane_count == 0)
2874				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2875		}
2876	}
2877
2878	for (i = 0; i < dc->res_pool->pipe_count; i++)
2879		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2880			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2881
2882	for (i = 0; i < dc->res_pool->pipe_count; i++)
2883		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2884			dc->hwss.optimize_bandwidth(dc, context);
2885			break;
2886		}
2887
2888	if (dc->hwseq->wa.DEGVIDCN10_254)
2889		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2890}
2891
2892static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2893{
2894	uint8_t i;
2895
2896	for (i = 0; i < context->stream_count; i++) {
2897		if (context->streams[i]->timing.timing_3d_format
2898				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2899			/*
2900			 * Disable stutter
2901			 */
2902			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2903			break;
2904		}
2905	}
2906}
2907
2908void dcn10_prepare_bandwidth(
2909		struct dc *dc,
2910		struct dc_state *context)
2911{
2912	struct dce_hwseq *hws = dc->hwseq;
2913	struct hubbub *hubbub = dc->res_pool->hubbub;
2914
2915	if (dc->debug.sanity_checks)
2916		hws->funcs.verify_allow_pstate_change_high(dc);
2917
2918	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2919		if (context->stream_count == 0)
2920			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2921
2922		dc->clk_mgr->funcs->update_clocks(
2923				dc->clk_mgr,
2924				context,
2925				false);
2926	}
2927
2928	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2929			&context->bw_ctx.bw.dcn.watermarks,
2930			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2931			true);
2932	dcn10_stereo_hw_frame_pack_wa(dc, context);
2933
2934	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2935		dcn_bw_notify_pplib_of_wm_ranges(dc);
2936
2937	if (dc->debug.sanity_checks)
2938		hws->funcs.verify_allow_pstate_change_high(dc);
2939}
2940
2941void dcn10_optimize_bandwidth(
2942		struct dc *dc,
2943		struct dc_state *context)
2944{
2945	struct dce_hwseq *hws = dc->hwseq;
2946	struct hubbub *hubbub = dc->res_pool->hubbub;
2947
2948	if (dc->debug.sanity_checks)
2949		hws->funcs.verify_allow_pstate_change_high(dc);
2950
2951	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2952		if (context->stream_count == 0)
2953			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2954
2955		dc->clk_mgr->funcs->update_clocks(
2956				dc->clk_mgr,
2957				context,
2958				true);
2959	}
2960
2961	hubbub->funcs->program_watermarks(hubbub,
2962			&context->bw_ctx.bw.dcn.watermarks,
2963			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2964			true);
2965
2966	dcn10_stereo_hw_frame_pack_wa(dc, context);
2967
2968	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2969		dcn_bw_notify_pplib_of_wm_ranges(dc);
2970
2971	if (dc->debug.sanity_checks)
2972		hws->funcs.verify_allow_pstate_change_high(dc);
2973}
2974
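/*
 * Program the DRR range (vertical_total min/mid/max) on each pipe's OTG and,
 * when a non-zero range is given, arm the static screen event trigger used
 * for the manual DRR control flow noted below.
 */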
2975void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2976		int num_pipes, unsigned int vmin, unsigned int vmax,
2977		unsigned int vmid, unsigned int vmid_frame_number)
2978{
2979	int i = 0;
2980	struct drr_params params = {0};
2981	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2982	unsigned int event_triggers = 0x800;
2983	// Note: DRR trigger events are generated regardless of whether the frame count is met.
2984	unsigned int num_frames = 2;
2985
2986	params.vertical_total_max = vmax;
2987	params.vertical_total_min = vmin;
2988	params.vertical_total_mid = vmid;
2989	params.vertical_total_mid_frame_num = vmid_frame_number;
2990
2991	/* TODO: If multiple pipes are to be supported, you need
2992	 * some GSL stuff. Static screen triggers may be programmed differently
2993	 * as well.
2994	 */
2995	for (i = 0; i < num_pipes; i++) {
2996		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2997			pipe_ctx[i]->stream_res.tg, &params);
2998		if (vmax != 0 && vmin != 0)
2999			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3000					pipe_ctx[i]->stream_res.tg,
3001					event_triggers, num_frames);
3002	}
3003}
3004
3005void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3006		int num_pipes,
3007		struct crtc_position *position)
3008{
3009	int i = 0;
3010
3011	/* TODO: handle pipes > 1
3012	 */
3013	for (i = 0; i < num_pipes; i++)
3014		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3015}
3016
3017void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3018		int num_pipes, const struct dc_static_screen_params *params)
3019{
3020	unsigned int i;
3021	unsigned int triggers = 0;
3022
3023	if (params->triggers.surface_update)
3024		triggers |= 0x80;
3025	if (params->triggers.cursor_update)
3026		triggers |= 0x2;
3027	if (params->triggers.force_trigger)
3028		triggers |= 0x1;
3029
3030	for (i = 0; i < num_pipes; i++)
3031		pipe_ctx[i]->stream_res.tg->funcs->
3032			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3033					triggers, params->num_frames);
3034}
3035
3036static void dcn10_config_stereo_parameters(
3037		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3038{
3039	enum view_3d_format view_format = stream->view_format;
3040	enum dc_timing_3d_format timing_3d_format =\
3041			stream->timing.timing_3d_format;
3042	bool non_stereo_timing = false;
3043
3044	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3045		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3046		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3047		non_stereo_timing = true;
3048
3049	if (non_stereo_timing == false &&
3050		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3051
3052		flags->PROGRAM_STEREO         = 1;
3053		flags->PROGRAM_POLARITY       = 1;
3054		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3055			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3056			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3057			enum display_dongle_type dongle = \
3058					stream->link->ddc->dongle_type;
3059			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3060				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3061				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3062				flags->DISABLE_STEREO_DP_SYNC = 1;
3063		}
3064		flags->RIGHT_EYE_POLARITY =\
3065				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3066		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3067			flags->FRAME_PACKED = 1;
3068	}
3069
3070	return;
3071}
3072
3073void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3074{
3075	struct crtc_stereo_flags flags = { 0 };
3076	struct dc_stream_state *stream = pipe_ctx->stream;
3077
3078	dcn10_config_stereo_parameters(stream, &flags);
3079
3080	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3081		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3082			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3083	} else {
3084		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3085	}
3086
3087	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3088		pipe_ctx->stream_res.opp,
3089		flags.PROGRAM_STEREO == 1 ? true:false,
3090		&stream->timing);
3091
3092	pipe_ctx->stream_res.tg->funcs->program_stereo(
3093		pipe_ctx->stream_res.tg,
3094		&stream->timing,
3095		&flags);
3096
3097	return;
3098}
3099
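/*
 * Look up the HUBP whose instance matches the given MPCC instance, relying on
 * the 1:1 hubp/mpcc instance mapping used elsewhere in this sequencer.
 */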
3100static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3101{
3102	int i;
3103
3104	for (i = 0; i < res_pool->pipe_count; i++) {
3105		if (res_pool->hubps[i]->inst == mpcc_inst)
3106			return res_pool->hubps[i];
3107	}
3108	ASSERT(false);
3109	return NULL;
3110}
3111
3112void dcn10_wait_for_mpcc_disconnect(
3113		struct dc *dc,
3114		struct resource_pool *res_pool,
3115		struct pipe_ctx *pipe_ctx)
3116{
3117	struct dce_hwseq *hws = dc->hwseq;
3118	int mpcc_inst;
3119
3120	if (dc->debug.sanity_checks) {
3121		hws->funcs.verify_allow_pstate_change_high(dc);
3122	}
3123
3124	if (!pipe_ctx->stream_res.opp)
3125		return;
3126
3127	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3128		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3129			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3130
3131			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3132			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3133			hubp->funcs->set_blank(hubp, true);
3134		}
3135	}
3136
3137	if (dc->debug.sanity_checks) {
3138		hws->funcs.verify_allow_pstate_change_high(dc);
3139	}
3140
3141}
3142
3143bool dcn10_dummy_display_power_gating(
3144	struct dc *dc,
3145	uint8_t controller_id,
3146	struct dc_bios *dcb,
3147	enum pipe_gating_control power_gating)
3148{
3149	return true;
3150}
3151
3152void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3153{
3154	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3155	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3156	bool flip_pending;
3157	struct dc *dc;
3158
3159	if (plane_state == NULL)
3160		return;
3161	dc = plane_state->ctx->dc;
3162	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3163					pipe_ctx->plane_res.hubp);
3164
3165	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3166
3167	if (!flip_pending)
3168		plane_state->status.current_address = plane_state->status.requested_address;
3169
3170	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3171			tg->funcs->is_stereo_left_eye) {
3172		plane_state->status.is_right_eye =
3173				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3174	}
3175
3176	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3177		struct dce_hwseq *hwseq = dc->hwseq;
3178		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3179		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3180
3181		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3182			struct hubbub *hubbub = dc->res_pool->hubbub;
3183
3184			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3185			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3186		}
3187	}
3188}
3189
3190void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3191{
3192	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3193
3194	/* In DCN, this programming sequence is owned by the hubbub */
3195	hubbub->funcs->update_dchub(hubbub, dh_data);
3196}
3197
3198static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3199{
3200	struct pipe_ctx *test_pipe;
3201	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3202	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3203
3204	/**
3205	 * Disable the cursor if there's another pipe above this with a
3206	 * plane that contains this pipe's viewport to prevent double cursor
3207	 * and incorrect scaling artifacts.
3208	 */
3209	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3210	     test_pipe = test_pipe->top_pipe) {
3211		if (!test_pipe->plane_state->visible)
3212			continue;
3213
3214		r2 = &test_pipe->plane_res.scl_data.recout;
3215		r2_r = r2->x + r2->width;
3216		r2_b = r2->y + r2->height;
3217
3218		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3219			return true;
3220	}
3221
3222	return false;
3223}
3224
3225void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3226{
3227	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3228	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3229	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3230	struct dc_cursor_mi_param param = {
3231		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3232		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3233		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3234		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3235		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3236		.rotation = pipe_ctx->plane_state->rotation,
3237		.mirror = pipe_ctx->plane_state->horizontal_mirror
3238	};
3239	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3240		(pipe_ctx->bottom_pipe != NULL);
3241
3242	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3243	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3244	int x_pos = pos_cpy.x;
3245	int y_pos = pos_cpy.y;
3246
3247	/**
3248	 * The DC cursor position is in stream space; the HW cursor is in plane
3249	 * space and is drawn as part of the framebuffer.
3250	 *
3251	 * Cursor position can't be negative, but hotspot can be used to
3252	 * shift cursor out of the plane bounds. Hotspot must be smaller
3253	 * than the cursor size.
3254	 */
3255
3256	/**
3257	 * Translate cursor from stream space to plane space.
3258	 *
3259	 * If the cursor is scaled then we need to scale the position
3260	 * to be in the approximately correct place. We can't do anything
3261	 * about the actual size being incorrect, that's a limitation of
3262	 * the hardware.
3263	 */
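	/*
	 * e.g. a 1920-wide src_rect mapped to a 960-wide dst_rect means a
	 * stream-space offset of 100 lands at (100 * 1920) / 960 = 200 in
	 * plane space.
	 */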
3264	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3265			pipe_ctx->plane_state->dst_rect.width;
3266	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3267			pipe_ctx->plane_state->dst_rect.height;
3268
3269	/**
3270	 * If the cursor's source viewport is clipped then we need to
3271	 * translate the cursor to appear in the correct position on
3272	 * the screen.
3273	 *
3274	 * This translation isn't affected by scaling so it needs to be
3275	 * done *after* we adjust the position for the scale factor.
3276	 *
3277	 * This is only done by opt-in for now since there are still
3278	 * some usecases like tiled display that might enable the
3279	 * cursor on both streams while expecting dc to clip it.
3280	 */
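	/* e.g. with src_rect.x = 1920 the cursor is shifted right by 1920 pixels. */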
3281	if (pos_cpy.translate_by_source) {
3282		x_pos += pipe_ctx->plane_state->src_rect.x;
3283		y_pos += pipe_ctx->plane_state->src_rect.y;
3284	}
3285
3286	/**
3287	 * If the position is negative then we need to add to the hotspot
3288	 * to shift the cursor outside the plane.
3289	 */
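	/* e.g. x_pos = -8 with x_hotspot = 4 becomes x_pos = 0, x_hotspot = 12. */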
3290
3291	if (x_pos < 0) {
3292		pos_cpy.x_hotspot -= x_pos;
3293		x_pos = 0;
3294	}
3295
3296	if (y_pos < 0) {
3297		pos_cpy.y_hotspot -= y_pos;
3298		y_pos = 0;
3299	}
3300
3301	pos_cpy.x = (uint32_t)x_pos;
3302	pos_cpy.y = (uint32_t)y_pos;
3303
3304	if (pipe_ctx->plane_state->address.type
3305			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3306		pos_cpy.enable = false;
3307
3308	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3309		pos_cpy.enable = false;
3310
3311	// Swap axis and mirror horizontally
3312	if (param.rotation == ROTATION_ANGLE_90) {
3313		uint32_t temp_x = pos_cpy.x;
3314
3315		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3316				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3317		pos_cpy.y = temp_x;
3318	}
3319	// Swap axis and mirror vertically
3320	else if (param.rotation == ROTATION_ANGLE_270) {
3321		uint32_t temp_y = pos_cpy.y;
3322		int viewport_height =
3323			pipe_ctx->plane_res.scl_data.viewport.height;
3324
3325		if (pipe_split_on) {
3326			if (pos_cpy.x > viewport_height) {
3327				pos_cpy.x = pos_cpy.x - viewport_height;
3328				pos_cpy.y = viewport_height - pos_cpy.x;
3329			} else {
3330				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3331			}
3332		} else
3333			pos_cpy.y = viewport_height - pos_cpy.x;
3334		pos_cpy.x = temp_y;
3335	}
3336	// Mirror horizontally and vertically
3337	else if (param.rotation == ROTATION_ANGLE_180) {
3338		int viewport_width =
3339			pipe_ctx->plane_res.scl_data.viewport.width;
3340		int viewport_x =
3341			pipe_ctx->plane_res.scl_data.viewport.x;
3342
3343		if (pipe_split_on) {
3344			if (pos_cpy.x >= viewport_width + viewport_x) {
3345				pos_cpy.x = 2 * viewport_width
3346						- pos_cpy.x + 2 * viewport_x;
3347			} else {
3348				uint32_t temp_x = pos_cpy.x;
3349
3350				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3351				if (temp_x >= viewport_x +
3352					(int)hubp->curs_attr.width || pos_cpy.x
3353					<= (int)hubp->curs_attr.width +
3354					pipe_ctx->plane_state->src_rect.x) {
3355					pos_cpy.x = temp_x + viewport_width;
3356				}
3357			}
3358		} else {
3359			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3360		}
3361		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3362	}
3363
3364	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3365	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3366}
3367
3368void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3369{
3370	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3371
3372	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3373			pipe_ctx->plane_res.hubp, attributes);
3374	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3375		pipe_ctx->plane_res.dpp, attributes);
3376}
3377
3378void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3379{
3380	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3381	struct fixed31_32 multiplier;
3382	struct dpp_cursor_attributes opt_attr = { 0 };
3383	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3384	struct custom_float_format fmt;
3385
3386	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3387		return;
3388
3389	fmt.exponenta_bits = 5;
3390	fmt.mantissa_bits = 10;
3391	fmt.sign = true;
3392
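	/* e.g. sdr_white_level = 160 gives a 160/80 = 2.0 multiplier, 0x4000 in FP16. */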
3393	if (sdr_white_level > 80) {
3394		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3395		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3396	}
3397
3398	opt_attr.scale = hw_scale;
3399	opt_attr.bias = 0;
3400
3401	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3402			pipe_ctx->plane_res.dpp, &opt_attr);
3403}
3404
3405/*
3406 * apply_front_porch_workaround  TODO: is this still needed on FPGA?
3407 *
3408 * Workaround for a bug that has existed since R5xx and has never been fixed:
3409 * keep the front porch at a minimum of 2 for interlaced or 1 for progressive.
3410 */
3411static void apply_front_porch_workaround(
3412	struct dc_crtc_timing *timing)
3413{
3414	if (timing->flags.INTERLACE == 1) {
3415		if (timing->v_front_porch < 2)
3416			timing->v_front_porch = 2;
3417	} else {
3418		if (timing->v_front_porch < 1)
3419			timing->v_front_porch = 1;
3420	}
3421}
3422
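/*
 * Returns the OTG line, relative to vsync, on which VUPDATE occurs: the end of
 * vertical blank (doubled for interlaced timings) minus the VSTARTUP offset,
 * plus one line.
 */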
3423int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3424{
3425	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3426	struct dc_crtc_timing patched_crtc_timing;
3427	int vesa_sync_start;
3428	int asic_blank_end;
3429	int interlace_factor;
3430	int vertical_line_start;
3431
3432	patched_crtc_timing = *dc_crtc_timing;
3433	apply_front_porch_workaround(&patched_crtc_timing);
3434
3435	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3436
3437	vesa_sync_start = patched_crtc_timing.v_addressable +
3438			patched_crtc_timing.v_border_bottom +
3439			patched_crtc_timing.v_front_porch;
3440
3441	asic_blank_end = (patched_crtc_timing.v_total -
3442			vesa_sync_start -
3443			patched_crtc_timing.v_border_top)
3444			* interlace_factor;
3445
3446	vertical_line_start = asic_blank_end -
3447			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3448
3449	return vertical_line_start;
3450}
3451
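/*
 * Translates the stream's periodic_interrupt0 line offset (relative to
 * VUPDATE) into an absolute start/end line pair for vertical interrupt 0;
 * the window is two lines wide and wraps at v_total.
 */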
3452void dcn10_calc_vupdate_position(
3453		struct dc *dc,
3454		struct pipe_ctx *pipe_ctx,
3455		uint32_t *start_line,
3456		uint32_t *end_line)
3457{
3458	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3459	int vline_int_offset_from_vupdate =
3460			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3461	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3462	int start_position;
3463
3464	if (vline_int_offset_from_vupdate > 0)
3465		vline_int_offset_from_vupdate--;
3466	else if (vline_int_offset_from_vupdate < 0)
3467		vline_int_offset_from_vupdate++;
3468
3469	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3470
3471	if (start_position >= 0)
3472		*start_line = start_position;
3473	else
3474		*start_line = dc_crtc_timing->v_total + start_position - 1;
3475
3476	*end_line = *start_line + 2;
3477
3478	if (*end_line >= dc_crtc_timing->v_total)
3479		*end_line = 2;
3480}
3481
3482static void dcn10_cal_vline_position(
3483		struct dc *dc,
3484		struct pipe_ctx *pipe_ctx,
3485		enum vline_select vline,
3486		uint32_t *start_line,
3487		uint32_t *end_line)
3488{
3489	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3490
3491	if (vline == VLINE0)
3492		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3493	else if (vline == VLINE1)
3494		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3495
3496	switch (ref_point) {
3497	case START_V_UPDATE:
3498		dcn10_calc_vupdate_position(
3499				dc,
3500				pipe_ctx,
3501				start_line,
3502				end_line);
3503		break;
3504	case START_V_SYNC:
3505		// Nothing to do: the vsync reference point is line 0
3506		break;
3507	default:
3508		ASSERT(0);
3509		break;
3510	}
3511}
3512
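/*
 * VLINE0 is programmed as an absolute start/end line window derived from its
 * reference point; VLINE1 is a plain line offset passed straight to the OTG.
 */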
3513void dcn10_setup_periodic_interrupt(
3514		struct dc *dc,
3515		struct pipe_ctx *pipe_ctx,
3516		enum vline_select vline)
3517{
3518	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3519
3520	if (vline == VLINE0) {
3521		uint32_t start_line = 0;
3522		uint32_t end_line = 0;
3523
3524		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3525
3526		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3527
3528	} else if (vline == VLINE1) {
3529		tg->funcs->setup_vertical_interrupt1(tg,
3530				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3532	}
3533}
3534
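/*
 * Programs vertical interrupt 2 to fire on the VUPDATE line; a negative
 * offset is unexpected and is clamped to zero.
 */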
3535void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3536{
3537	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3538	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3539
3540	if (start_line < 0) {
3541		ASSERT(0);
3542		start_line = 0;
3543	}
3544
3545	if (tg->funcs->setup_vertical_interrupt2)
3546		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3547}
3548
3549void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3550		struct dc_link_settings *link_settings)
3551{
3552	struct encoder_unblank_param params = { { 0 } };
3553	struct dc_stream_state *stream = pipe_ctx->stream;
3554	struct dc_link *link = stream->link;
3555	struct dce_hwseq *hws = link->dc->hwseq;
3556
3557	/* only the parameters set below are used by unblank */
3558	params.timing = pipe_ctx->stream->timing;
3559
3560	params.link_settings.link_rate = link_settings->link_rate;
3561
3562	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
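		/* DP YCbCr 4:2:0 streams are programmed with half the pixel clock */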
3563		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3564			params.timing.pix_clk_100hz /= 2;
3565		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3566	}
3567
3568	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3569		hws->funcs.edp_backlight_control(link, true);
3570	}
3571}
3572
3573void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3574				const uint8_t *custom_sdp_message,
3575				unsigned int sdp_message_size)
3576{
3577	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3578		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3579				pipe_ctx->stream_res.stream_enc,
3580				custom_sdp_message,
3581				sdp_message_size);
3582	}
3583}
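
/*
 * Validates a DISPCLK/DPPCLK request against the range reported by the clock
 * manager before handing it to update_clocks().
 */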
3584enum dc_status dcn10_set_clock(struct dc *dc,
3585			enum dc_clock_type clock_type,
3586			uint32_t clk_khz,
3587			uint32_t stepping)
3588{
3589	struct dc_state *context = dc->current_state;
3590	struct dc_clock_config clock_cfg = {0};
3591	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3592
3593	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3594		return DC_FAIL_UNSUPPORTED_1;
3595
3596	dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context,
3597			clock_type, &clock_cfg);
3598
3600	if (clk_khz > clock_cfg.max_clock_khz)
3601		return DC_FAIL_CLK_EXCEED_MAX;
3602
3603	if (clk_khz < clock_cfg.min_clock_khz)
3604		return DC_FAIL_CLK_BELOW_MIN;
3605
3606	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3607		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3608
3609	/* Update the internally requested clock so update_clocks() uses it */
3610	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3611		current_clocks->dispclk_khz = clk_khz;
3612	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3613		current_clocks->dppclk_khz = clk_khz;
3614	else
3615		return DC_ERROR_UNEXPECTED;
3616
3617	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3618		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3619				context, true);
3620
3621	return DC_OK;
3622}
3623
3624void dcn10_get_clock(struct dc *dc,
3625			enum dc_clock_type clock_type,
3626			struct dc_clock_config *clock_cfg)
3627{
3628	struct dc_state *context = dc->current_state;
3629
3630	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3631		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3632}