   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 *
  24 */
  25
  26#include <linux/slab.h>
  27
  28#include "dm_services.h"
  29#include "dc.h"
  30
  31#include "resource.h"
  32#include "include/irq_service_interface.h"
  33#include "dcn20/dcn20_resource.h"
  34
  35#include "dcn10/dcn10_hubp.h"
  36#include "dcn10/dcn10_ipp.h"
  37#include "dcn20_hubbub.h"
  38#include "dcn20_mpc.h"
  39#include "dcn20_hubp.h"
  40#include "irq/dcn20/irq_service_dcn20.h"
  41#include "dcn20_dpp.h"
  42#include "dcn20_optc.h"
  43#include "dcn20_hwseq.h"
  44#include "dce110/dce110_hw_sequencer.h"
  45#include "dcn10/dcn10_resource.h"
  46#include "dcn20_opp.h"
  47
  48#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
  49#include "dcn20_dsc.h"
  50#endif
  51
  52#include "dcn20_link_encoder.h"
  53#include "dcn20_stream_encoder.h"
  54#include "dce/dce_clock_source.h"
  55#include "dce/dce_audio.h"
  56#include "dce/dce_hwseq.h"
  57#include "virtual/virtual_stream_encoder.h"
  58#include "dce110/dce110_resource.h"
  59#include "dml/display_mode_vba.h"
  60#include "dcn20_dccg.h"
  61#include "dcn20_vmid.h"
  62
  63#include "navi10_ip_offset.h"
  64
  65#include "dcn/dcn_2_0_0_offset.h"
  66#include "dcn/dcn_2_0_0_sh_mask.h"
  67
  68#include "nbio/nbio_2_3_offset.h"
  69
  70#include "dcn20/dcn20_dwb.h"
  71#include "dcn20/dcn20_mmhubbub.h"
  72
  73#include "mmhub/mmhub_2_0_0_offset.h"
  74#include "mmhub/mmhub_2_0_0_sh_mask.h"
  75
  76#include "reg_helper.h"
  77#include "dce/dce_abm.h"
  78#include "dce/dce_dmcu.h"
  79#include "dce/dce_aux.h"
  80#include "dce/dce_i2c.h"
  81#include "vm_helper.h"
  82
  83#include "amdgpu_socbb.h"
  84
  85/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */
  86#define SOC_BOUNDING_BOX_VALID false
  87#define DC_LOGGER_INIT(logger)
  88
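/*
 * DCN 2.0 IP parameters consumed by the DML (display_mode_vba) mode-support
 * and bandwidth calculations.
 */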
  89struct _vcs_dpi_ip_params_st dcn2_0_ip = {
  90	.odm_capable = 1,
  91	.gpuvm_enable = 0,
  92	.hostvm_enable = 0,
  93	.gpuvm_max_page_table_levels = 4,
  94	.hostvm_max_page_table_levels = 4,
  95	.hostvm_cached_page_table_levels = 0,
  96	.pte_group_size_bytes = 2048,
  97#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
  98	.num_dsc = 6,
  99#else
 100	.num_dsc = 0,
 101#endif
 102	.rob_buffer_size_kbytes = 168,
 103	.det_buffer_size_kbytes = 164,
 104	.dpte_buffer_size_in_pte_reqs_luma = 84,
 105	.pde_proc_buffer_size_64k_reqs = 48,
 106	.dpp_output_buffer_pixels = 2560,
 107	.opp_output_buffer_lines = 1,
 108	.pixel_chunk_size_kbytes = 8,
 109	.pte_chunk_size_kbytes = 2,
 110	.meta_chunk_size_kbytes = 2,
 111	.writeback_chunk_size_kbytes = 2,
 112	.line_buffer_size_bits = 789504,
 113	.is_line_buffer_bpp_fixed = 0,
 114	.line_buffer_fixed_bpp = 0,
 115	.dcc_supported = true,
 116	.max_line_buffer_lines = 12,
 117	.writeback_luma_buffer_size_kbytes = 12,
 118	.writeback_chroma_buffer_size_kbytes = 8,
 119	.writeback_chroma_line_buffer_width_pixels = 4,
 120	.writeback_max_hscl_ratio = 1,
 121	.writeback_max_vscl_ratio = 1,
 122	.writeback_min_hscl_ratio = 1,
 123	.writeback_min_vscl_ratio = 1,
 124	.writeback_max_hscl_taps = 12,
 125	.writeback_max_vscl_taps = 12,
 126	.writeback_line_buffer_luma_buffer_size = 0,
 127	.writeback_line_buffer_chroma_buffer_size = 14643,
 128	.cursor_buffer_size = 8,
 129	.cursor_chunk_size = 2,
 130	.max_num_otg = 6,
 131	.max_num_dpp = 6,
 132	.max_num_wb = 1,
 133	.max_dchub_pscl_bw_pix_per_clk = 4,
 134	.max_pscl_lb_bw_pix_per_clk = 2,
 135	.max_lb_vscl_bw_pix_per_clk = 4,
 136	.max_vscl_hscl_bw_pix_per_clk = 4,
 137	.max_hscl_ratio = 8,
 138	.max_vscl_ratio = 8,
 139	.hscl_mults = 4,
 140	.vscl_mults = 4,
 141	.max_hscl_taps = 8,
 142	.max_vscl_taps = 8,
 143	.dispclk_ramp_margin_percent = 1,
 144	.underscan_factor = 1.10,
 145	.min_vblank_lines = 32, //
 146	.dppclk_delay_subtotal = 77, //
 147	.dppclk_delay_scl_lb_only = 16,
 148	.dppclk_delay_scl = 50,
 149	.dppclk_delay_cnvc_formatter = 8,
 150	.dppclk_delay_cnvc_cursor = 6,
 151	.dispclk_delay_subtotal = 87, //
 152	.dcfclk_cstate_latency = 10, // SRExitTime
 153	.max_inter_dcn_tile_repeaters = 8,
 154
 155	.xfc_supported = true,
 156	.xfc_fill_bw_overhead_percent = 10.0,
 157	.xfc_fill_constant_bytes = 0,
 158};
 159
 160struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
 161	/* Defaults that get patched on driver load from firmware. */
 162	.clock_limits = {
 163			{
 164				.state = 0,
 165				.dcfclk_mhz = 560.0,
 166				.fabricclk_mhz = 560.0,
 167				.dispclk_mhz = 513.0,
 168				.dppclk_mhz = 513.0,
 169				.phyclk_mhz = 540.0,
 170				.socclk_mhz = 560.0,
 171				.dscclk_mhz = 171.0,
 172				.dram_speed_mts = 8960.0,
 173			},
 174			{
 175				.state = 1,
 176				.dcfclk_mhz = 694.0,
 177				.fabricclk_mhz = 694.0,
 178				.dispclk_mhz = 642.0,
 179				.dppclk_mhz = 642.0,
 180				.phyclk_mhz = 600.0,
 181				.socclk_mhz = 694.0,
 182				.dscclk_mhz = 214.0,
 183				.dram_speed_mts = 11104.0,
 184			},
 185			{
 186				.state = 2,
 187				.dcfclk_mhz = 875.0,
 188				.fabricclk_mhz = 875.0,
 189				.dispclk_mhz = 734.0,
 190				.dppclk_mhz = 734.0,
 191				.phyclk_mhz = 810.0,
 192				.socclk_mhz = 875.0,
 193				.dscclk_mhz = 245.0,
 194				.dram_speed_mts = 14000.0,
 195			},
 196			{
 197				.state = 3,
 198				.dcfclk_mhz = 1000.0,
 199				.fabricclk_mhz = 1000.0,
 200				.dispclk_mhz = 1100.0,
 201				.dppclk_mhz = 1100.0,
 202				.phyclk_mhz = 810.0,
 203				.socclk_mhz = 1000.0,
 204				.dscclk_mhz = 367.0,
 205				.dram_speed_mts = 16000.0,
 206			},
 207			{
 208				.state = 4,
 209				.dcfclk_mhz = 1200.0,
 210				.fabricclk_mhz = 1200.0,
 211				.dispclk_mhz = 1284.0,
 212				.dppclk_mhz = 1284.0,
 213				.phyclk_mhz = 810.0,
 214				.socclk_mhz = 1200.0,
 215				.dscclk_mhz = 428.0,
 216				.dram_speed_mts = 16000.0,
 217			},
 218			/*Extra state, no dispclk ramping*/
 219			{
 220				.state = 5,
 221				.dcfclk_mhz = 1200.0,
 222				.fabricclk_mhz = 1200.0,
 223				.dispclk_mhz = 1284.0,
 224				.dppclk_mhz = 1284.0,
 225				.phyclk_mhz = 810.0,
 226				.socclk_mhz = 1200.0,
 227				.dscclk_mhz = 428.0,
 228				.dram_speed_mts = 16000.0,
 229			},
 230		},
 231	.num_states = 5,
 232	.sr_exit_time_us = 8.6,
 233	.sr_enter_plus_exit_time_us = 10.9,
 234	.urgent_latency_us = 4.0,
 235	.urgent_latency_pixel_data_only_us = 4.0,
 236	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
 237	.urgent_latency_vm_data_only_us = 4.0,
 238	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
 239	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
 240	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
 241	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
 242	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
 243	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
 244	.max_avg_sdp_bw_use_normal_percent = 40.0,
 245	.max_avg_dram_bw_use_normal_percent = 40.0,
 246	.writeback_latency_us = 12.0,
 247	.ideal_dram_bw_after_urgent_percent = 40.0,
 248	.max_request_size_bytes = 256,
 249	.dram_channel_width_bytes = 2,
 250	.fabric_datapath_to_dcn_data_return_bytes = 64,
 251	.dcn_downspread_percent = 0.5,
 252	.downspread_percent = 0.38,
 253	.dram_page_open_time_ns = 50.0,
 254	.dram_rw_turnaround_time_ns = 17.5,
 255	.dram_return_buffer_per_channel_bytes = 8192,
 256	.round_trip_ping_latency_dcfclk_cycles = 131,
 257	.urgent_out_of_order_return_per_channel_bytes = 256,
 258	.channel_interleave_bytes = 256,
 259	.num_banks = 8,
 260	.num_chans = 16,
 261	.vmm_page_size_bytes = 4096,
 262	.dram_clock_change_latency_us = 404.0,
 263	.dummy_pstate_latency_us = 5.0,
 264	.writeback_dram_clock_change_latency_us = 23.0,
 265	.return_bus_width_bytes = 64,
 266	.dispclk_dppclk_vco_speed_mhz = 3850,
 267	.xfc_bus_transport_time_us = 20,
 268	.xfc_xbuf_latency_tolerance_us = 4,
 269	.use_urgent_burst_bw = 0
 270};
 271
 272struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
 273
 274#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
 275	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
 276	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 277	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
 278	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 279	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
 280	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 281	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
 282	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 283	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
 284	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 285	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
 286	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 287	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
 288	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
 289#endif
 290
 291
 292enum dcn20_clk_src_array_id {
 293	DCN20_CLK_SRC_PLL0,
 294	DCN20_CLK_SRC_PLL1,
 295	DCN20_CLK_SRC_PLL2,
 296	DCN20_CLK_SRC_PLL3,
 297	DCN20_CLK_SRC_PLL4,
 298	DCN20_CLK_SRC_PLL5,
 299	DCN20_CLK_SRC_TOTAL
 300};
 301
 302/* begin *********************
  303 * macros to expand register list macros defined in HW object header files */
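/*
 * SR/SRI/SRII and friends compute absolute register addresses by adding the
 * per-instance segment base (DCN_BASE__INST0_SEG<n>, NBIO_BASE__INST0_SEG<n>,
 * MMHUB_BASE__INST0_SEG<n>) to the register offsets from the generated
 * mm* defines.
 */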
 304
 305/* DCN */
 306/* TODO awful hack. fixup dcn20_dwb.h */
 307#undef BASE_INNER
 308#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 309
 310#define BASE(seg) BASE_INNER(seg)
 311
 312#define SR(reg_name)\
 313		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
 314					mm ## reg_name
 315
 316#define SRI(reg_name, block, id)\
 317	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 318					mm ## block ## id ## _ ## reg_name
 319
 320#define SRIR(var_name, reg_name, block, id)\
 321	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 322					mm ## block ## id ## _ ## reg_name
 323
 324#define SRII(reg_name, block, id)\
 325	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 326					mm ## block ## id ## _ ## reg_name
 327
 328#define DCCG_SRII(reg_name, block, id)\
 329	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 330					mm ## block ## id ## _ ## reg_name
 331
 332/* NBIO */
 333#define NBIO_BASE_INNER(seg) \
 334	NBIO_BASE__INST0_SEG ## seg
 335
 336#define NBIO_BASE(seg) \
 337	NBIO_BASE_INNER(seg)
 338
 339#define NBIO_SR(reg_name)\
 340		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
 341					mm ## reg_name
 342
 343/* MMHUB */
 344#define MMHUB_BASE_INNER(seg) \
 345	MMHUB_BASE__INST0_SEG ## seg
 346
 347#define MMHUB_BASE(seg) \
 348	MMHUB_BASE_INNER(seg)
 349
 350#define MMHUB_SR(reg_name)\
 351		.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
 352					mmMM ## reg_name
 353
 354static const struct bios_registers bios_regs = {
 355		NBIO_SR(BIOS_SCRATCH_3),
 356		NBIO_SR(BIOS_SCRATCH_6)
 357};
 358
 359#define clk_src_regs(index, pllid)\
 360[index] = {\
 361	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
 362}
 363
 364static const struct dce110_clk_src_regs clk_src_regs[] = {
 365	clk_src_regs(0, A),
 366	clk_src_regs(1, B),
 367	clk_src_regs(2, C),
 368	clk_src_regs(3, D),
 369	clk_src_regs(4, E),
 370	clk_src_regs(5, F)
 371};
 372
 373static const struct dce110_clk_src_shift cs_shift = {
 374		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 375};
 376
 377static const struct dce110_clk_src_mask cs_mask = {
 378		CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 379};
 380
 381static const struct dce_dmcu_registers dmcu_regs = {
 382		DMCU_DCN10_REG_LIST()
 383};
 384
 385static const struct dce_dmcu_shift dmcu_shift = {
 386		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
 387};
 388
 389static const struct dce_dmcu_mask dmcu_mask = {
 390		DMCU_MASK_SH_LIST_DCN10(_MASK)
 391};
 392
 393static const struct dce_abm_registers abm_regs = {
 394		ABM_DCN20_REG_LIST()
 395};
 396
 397static const struct dce_abm_shift abm_shift = {
 398		ABM_MASK_SH_LIST_DCN20(__SHIFT)
 399};
 400
 401static const struct dce_abm_mask abm_mask = {
 402		ABM_MASK_SH_LIST_DCN20(_MASK)
 403};
 404
 405#define audio_regs(id)\
 406[id] = {\
 407		AUD_COMMON_REG_LIST(id)\
 408}
 409
 410static const struct dce_audio_registers audio_regs[] = {
 411	audio_regs(0),
 412	audio_regs(1),
 413	audio_regs(2),
 414	audio_regs(3),
 415	audio_regs(4),
 416	audio_regs(5),
 417	audio_regs(6),
 418};
 419
 420#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
 421		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
 422		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
 423		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
 424
 425static const struct dce_audio_shift audio_shift = {
 426		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
 427};
 428
 429static const struct dce_audio_mask audio_mask = {
 430		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
 431};
 432
 433#define stream_enc_regs(id)\
 434[id] = {\
 435	SE_DCN2_REG_LIST(id)\
 436}
 437
 438static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
 439	stream_enc_regs(0),
 440	stream_enc_regs(1),
 441	stream_enc_regs(2),
 442	stream_enc_regs(3),
 443	stream_enc_regs(4),
 444	stream_enc_regs(5),
 445};
 446
 447static const struct dcn10_stream_encoder_shift se_shift = {
 448		SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
 449};
 450
 451static const struct dcn10_stream_encoder_mask se_mask = {
 452		SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
 453};
 454
 455
 456#define aux_regs(id)\
 457[id] = {\
 458	DCN2_AUX_REG_LIST(id)\
 459}
 460
 461static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
 462		aux_regs(0),
 463		aux_regs(1),
 464		aux_regs(2),
 465		aux_regs(3),
 466		aux_regs(4),
 467		aux_regs(5)
 468};
 469
 470#define hpd_regs(id)\
 471[id] = {\
 472	HPD_REG_LIST(id)\
 473}
 474
 475static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 476		hpd_regs(0),
 477		hpd_regs(1),
 478		hpd_regs(2),
 479		hpd_regs(3),
 480		hpd_regs(4),
 481		hpd_regs(5)
 482};
 483
 484#define link_regs(id, phyid)\
 485[id] = {\
 486	LE_DCN10_REG_LIST(id), \
 487	UNIPHY_DCN2_REG_LIST(phyid), \
 488	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 489}
 490
 491static const struct dcn10_link_enc_registers link_enc_regs[] = {
 492	link_regs(0, A),
 493	link_regs(1, B),
 494	link_regs(2, C),
 495	link_regs(3, D),
 496	link_regs(4, E),
 497	link_regs(5, F)
 498};
 499
 500static const struct dcn10_link_enc_shift le_shift = {
 501	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
 502};
 503
 504static const struct dcn10_link_enc_mask le_mask = {
 505	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
 506};
 507
 508#define ipp_regs(id)\
 509[id] = {\
 510	IPP_REG_LIST_DCN20(id),\
 511}
 512
 513static const struct dcn10_ipp_registers ipp_regs[] = {
 514	ipp_regs(0),
 515	ipp_regs(1),
 516	ipp_regs(2),
 517	ipp_regs(3),
 518	ipp_regs(4),
 519	ipp_regs(5),
 520};
 521
 522static const struct dcn10_ipp_shift ipp_shift = {
 523		IPP_MASK_SH_LIST_DCN20(__SHIFT)
 524};
 525
 526static const struct dcn10_ipp_mask ipp_mask = {
 527		IPP_MASK_SH_LIST_DCN20(_MASK),
 528};
 529
 530#define opp_regs(id)\
 531[id] = {\
 532	OPP_REG_LIST_DCN20(id),\
 533}
 534
 535static const struct dcn20_opp_registers opp_regs[] = {
 536	opp_regs(0),
 537	opp_regs(1),
 538	opp_regs(2),
 539	opp_regs(3),
 540	opp_regs(4),
 541	opp_regs(5),
 542};
 543
 544static const struct dcn20_opp_shift opp_shift = {
 545		OPP_MASK_SH_LIST_DCN20(__SHIFT)
 546};
 547
 548static const struct dcn20_opp_mask opp_mask = {
 549		OPP_MASK_SH_LIST_DCN20(_MASK)
 550};
 551
 552#define aux_engine_regs(id)\
 553[id] = {\
 554	AUX_COMMON_REG_LIST0(id), \
 555	.AUXN_IMPCAL = 0, \
 556	.AUXP_IMPCAL = 0, \
 557	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
 558}
 559
 560static const struct dce110_aux_registers aux_engine_regs[] = {
 561		aux_engine_regs(0),
 562		aux_engine_regs(1),
 563		aux_engine_regs(2),
 564		aux_engine_regs(3),
 565		aux_engine_regs(4),
 566		aux_engine_regs(5)
 567};
 568
 569#define tf_regs(id)\
 570[id] = {\
 571	TF_REG_LIST_DCN20(id),\
 572}
 573
 574static const struct dcn2_dpp_registers tf_regs[] = {
 575	tf_regs(0),
 576	tf_regs(1),
 577	tf_regs(2),
 578	tf_regs(3),
 579	tf_regs(4),
 580	tf_regs(5),
 581};
 582
 583static const struct dcn2_dpp_shift tf_shift = {
 584		TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
 585};
 586
 587static const struct dcn2_dpp_mask tf_mask = {
 588		TF_REG_LIST_SH_MASK_DCN20(_MASK)
 589};
 590
 591#define dwbc_regs_dcn2(id)\
 592[id] = {\
 593	DWBC_COMMON_REG_LIST_DCN2_0(id),\
 594		}
 595
 596static const struct dcn20_dwbc_registers dwbc20_regs[] = {
 597	dwbc_regs_dcn2(0),
 598};
 599
 600static const struct dcn20_dwbc_shift dwbc20_shift = {
 601	DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 602};
 603
 604static const struct dcn20_dwbc_mask dwbc20_mask = {
 605	DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 606};
 607
 608#define mcif_wb_regs_dcn2(id)\
 609[id] = {\
 610	MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
 611		}
 612
 613static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
 614	mcif_wb_regs_dcn2(0),
 615};
 616
 617static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
 618	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 619};
 620
 621static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
 622	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 623};
 624
 625static const struct dcn20_mpc_registers mpc_regs = {
 626		MPC_REG_LIST_DCN2_0(0),
 627		MPC_REG_LIST_DCN2_0(1),
 628		MPC_REG_LIST_DCN2_0(2),
 629		MPC_REG_LIST_DCN2_0(3),
 630		MPC_REG_LIST_DCN2_0(4),
 631		MPC_REG_LIST_DCN2_0(5),
 632		MPC_OUT_MUX_REG_LIST_DCN2_0(0),
 633		MPC_OUT_MUX_REG_LIST_DCN2_0(1),
 634		MPC_OUT_MUX_REG_LIST_DCN2_0(2),
 635		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
 636		MPC_OUT_MUX_REG_LIST_DCN2_0(4),
 637		MPC_OUT_MUX_REG_LIST_DCN2_0(5),
 638};
 639
 640static const struct dcn20_mpc_shift mpc_shift = {
 641	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 642};
 643
 644static const struct dcn20_mpc_mask mpc_mask = {
 645	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 646};
 647
 648#define tg_regs(id)\
 649[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
 650
 651
 652static const struct dcn_optc_registers tg_regs[] = {
 653	tg_regs(0),
 654	tg_regs(1),
 655	tg_regs(2),
 656	tg_regs(3),
 657	tg_regs(4),
 658	tg_regs(5)
 659};
 660
 661static const struct dcn_optc_shift tg_shift = {
 662	TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 663};
 664
 665static const struct dcn_optc_mask tg_mask = {
 666	TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 667};
 668
 669#define hubp_regs(id)\
 670[id] = {\
 671	HUBP_REG_LIST_DCN20(id)\
 672}
 673
 674static const struct dcn_hubp2_registers hubp_regs[] = {
 675		hubp_regs(0),
 676		hubp_regs(1),
 677		hubp_regs(2),
 678		hubp_regs(3),
 679		hubp_regs(4),
 680		hubp_regs(5)
 681};
 682
 683static const struct dcn_hubp2_shift hubp_shift = {
 684		HUBP_MASK_SH_LIST_DCN20(__SHIFT)
 685};
 686
 687static const struct dcn_hubp2_mask hubp_mask = {
 688		HUBP_MASK_SH_LIST_DCN20(_MASK)
 689};
 690
 691static const struct dcn_hubbub_registers hubbub_reg = {
 692		HUBBUB_REG_LIST_DCN20(0)
 693};
 694
 695static const struct dcn_hubbub_shift hubbub_shift = {
 696		HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
 697};
 698
 699static const struct dcn_hubbub_mask hubbub_mask = {
 700		HUBBUB_MASK_SH_LIST_DCN20(_MASK)
 701};
 702
 703#define vmid_regs(id)\
 704[id] = {\
 705		DCN20_VMID_REG_LIST(id)\
 706}
 707
 708static const struct dcn_vmid_registers vmid_regs[] = {
 709	vmid_regs(0),
 710	vmid_regs(1),
 711	vmid_regs(2),
 712	vmid_regs(3),
 713	vmid_regs(4),
 714	vmid_regs(5),
 715	vmid_regs(6),
 716	vmid_regs(7),
 717	vmid_regs(8),
 718	vmid_regs(9),
 719	vmid_regs(10),
 720	vmid_regs(11),
 721	vmid_regs(12),
 722	vmid_regs(13),
 723	vmid_regs(14),
 724	vmid_regs(15)
 725};
 726
 727static const struct dcn20_vmid_shift vmid_shifts = {
 728		DCN20_VMID_MASK_SH_LIST(__SHIFT)
 729};
 730
 731static const struct dcn20_vmid_mask vmid_masks = {
 732		DCN20_VMID_MASK_SH_LIST(_MASK)
 733};
 734
 735#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 736#define dsc_regsDCN20(id)\
 737[id] = {\
 738	DSC_REG_LIST_DCN20(id)\
 739}
 740
 741static const struct dcn20_dsc_registers dsc_regs[] = {
 742	dsc_regsDCN20(0),
 743	dsc_regsDCN20(1),
 744	dsc_regsDCN20(2),
 745	dsc_regsDCN20(3),
 746	dsc_regsDCN20(4),
 747	dsc_regsDCN20(5)
 748};
 749
 750static const struct dcn20_dsc_shift dsc_shift = {
 751	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
 752};
 753
 754static const struct dcn20_dsc_mask dsc_mask = {
 755	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 756};
 757#endif
 758
 759static const struct dccg_registers dccg_regs = {
 760		DCCG_REG_LIST_DCN2()
 761};
 762
 763static const struct dccg_shift dccg_shift = {
 764		DCCG_MASK_SH_LIST_DCN2(__SHIFT)
 765};
 766
 767static const struct dccg_mask dccg_mask = {
 768		DCCG_MASK_SH_LIST_DCN2(_MASK)
 769};
 770
 771static const struct resource_caps res_cap_nv10 = {
 772		.num_timing_generator = 6,
 773		.num_opp = 6,
 774		.num_video_plane = 6,
 775		.num_audio = 7,
 776		.num_stream_encoder = 6,
 777		.num_pll = 6,
 778		.num_dwb = 1,
 779		.num_ddc = 6,
 780		.num_vmid = 16,
 781#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 782		.num_dsc = 6,
 783#endif
 784};
 785
 786static const struct dc_plane_cap plane_cap = {
 787	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
 788	.blends_with_above = true,
 789	.blends_with_below = true,
 790	.per_pixel_alpha = true,
 791
 792	.pixel_format_support = {
 793			.argb8888 = true,
 794			.nv12 = true,
 795			.fp16 = true
 796	},
 797
 798	.max_upscale_factor = {
 799			.argb8888 = 16000,
 800			.nv12 = 16000,
 801			.fp16 = 1
 802	},
 803
 804	.max_downscale_factor = {
 805			.argb8888 = 250,
 806			.nv12 = 250,
 807			.fp16 = 1
 808	}
 809};
 810static const struct resource_caps res_cap_nv14 = {
 811		.num_timing_generator = 5,
 812		.num_opp = 5,
 813		.num_video_plane = 5,
 814		.num_audio = 6,
 815		.num_stream_encoder = 5,
 816		.num_pll = 5,
 817		.num_dwb = 1,
 818		.num_ddc = 5,
 819};
 820
 821static const struct dc_debug_options debug_defaults_drv = {
 822		.disable_dmcu = true,
 823		.force_abm_enable = false,
 824		.timing_trace = false,
 825		.clock_trace = true,
 826		.disable_pplib_clock_request = true,
 827		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 828		.force_single_disp_pipe_split = true,
 829		.disable_dcc = DCC_ENABLE,
 830		.vsr_support = true,
 831		.performance_trace = false,
 832		.max_downscale_src_width = 5120,/*upto 5K*/
 833		.disable_pplib_wm_range = false,
 834		.scl_reset_length10 = true,
 835		.sanity_checks = false,
 836		.disable_tri_buf = true,
 837		.underflow_assert_delay_us = 0xFFFFFFFF,
 838};
 839
 840static const struct dc_debug_options debug_defaults_diags = {
 841		.disable_dmcu = true,
 842		.force_abm_enable = false,
 843		.timing_trace = true,
 844		.clock_trace = true,
 845		.disable_dpp_power_gate = true,
 846		.disable_hubp_power_gate = true,
 847		.disable_clock_gate = true,
 848		.disable_pplib_clock_request = true,
 849		.disable_pplib_wm_range = true,
 850		.disable_stutter = true,
 851		.scl_reset_length10 = true,
 852		.underflow_assert_delay_us = 0xFFFFFFFF,
 853};
 854
 855void dcn20_dpp_destroy(struct dpp **dpp)
 856{
 857	kfree(TO_DCN20_DPP(*dpp));
 858	*dpp = NULL;
 859}
 860
 861struct dpp *dcn20_dpp_create(
 862	struct dc_context *ctx,
 863	uint32_t inst)
 864{
 865	struct dcn20_dpp *dpp =
 866		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
 867
 868	if (!dpp)
 869		return NULL;
 870
 871	if (dpp2_construct(dpp, ctx, inst,
 872			&tf_regs[inst], &tf_shift, &tf_mask))
 873		return &dpp->base;
 874
 875	BREAK_TO_DEBUGGER();
 876	kfree(dpp);
 877	return NULL;
 878}
 879
 880struct input_pixel_processor *dcn20_ipp_create(
 881	struct dc_context *ctx, uint32_t inst)
 882{
 883	struct dcn10_ipp *ipp =
 884		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
 885
 886	if (!ipp) {
 887		BREAK_TO_DEBUGGER();
 888		return NULL;
 889	}
 890
 891	dcn20_ipp_construct(ipp, ctx, inst,
 892			&ipp_regs[inst], &ipp_shift, &ipp_mask);
 893	return &ipp->base;
 894}
 895
 896
 897struct output_pixel_processor *dcn20_opp_create(
 898	struct dc_context *ctx, uint32_t inst)
 899{
 900	struct dcn20_opp *opp =
 901		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
 902
 903	if (!opp) {
 904		BREAK_TO_DEBUGGER();
 905		return NULL;
 906	}
 907
 908	dcn20_opp_construct(opp, ctx, inst,
 909			&opp_regs[inst], &opp_shift, &opp_mask);
 910	return &opp->base;
 911}
 912
 913struct dce_aux *dcn20_aux_engine_create(
 914	struct dc_context *ctx,
 915	uint32_t inst)
 916{
 917	struct aux_engine_dce110 *aux_engine =
 918		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
 919
 920	if (!aux_engine)
 921		return NULL;
 922
 923	dce110_aux_engine_construct(aux_engine, ctx, inst,
 924				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 925				    &aux_engine_regs[inst]);
 926
 927	return &aux_engine->base;
 928}
 929#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
 930
 931static const struct dce_i2c_registers i2c_hw_regs[] = {
 932		i2c_inst_regs(1),
 933		i2c_inst_regs(2),
 934		i2c_inst_regs(3),
 935		i2c_inst_regs(4),
 936		i2c_inst_regs(5),
 937		i2c_inst_regs(6),
 938};
 939
 940static const struct dce_i2c_shift i2c_shifts = {
 941		I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
 942};
 943
 944static const struct dce_i2c_mask i2c_masks = {
 945		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
 946};
 947
 948struct dce_i2c_hw *dcn20_i2c_hw_create(
 949	struct dc_context *ctx,
 950	uint32_t inst)
 951{
 952	struct dce_i2c_hw *dce_i2c_hw =
 953		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
 954
 955	if (!dce_i2c_hw)
 956		return NULL;
 957
 958	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
 959				    &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
 960
 961	return dce_i2c_hw;
 962}
 963struct mpc *dcn20_mpc_create(struct dc_context *ctx)
 964{
 965	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
 966					  GFP_KERNEL);
 967
 968	if (!mpc20)
 969		return NULL;
 970
 971	dcn20_mpc_construct(mpc20, ctx,
 972			&mpc_regs,
 973			&mpc_shift,
 974			&mpc_mask,
 975			6);
 976
 977	return &mpc20->base;
 978}
 979
 980struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
 981{
 982	int i;
 983	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
 984					  GFP_KERNEL);
 985
 986	if (!hubbub)
 987		return NULL;
 988
 989	hubbub2_construct(hubbub, ctx,
 990			&hubbub_reg,
 991			&hubbub_shift,
 992			&hubbub_mask);
 993
 994	for (i = 0; i < res_cap_nv10.num_vmid; i++) {
 995		struct dcn20_vmid *vmid = &hubbub->vmid[i];
 996
 997		vmid->ctx = ctx;
 998
 999		vmid->regs = &vmid_regs[i];
1000		vmid->shifts = &vmid_shifts;
1001		vmid->masks = &vmid_masks;
1002	}
1003
1004	return &hubbub->base;
1005}
1006
1007struct timing_generator *dcn20_timing_generator_create(
1008		struct dc_context *ctx,
1009		uint32_t instance)
1010{
1011	struct optc *tgn10 =
1012		kzalloc(sizeof(struct optc), GFP_KERNEL);
1013
1014	if (!tgn10)
1015		return NULL;
1016
1017	tgn10->base.inst = instance;
1018	tgn10->base.ctx = ctx;
1019
1020	tgn10->tg_regs = &tg_regs[instance];
1021	tgn10->tg_shift = &tg_shift;
1022	tgn10->tg_mask = &tg_mask;
1023
1024	dcn20_timing_generator_init(tgn10);
1025
1026	return &tgn10->base;
1027}
1028
1029static const struct encoder_feature_support link_enc_feature = {
1030		.max_hdmi_deep_color = COLOR_DEPTH_121212,
1031		.max_hdmi_pixel_clock = 600000,
1032		.hdmi_ycbcr420_supported = true,
1033		.dp_ycbcr420_supported = true,
1034		.flags.bits.IS_HBR2_CAPABLE = true,
1035		.flags.bits.IS_HBR3_CAPABLE = true,
1036		.flags.bits.IS_TPS3_CAPABLE = true,
1037		.flags.bits.IS_TPS4_CAPABLE = true
1038};
1039
1040struct link_encoder *dcn20_link_encoder_create(
1041	const struct encoder_init_data *enc_init_data)
1042{
1043	struct dcn20_link_encoder *enc20 =
1044		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
1045
1046	if (!enc20)
1047		return NULL;
1048
1049	dcn20_link_encoder_construct(enc20,
1050				      enc_init_data,
1051				      &link_enc_feature,
1052				      &link_enc_regs[enc_init_data->transmitter],
1053				      &link_enc_aux_regs[enc_init_data->channel - 1],
1054				      &link_enc_hpd_regs[enc_init_data->hpd_source],
1055				      &le_shift,
1056				      &le_mask);
1057
1058	return &enc20->enc10.base;
1059}
1060
1061struct clock_source *dcn20_clock_source_create(
1062	struct dc_context *ctx,
1063	struct dc_bios *bios,
1064	enum clock_source_id id,
1065	const struct dce110_clk_src_regs *regs,
1066	bool dp_clk_src)
1067{
1068	struct dce110_clk_src *clk_src =
1069		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
1070
1071	if (!clk_src)
1072		return NULL;
1073
1074	if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
1075			regs, &cs_shift, &cs_mask)) {
1076		clk_src->base.dp_clk_src = dp_clk_src;
1077		return &clk_src->base;
1078	}
1079
1080	kfree(clk_src);
1081	BREAK_TO_DEBUGGER();
1082	return NULL;
1083}
1084
1085static void read_dce_straps(
1086	struct dc_context *ctx,
1087	struct resource_straps *straps)
1088{
1089	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
1090		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
1091}
1092
1093static struct audio *dcn20_create_audio(
1094		struct dc_context *ctx, unsigned int inst)
1095{
1096	return dce_audio_create(ctx, inst,
1097			&audio_regs[inst], &audio_shift, &audio_mask);
1098}
1099
1100struct stream_encoder *dcn20_stream_encoder_create(
1101	enum engine_id eng_id,
1102	struct dc_context *ctx)
1103{
1104	struct dcn10_stream_encoder *enc1 =
1105		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
1106
1107	if (!enc1)
1108		return NULL;
1109
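	/*
	 * Navi14 does not use the DIGD instance; shift engine ids at or
	 * above ENGINE_ID_DIGD up by one.
	 */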
1110	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
1111		if (eng_id >= ENGINE_ID_DIGD)
1112			eng_id++;
1113	}
1114
1115	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
1116					&stream_enc_regs[eng_id],
1117					&se_shift, &se_mask);
1118
1119	return &enc1->base;
1120}
1121
1122static const struct dce_hwseq_registers hwseq_reg = {
1123		HWSEQ_DCN2_REG_LIST()
1124};
1125
1126static const struct dce_hwseq_shift hwseq_shift = {
1127		HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
1128};
1129
1130static const struct dce_hwseq_mask hwseq_mask = {
1131		HWSEQ_DCN2_MASK_SH_LIST(_MASK)
1132};
1133
1134struct dce_hwseq *dcn20_hwseq_create(
1135	struct dc_context *ctx)
1136{
1137	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
1138
1139	if (hws) {
1140		hws->ctx = ctx;
1141		hws->regs = &hwseq_reg;
1142		hws->shifts = &hwseq_shift;
1143		hws->masks = &hwseq_mask;
1144	}
1145	return hws;
1146}
1147
1148static const struct resource_create_funcs res_create_funcs = {
1149	.read_dce_straps = read_dce_straps,
1150	.create_audio = dcn20_create_audio,
1151	.create_stream_encoder = dcn20_stream_encoder_create,
1152	.create_hwseq = dcn20_hwseq_create,
1153};
1154
1155static const struct resource_create_funcs res_create_maximus_funcs = {
1156	.read_dce_straps = NULL,
1157	.create_audio = NULL,
1158	.create_stream_encoder = NULL,
1159	.create_hwseq = dcn20_hwseq_create,
1160};
1161
1162void dcn20_clock_source_destroy(struct clock_source **clk_src)
1163{
1164	kfree(TO_DCE110_CLK_SRC(*clk_src));
1165	*clk_src = NULL;
1166}
1167
1168#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1169
1170struct display_stream_compressor *dcn20_dsc_create(
1171	struct dc_context *ctx, uint32_t inst)
1172{
1173	struct dcn20_dsc *dsc =
1174		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
1175
1176	if (!dsc) {
1177		BREAK_TO_DEBUGGER();
1178		return NULL;
1179	}
1180
1181	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
1182	return &dsc->base;
1183}
1184
1185void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
1186{
1187	kfree(container_of(*dsc, struct dcn20_dsc, base));
1188	*dsc = NULL;
1189}
1190
1191#endif
1192
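/*
 * Free everything the resource pool constructor allocated: stream encoders,
 * DSCs, MPC, HUBBUB, per-pipe DPPs/IPPs/HUBPs, the IRQ service, AUX and I2C
 * engines, OPPs, timing generators, DWB/MMHUBBUB, audios, clock sources,
 * ABM, DMCU, DCCG and the pp_smu interface.
 */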
1193static void destruct(struct dcn20_resource_pool *pool)
1194{
1195	unsigned int i;
1196
1197	for (i = 0; i < pool->base.stream_enc_count; i++) {
1198		if (pool->base.stream_enc[i] != NULL) {
1199			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
1200			pool->base.stream_enc[i] = NULL;
1201		}
1202	}
1203
1204#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1205	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
1206		if (pool->base.dscs[i] != NULL)
1207			dcn20_dsc_destroy(&pool->base.dscs[i]);
1208	}
1209#endif
1210
1211	if (pool->base.mpc != NULL) {
1212		kfree(TO_DCN20_MPC(pool->base.mpc));
1213		pool->base.mpc = NULL;
1214	}
1215	if (pool->base.hubbub != NULL) {
1216		kfree(pool->base.hubbub);
1217		pool->base.hubbub = NULL;
1218	}
1219	for (i = 0; i < pool->base.pipe_count; i++) {
1220		if (pool->base.dpps[i] != NULL)
1221			dcn20_dpp_destroy(&pool->base.dpps[i]);
1222
1223		if (pool->base.ipps[i] != NULL)
1224			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
1225
1226		if (pool->base.hubps[i] != NULL) {
1227			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
1228			pool->base.hubps[i] = NULL;
1229		}
1230
1231		if (pool->base.irqs != NULL) {
1232			dal_irq_service_destroy(&pool->base.irqs);
1233		}
1234	}
1235
1236	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
1237		if (pool->base.engines[i] != NULL)
1238			dce110_engine_destroy(&pool->base.engines[i]);
1239		if (pool->base.hw_i2cs[i] != NULL) {
1240			kfree(pool->base.hw_i2cs[i]);
1241			pool->base.hw_i2cs[i] = NULL;
1242		}
1243		if (pool->base.sw_i2cs[i] != NULL) {
1244			kfree(pool->base.sw_i2cs[i]);
1245			pool->base.sw_i2cs[i] = NULL;
1246		}
1247	}
1248
1249	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
1250		if (pool->base.opps[i] != NULL)
1251			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
1252	}
1253
1254	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
1255		if (pool->base.timing_generators[i] != NULL)	{
1256			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
1257			pool->base.timing_generators[i] = NULL;
1258		}
1259	}
1260
1261	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
1262		if (pool->base.dwbc[i] != NULL) {
1263			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
1264			pool->base.dwbc[i] = NULL;
1265		}
1266		if (pool->base.mcif_wb[i] != NULL) {
1267			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
1268			pool->base.mcif_wb[i] = NULL;
1269		}
1270	}
1271
1272	for (i = 0; i < pool->base.audio_count; i++) {
1273		if (pool->base.audios[i])
1274			dce_aud_destroy(&pool->base.audios[i]);
1275	}
1276
1277	for (i = 0; i < pool->base.clk_src_count; i++) {
1278		if (pool->base.clock_sources[i] != NULL) {
1279			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
1280			pool->base.clock_sources[i] = NULL;
1281		}
1282	}
1283
1284	if (pool->base.dp_clock_source != NULL) {
1285		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
1286		pool->base.dp_clock_source = NULL;
1287	}
1288
1289
1290	if (pool->base.abm != NULL)
1291		dce_abm_destroy(&pool->base.abm);
1292
1293	if (pool->base.dmcu != NULL)
1294		dce_dmcu_destroy(&pool->base.dmcu);
1295
1296	if (pool->base.dccg != NULL)
1297		dcn_dccg_destroy(&pool->base.dccg);
1298
1299	if (pool->base.pp_smu != NULL)
1300		dcn20_pp_smu_destroy(&pool->base.pp_smu);
1301
1302}
1303
1304struct hubp *dcn20_hubp_create(
1305	struct dc_context *ctx,
1306	uint32_t inst)
1307{
1308	struct dcn20_hubp *hubp2 =
1309		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
1310
1311	if (!hubp2)
1312		return NULL;
1313
1314	if (hubp2_construct(hubp2, ctx, inst,
1315			&hubp_regs[inst], &hubp_shift, &hubp_mask))
1316		return &hubp2->base;
1317
1318	BREAK_TO_DEBUGGER();
1319	kfree(hubp2);
1320	return NULL;
1321}
1322
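/*
 * Derive pixel_clk_params from the stream timing: the requested pixel clock
 * is divided by the ODM / two-pixels-per-container factor and doubled for
 * HW frame-packed 3D.
 */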
1323static void get_pixel_clock_parameters(
1324	struct pipe_ctx *pipe_ctx,
1325	struct pixel_clk_params *pixel_clk_params)
1326{
1327	const struct dc_stream_state *stream = pipe_ctx->stream;
1328	struct pipe_ctx *odm_pipe;
1329	int opp_cnt = 1;
1330
1331	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
1332		opp_cnt++;
1333
1334	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
1335	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
1336	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
1337	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
 1338	/* TODO: un-hardcode */
1339	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
1340		LINK_RATE_REF_FREQ_IN_KHZ;
1341	pixel_clk_params->flags.ENABLE_SS = 0;
1342	pixel_clk_params->color_depth =
1343		stream->timing.display_color_depth;
1344	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
1345	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
1346
1347	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
1348		pixel_clk_params->color_depth = COLOR_DEPTH_888;
1349
1350	if (opp_cnt == 4)
1351		pixel_clk_params->requested_pix_clk_100hz /= 4;
1352	else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
1353		pixel_clk_params->requested_pix_clk_100hz /= 2;
1354
1355	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1356		pixel_clk_params->requested_pix_clk_100hz *= 2;
1357
1358}
1359
1360static void build_clamping_params(struct dc_stream_state *stream)
1361{
1362	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
1363	stream->clamping.c_depth = stream->timing.display_color_depth;
1364	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
1365}
1366
1367static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
1368{
1369
1370	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
1371
1372	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
1373		pipe_ctx->clock_source,
1374		&pipe_ctx->stream_res.pix_clk_params,
1375		&pipe_ctx->pll_settings);
1376
1377	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
1378
1379	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1380					&pipe_ctx->stream->bit_depth_params);
1381	build_clamping_params(pipe_ctx->stream);
1382
1383	return DC_OK;
1384}
1385
1386enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
1387{
1388	enum dc_status status = DC_OK;
1389	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
1390
1391	/*TODO Seems unneeded anymore */
1392	/*	if (old_context && resource_is_stream_unchanged(old_context, stream)) {
1393			if (stream != NULL && old_context->streams[i] != NULL) {
1394				 todo: shouldn't have to copy missing parameter here
1395				resource_build_bit_depth_reduction_params(stream,
1396						&stream->bit_depth_params);
1397				stream->clamping.pixel_encoding =
1398						stream->timing.pixel_encoding;
1399
1400				resource_build_bit_depth_reduction_params(stream,
1401								&stream->bit_depth_params);
1402				build_clamping_params(stream);
1403
1404				continue;
1405			}
1406		}
1407	*/
1408
1409	if (!pipe_ctx)
1410		return DC_ERROR_UNEXPECTED;
1411
1412
1413	status = build_pipe_hw_param(pipe_ctx);
1414
1415	return status;
1416}
1417
1418#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1419
1420static void acquire_dsc(struct resource_context *res_ctx,
1421			const struct resource_pool *pool,
1422			struct display_stream_compressor **dsc)
1423{
1424	int i;
1425
1426	ASSERT(*dsc == NULL);
1427	*dsc = NULL;
1428
1429	/* Find first free DSC */
1430	for (i = 0; i < pool->res_cap->num_dsc; i++)
1431		if (!res_ctx->is_dsc_acquired[i]) {
1432			*dsc = pool->dscs[i];
1433			res_ctx->is_dsc_acquired[i] = true;
1434			break;
1435		}
1436}
1437
1438static void release_dsc(struct resource_context *res_ctx,
1439			const struct resource_pool *pool,
1440			struct display_stream_compressor **dsc)
1441{
1442	int i;
1443
1444	for (i = 0; i < pool->res_cap->num_dsc; i++)
1445		if (pool->dscs[i] == *dsc) {
1446			res_ctx->is_dsc_acquired[i] = false;
1447			*dsc = NULL;
1448			break;
1449		}
1450}
1451
1452#endif
1453
1454
1455#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1456static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
1457		struct dc_state *dc_ctx,
1458		struct dc_stream_state *dc_stream)
1459{
1460	enum dc_status result = DC_OK;
1461	int i;
1462	const struct resource_pool *pool = dc->res_pool;
1463
1464	/* Get a DSC if required and available */
1465	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1466		struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
1467
1468		if (pipe_ctx->stream != dc_stream)
1469			continue;
1470
1471		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
1472
1473		/* The number of DSCs can be less than the number of pipes */
1474		if (!pipe_ctx->stream_res.dsc) {
1475			dm_output_to_console("No DSCs available\n");
1476			result = DC_NO_DSC_RESOURCE;
1477		}
1478
1479		break;
1480	}
1481
1482	return result;
1483}
1484
1485
1486static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
1487		struct dc_state *new_ctx,
1488		struct dc_stream_state *dc_stream)
1489{
1490	struct pipe_ctx *pipe_ctx = NULL;
1491	int i;
1492
1493	for (i = 0; i < MAX_PIPES; i++) {
1494		if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
1495			pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
1496
1497			if (pipe_ctx->stream_res.dsc)
1498				release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
1499		}
1500	}
1501
1502	if (!pipe_ctx)
1503		return DC_ERROR_UNEXPECTED;
1504	else
1505		return DC_OK;
1506}
1507#endif
1508
1509
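/*
 * Map pool and PHY clock resources for a newly added stream, attach a DSC
 * when the timing requests one, and build the mapped HW parameters.
 */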
1510enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1511{
1512	enum dc_status result = DC_ERROR_UNEXPECTED;
1513
1514	result = resource_map_pool_resources(dc, new_ctx, dc_stream);
1515
1516	if (result == DC_OK)
1517		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
1518
1519#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1520	/* Get a DSC if required and available */
1521	if (result == DC_OK && dc_stream->timing.flags.DSC)
1522		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
1523#endif
1524
1525	if (result == DC_OK)
1526		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
1527
1528	return result;
1529}
1530
1531
1532enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1533{
1534	enum dc_status result = DC_OK;
1535
1536#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1537	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
1538#endif
1539
1540	return result;
1541}
1542
1543
1544static void swizzle_to_dml_params(
1545		enum swizzle_mode_values swizzle,
1546		unsigned int *sw_mode)
1547{
1548	switch (swizzle) {
1549	case DC_SW_LINEAR:
1550		*sw_mode = dm_sw_linear;
1551		break;
1552	case DC_SW_4KB_S:
1553		*sw_mode = dm_sw_4kb_s;
1554		break;
1555	case DC_SW_4KB_S_X:
1556		*sw_mode = dm_sw_4kb_s_x;
1557		break;
1558	case DC_SW_4KB_D:
1559		*sw_mode = dm_sw_4kb_d;
1560		break;
1561	case DC_SW_4KB_D_X:
1562		*sw_mode = dm_sw_4kb_d_x;
1563		break;
1564	case DC_SW_64KB_S:
1565		*sw_mode = dm_sw_64kb_s;
1566		break;
1567	case DC_SW_64KB_S_X:
1568		*sw_mode = dm_sw_64kb_s_x;
1569		break;
1570	case DC_SW_64KB_S_T:
1571		*sw_mode = dm_sw_64kb_s_t;
1572		break;
1573	case DC_SW_64KB_D:
1574		*sw_mode = dm_sw_64kb_d;
1575		break;
1576	case DC_SW_64KB_D_X:
1577		*sw_mode = dm_sw_64kb_d_x;
1578		break;
1579	case DC_SW_64KB_D_T:
1580		*sw_mode = dm_sw_64kb_d_t;
1581		break;
1582	case DC_SW_64KB_R_X:
1583		*sw_mode = dm_sw_64kb_r_x;
1584		break;
1585	case DC_SW_VAR_S:
1586		*sw_mode = dm_sw_var_s;
1587		break;
1588	case DC_SW_VAR_S_X:
1589		*sw_mode = dm_sw_var_s_x;
1590		break;
1591	case DC_SW_VAR_D:
1592		*sw_mode = dm_sw_var_d;
1593		break;
1594	case DC_SW_VAR_D_X:
1595		*sw_mode = dm_sw_var_d_x;
1596		break;
1597
1598	default:
1599		ASSERT(0); /* Not supported */
1600		break;
1601	}
1602}
1603
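/*
 * Set up next_odm_pipe as the second half of an ODM combine: copy the pipe
 * state, link the prev/next_odm_pipe chain, halve h_active and rebuild the
 * viewport/recout of both halves, and acquire a DSC for the new pipe when
 * the stream uses DSC. Returns false if either half would be left with
 * fewer than 16 pixels of width.
 */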
1604static bool dcn20_split_stream_for_odm(
1605		struct resource_context *res_ctx,
1606		const struct resource_pool *pool,
1607		struct pipe_ctx *prev_odm_pipe,
1608		struct pipe_ctx *next_odm_pipe)
1609{
1610	int pipe_idx = next_odm_pipe->pipe_idx;
1611
1612	*next_odm_pipe = *prev_odm_pipe;
1613
1614	next_odm_pipe->pipe_idx = pipe_idx;
1615	next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
1616	next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
1617	next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
1618	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
1619	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
1620	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
1621#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1622	next_odm_pipe->stream_res.dsc = NULL;
1623#endif
1624	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
1625		ASSERT(!next_odm_pipe->next_odm_pipe);
1626		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
1627		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
1628	}
1629	prev_odm_pipe->next_odm_pipe = next_odm_pipe;
1630	next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
1631	ASSERT(next_odm_pipe->top_pipe == NULL);
1632
1633	if (prev_odm_pipe->plane_state) {
1634		struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
1635		int new_width;
1636
1637		/* HACTIVE halved for odm combine */
1638		sd->h_active /= 2;
1639		/* Calculate new vp and recout for left pipe */
1640		/* Need at least 16 pixels width per side */
1641		if (sd->recout.x + 16 >= sd->h_active)
1642			return false;
1643		new_width = sd->h_active - sd->recout.x;
1644		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1645				sd->ratios.horz, sd->recout.width - new_width));
1646		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1647				sd->ratios.horz_c, sd->recout.width - new_width));
1648		sd->recout.width = new_width;
1649
1650		/* Calculate new vp and recout for right pipe */
1651		sd = &next_odm_pipe->plane_res.scl_data;
1652		/* HACTIVE halved for odm combine */
1653		sd->h_active /= 2;
1654		/* Need at least 16 pixels width per side */
1655		if (new_width <= 16)
1656			return false;
1657		new_width = sd->recout.width + sd->recout.x - sd->h_active;
1658		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1659				sd->ratios.horz, sd->recout.width - new_width));
1660		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1661				sd->ratios.horz_c, sd->recout.width - new_width));
1662		sd->recout.width = new_width;
1663		sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
1664				sd->ratios.horz, sd->h_active - sd->recout.x));
1665		sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
1666				sd->ratios.horz_c, sd->h_active - sd->recout.x));
1667		sd->recout.x = 0;
1668	}
1669	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
1670#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1671	if (next_odm_pipe->stream->timing.flags.DSC == 1) {
1672		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
1673		ASSERT(next_odm_pipe->stream_res.dsc);
1674		if (next_odm_pipe->stream_res.dsc == NULL)
1675			return false;
1676	}
1677#endif
1678
1679	return true;
1680}
1681
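/*
 * Set up secondary_pipe as the bottom layer of an MPC combine: copy the pipe
 * state, chain it below primary_pipe and rebuild the scaling parameters for
 * both pipes.
 */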
1682static void dcn20_split_stream_for_mpc(
1683		struct resource_context *res_ctx,
1684		const struct resource_pool *pool,
1685		struct pipe_ctx *primary_pipe,
1686		struct pipe_ctx *secondary_pipe)
1687{
1688	int pipe_idx = secondary_pipe->pipe_idx;
1689	struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
1690
1691	*secondary_pipe = *primary_pipe;
1692	secondary_pipe->bottom_pipe = sec_bot_pipe;
1693
1694	secondary_pipe->pipe_idx = pipe_idx;
1695	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
1696	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
1697	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
1698	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
1699	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
1700	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
1701#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1702	secondary_pipe->stream_res.dsc = NULL;
1703#endif
1704	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
1705		ASSERT(!secondary_pipe->bottom_pipe);
1706		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
1707		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
1708	}
1709	primary_pipe->bottom_pipe = secondary_pipe;
1710	secondary_pipe->top_pipe = primary_pipe;
1711
1712	ASSERT(primary_pipe->plane_state);
1713	resource_build_scaling_params(primary_pipe);
1714	resource_build_scaling_params(secondary_pipe);
1715}
1716
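/*
 * Fill the DML writeback (dout.wb) parameters for each active stream from
 * its first dc_writeback_info entry.
 */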
1717void dcn20_populate_dml_writeback_from_context(
1718		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1719{
1720	int pipe_cnt, i;
1721
1722	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
1723		struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
1724
1725		if (!res_ctx->pipe_ctx[i].stream)
1726			continue;
1727
1728		/* Set writeback information */
1729		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
1730		pipes[pipe_cnt].dout.num_active_wb++;
1731		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
1732		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
1733		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
1734		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
1735		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
1736		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
1737		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
1738		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
1739		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
1740		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
1741		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
1742			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
1743				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
1744			else
1745				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
1746		} else
1747			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
1748
1749		pipe_cnt++;
1750	}
1751
1752}
1753
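/*
 * Convert the active pipes in res_ctx into the display_e2e_pipe_params_st
 * entries consumed by DML: timing and blanking, output type/format/bpc,
 * worst-case cursor settings and hsplit/ODM grouping.
 */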
1754int dcn20_populate_dml_pipes_from_context(
1755		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1756{
1757	int pipe_cnt, i;
1758	bool synchronized_vblank = true;
1759
1760	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
1761		if (!res_ctx->pipe_ctx[i].stream)
1762			continue;
1763
1764		if (pipe_cnt < 0) {
1765			pipe_cnt = i;
1766			continue;
1767		}
1768		if (!resource_are_streams_timing_synchronizable(
1769				res_ctx->pipe_ctx[pipe_cnt].stream,
1770				res_ctx->pipe_ctx[i].stream)) {
1771			synchronized_vblank = false;
1772			break;
1773		}
1774	}
1775
1776	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
1777		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
1778		int output_bpc;
1779
1780		if (!res_ctx->pipe_ctx[i].stream)
1781			continue;
1782		/* todo:
1783		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
1784		pipes[pipe_cnt].pipe.src.dcc = 0;
1785		pipes[pipe_cnt].pipe.src.vm = 0;*/
1786
1787#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1788		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
1789		/* todo: rotation?*/
1790		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
1791#endif
1792		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
1793			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
1794			/* 1/2 vblank */
1795			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
1796				(timing->v_total - timing->v_addressable
1797					- timing->v_border_top - timing->v_border_bottom) / 2;
1798			/* 36 bytes dp, 32 hdmi */
1799			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
1800				dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
1801		}
1802		pipes[pipe_cnt].pipe.src.dcc = false;
1803		pipes[pipe_cnt].pipe.src.dcc_rate = 1;
1804		pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
1805		pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
1806		pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
1807				- timing->h_addressable
1808				- timing->h_border_left
1809				- timing->h_border_right;
1810		pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
1811		pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
1812				- timing->v_addressable
1813				- timing->v_border_top
1814				- timing->v_border_bottom;
1815		pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
1816		pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
1817		pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
1818		pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
1819		pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
1820		pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
1821		if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1822			pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
1823		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
1824		pipes[pipe_cnt].dout.dp_lanes = 4;
1825		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
1826		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
1827		pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
1828							|| res_ctx->pipe_ctx[i].next_odm_pipe;
1829		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
1830		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
1831				== res_ctx->pipe_ctx[i].plane_state)
1832			pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
1833		else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
1834			struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
1835
1836			while (first_pipe->prev_odm_pipe)
1837				first_pipe = first_pipe->prev_odm_pipe;
1838			pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
1839		}
1840
1841		switch (res_ctx->pipe_ctx[i].stream->signal) {
1842		case SIGNAL_TYPE_DISPLAY_PORT_MST:
1843		case SIGNAL_TYPE_DISPLAY_PORT:
1844			pipes[pipe_cnt].dout.output_type = dm_dp;
1845			break;
1846		case SIGNAL_TYPE_EDP:
1847			pipes[pipe_cnt].dout.output_type = dm_edp;
1848			break;
1849		case SIGNAL_TYPE_HDMI_TYPE_A:
1850		case SIGNAL_TYPE_DVI_SINGLE_LINK:
1851		case SIGNAL_TYPE_DVI_DUAL_LINK:
1852			pipes[pipe_cnt].dout.output_type = dm_hdmi;
1853			break;
1854		default:
1855			/* In case there is no signal, set dp with 4 lanes to allow max config */
1856			pipes[pipe_cnt].dout.output_type = dm_dp;
1857			pipes[pipe_cnt].dout.dp_lanes = 4;
1858		}
1859
1860		switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
1861		case COLOR_DEPTH_666:
1862			output_bpc = 6;
1863			break;
1864		case COLOR_DEPTH_888:
1865			output_bpc = 8;
1866			break;
1867		case COLOR_DEPTH_101010:
1868			output_bpc = 10;
1869			break;
1870		case COLOR_DEPTH_121212:
1871			output_bpc = 12;
1872			break;
1873		case COLOR_DEPTH_141414:
1874			output_bpc = 14;
1875			break;
1876		case COLOR_DEPTH_161616:
1877			output_bpc = 16;
1878			break;
1879#ifdef CONFIG_DRM_AMD_DC_DCN2_0
1880		case COLOR_DEPTH_999:
1881			output_bpc = 9;
1882			break;
1883		case COLOR_DEPTH_111111:
1884			output_bpc = 11;
1885			break;
1886#endif
1887		default:
1888			output_bpc = 8;
1889			break;
1890		}
1891
1892		switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
1893		case PIXEL_ENCODING_RGB:
1894		case PIXEL_ENCODING_YCBCR444:
1895			pipes[pipe_cnt].dout.output_format = dm_444;
1896			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
1897			break;
1898		case PIXEL_ENCODING_YCBCR420:
1899			pipes[pipe_cnt].dout.output_format = dm_420;
1900			pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
1901			break;
1902		case PIXEL_ENCODING_YCBCR422:
1903			if (true) /* todo */
1904				pipes[pipe_cnt].dout.output_format = dm_s422;
1905			else
1906				pipes[pipe_cnt].dout.output_format = dm_n422;
1907			pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
1908			break;
1909		default:
1910			pipes[pipe_cnt].dout.output_format = dm_444;
1911			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
1912		}
1913
1914		/* todo: default max for now, until there is logic reflecting this in DC */
1915		pipes[pipe_cnt].dout.output_bpc = 12;
1916		/*
1917		 * Use max cursor settings for the calculations to avoid
1918		 * redoing bandwidth calculations when the cursor toggles on/off.
1919		 */
1920		pipes[pipe_cnt].pipe.src.num_cursors = 2;
1921		pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
1922		pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
1923		pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
1924		pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
1925
1926		if (!res_ctx->pipe_ctx[i].plane_state) {
1927			pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
1928			pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
1929			pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
1930			pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
1931			if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
1932				pipes[pipe_cnt].pipe.src.viewport_width = 1920;
1933			pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
1934			if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
1935				pipes[pipe_cnt].pipe.src.viewport_height = 1080;
1936			pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
1937			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
1938			pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
1939			pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
1940			pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width;  /*when is_hsplit != 1*/
1941			pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
1942			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
1943			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
1944			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
1945			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
1946			pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
1947			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
1948			pipes[pipe_cnt].pipe.src.is_hsplit = 0;
1949			pipes[pipe_cnt].pipe.dest.odm_combine = 0;
1950			pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
1951			pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
1952		} else {
1953			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
1954			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
1955
1956			pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
1957			pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
1958					&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
1959					|| (res_ctx->pipe_ctx[i].top_pipe
1960					&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
1961			pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
1962					|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
1963			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
1964			pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
1965			pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
1966			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
1967			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
1968			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
1969			if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1970				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
1971				pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
1972				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
1973				pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
1974			} else {
1975				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
1976				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
1977			}
1978			pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
1979			pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
1980			pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
1981			pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
1982			pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
1983			if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
1984				pipes[pipe_cnt].pipe.dest.full_recout_width +=
1985						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
1986				pipes[pipe_cnt].pipe.dest.full_recout_height +=
1987						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
1988			} else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
1989				pipes[pipe_cnt].pipe.dest.full_recout_width +=
1990						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
1991				pipes[pipe_cnt].pipe.dest.full_recout_height +=
1992						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
1993			}
1994
1995			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
1996			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
1997			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
1998			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
1999			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
2000			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
2001					scl->ratios.vert.value != dc_fixpt_one.value
2002					|| scl->ratios.horz.value != dc_fixpt_one.value
2003					|| scl->ratios.vert_c.value != dc_fixpt_one.value
2004					|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
2005					|| dc->debug.always_scale; /*support always scale*/
2006			pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
2007			pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
2008			pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
2009			pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
2010
2011			pipes[pipe_cnt].pipe.src.macro_tile_size =
2012					swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
2013			swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
2014					&pipes[pipe_cnt].pipe.src.sw_mode);
2015
2016			switch (pln->format) {
2017			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
2018			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
2019				pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
2020				break;
2021			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
2022			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
2023				pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
2024				break;
2025			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
2026			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
2027			case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
2028				pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
2029				break;
2030			case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
2031			case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
2032				pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
2033				break;
2034			case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
2035				pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
2036				break;
2037			default:
2038				pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
2039				break;
2040			}
2041		}
2042
2043		pipe_cnt++;
2044	}
2045
2046	/* populate writeback information */
2047	dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
2048
2049	return pipe_cnt;
2050}
2051
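/*
 * Derive the maximum scaled line time the writeback (MCIF_WB) buffer can
 * absorb from the per-pixel time, the surface packing mode and the urgent
 * watermark.
 */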
2052unsigned int dcn20_calc_max_scaled_time(
2053		unsigned int time_per_pixel,
2054		enum mmhubbub_wbif_mode mode,
2055		unsigned int urgent_watermark)
2056{
2057	unsigned int time_per_byte = 0;
2058	unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
2059	unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
2060	unsigned int small_free_entry, max_free_entry;
2061	unsigned int buf_lh_capability;
2062	unsigned int max_scaled_time;
2063
2064	if (mode == PACKED_444) /* packed mode */
2065		time_per_byte = time_per_pixel / 4;
2066	else if (mode == PLANAR_420_8BPC)
2067		time_per_byte = time_per_pixel;
2068	else if (mode == PLANAR_420_10BPC) /* p010 */
2069		time_per_byte = time_per_pixel * 819 / 1024;
2070
2071	if (time_per_byte == 0)
2072		time_per_byte = 1;
2073
2074	small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
2075	max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
2076	buf_lh_capability = max_free_entry * time_per_byte * 32 / 16; /* there is a 4-bit fraction */
2077	max_scaled_time   = buf_lh_capability - urgent_watermark;
2078	return max_scaled_time;
2079}
2080
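/*
 * Program MCIF_WB arbitration parameters for every enabled writeback pipe:
 * watermarks are taken from the DML writeback getters and max_scaled_time is
 * derived from the stream's pixel clock and the writeback packing mode.
 */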
2081void dcn20_set_mcif_arb_params(
2082		struct dc *dc,
2083		struct dc_state *context,
2084		display_e2e_pipe_params_st *pipes,
2085		int pipe_cnt)
2086{
2087	enum mmhubbub_wbif_mode wbif_mode;
2088	struct mcif_arb_params *wb_arb_params;
2089	int i, j, k, dwb_pipe;
2090
2091	/* Writeback MCIF_WB arbitration parameters */
2092	dwb_pipe = 0;
2093	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2094
2095		if (!context->res_ctx.pipe_ctx[i].stream)
2096			continue;
2097
2098		for (j = 0; j < MAX_DWB_PIPES; j++) {
2099			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
2100				continue;
2101
2102			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
2103			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
2104
2105			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
2106				if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
2107					wbif_mode = PLANAR_420_8BPC;
2108				else
2109					wbif_mode = PLANAR_420_10BPC;
2110			} else
2111				wbif_mode = PACKED_444;
2112
2113			for (k = 0; k < ARRAY_SIZE(wb_arb_params->cli_watermark); k++) {
2114				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2115				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2116			}
2117			wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
2118			wb_arb_params->slice_lines = 32;
2119			wb_arb_params->arbitration_slice = 2;
2120			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
2121				wbif_mode,
2122				wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
2123
2124			dwb_pipe++;
2125
2126			if (dwb_pipe >= MAX_DWB_PIPES)
2127				return;
2128		}
2129		if (dwb_pipe >= MAX_DWB_PIPES)
2130			return;
2131	}
2132}
2133
2134#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
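/*
 * Check that each DSC-enabled stream's timing can actually be encoded by the
 * DSC block, splitting the picture width and slice count across ODM pipes.
 */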
2135static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
2136{
2137	int i;
2138
2139	/* Validate DSC config, dsc count validation is already done */
2140	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2141		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
2142		struct dc_stream_state *stream = pipe_ctx->stream;
2143		struct dsc_config dsc_cfg;
2144		struct pipe_ctx *odm_pipe;
2145		int opp_cnt = 1;
2146
2147		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2148			opp_cnt++;
2149
2150		/* Only need to validate top pipe */
2151		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
2152			continue;
2153
2154		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
2155				+ stream->timing.h_border_right) / opp_cnt;
2156		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
2157				+ stream->timing.v_border_bottom;
2158		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
2159		dsc_cfg.color_depth = stream->timing.display_color_depth;
2160		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
2161		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
2162
2163		if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
2164			return false;
2165	}
2166	return true;
2167}
2168#endif
2169
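/*
 * Pick a free pipe to use as the secondary (bottom/ODM) pipe for a primary
 * pipe, preferring whatever pipe played that role in the current dc state so
 * that existing pipe assignments are disturbed as little as possible.
 */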
2170static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
2171		struct resource_context *res_ctx,
2172		const struct resource_pool *pool,
2173		const struct pipe_ctx *primary_pipe)
2174{
2175	struct pipe_ctx *secondary_pipe = NULL;
2176
2177	if (dc && primary_pipe) {
2178		int j;
2179		int preferred_pipe_idx = 0;
2180
2181		/* First check the previous dc state:
2182		 * if this primary pipe had a bottom pipe in the previous state
2183		 * and that bottom pipe is still available (which it should be),
2184		 * pick it as the secondary pipe.
2185		 * The same logic applies for ODM pipes; since MPO is not allowed
2186		 * together with ODM, the ODM case is checked in the else branch.
2187		 */
2188		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
2189			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
2190			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
2191				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
2192				secondary_pipe->pipe_idx = preferred_pipe_idx;
2193			}
2194		} else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
2195			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
2196			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
2197				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
2198				secondary_pipe->pipe_idx = preferred_pipe_idx;
2199			}
2200		}
2201
2202		/*
2203		 * If this primary pipe did not have a bottom pipe in the previous
2204		 * state, search backwards for a pipe that was not a bottom pipe in
2205		 * the previous dc state. This keeps the same assignment as the last
2206		 * state and avoids reprogramming every pipe.
2207		 */
2208		if (secondary_pipe == NULL) {
2209			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
2210				if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
2211					preferred_pipe_idx = j;
2212
2213					if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
2214						secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
2215						secondary_pipe->pipe_idx = preferred_pipe_idx;
2216						break;
2217					}
2218				}
2219			}
2220		}
2221		/*
2222		 * We should never hit this assert unless assignments are shuffled
2223		 * around; if this happens we will probably hit a vsync TDR.
2224		 */
2225		ASSERT(secondary_pipe);
2226		/*
2227		 * search backwards for the second pipe to keep pipe
2228		 * assignment more consistent
2229		 */
2230		if (secondary_pipe == NULL) {
2231			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
2232				preferred_pipe_idx = j;
2233
2234				if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
2235					secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
2236					secondary_pipe->pipe_idx = preferred_pipe_idx;
2237					break;
2238				}
2239			}
2240		}
2241	}
2242
2243	return secondary_pipe;
2244}
2245
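/*
 * Fast bandwidth validation: merge any previously split pipes, populate the
 * DML pipe array, find the lowest voltage level that supports the mode, and
 * re-apply ODM/MPC pipe splits according to the DML decisions.
 */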
2246bool dcn20_fast_validate_bw(
2247		struct dc *dc,
2248		struct dc_state *context,
2249		display_e2e_pipe_params_st *pipes,
2250		int *pipe_cnt_out,
2251		int *pipe_split_from,
2252		int *vlevel_out)
2253{
2254	bool out = false;
2255
2256	int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
2257	bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
2258	bool force_split = false;
2259#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2260	bool failed_non_odm_dsc = false;
2261#endif
2262	int split_threshold = dc->res_pool->pipe_count / 2;
2263	bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
2264
2265
2266	ASSERT(pipes);
2267	if (!pipes)
2268		return false;
2269
2270	/* merge previously split odm pipes since mode support needs to make the decision */
2271	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2272		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2273		struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;
2274
2275		if (pipe->prev_odm_pipe)
2276			continue;
2277
2278		pipe->next_odm_pipe = NULL;
2279		while (odm_pipe) {
2280			struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
2281
2282			odm_pipe->plane_state = NULL;
2283			odm_pipe->stream = NULL;
2284			odm_pipe->top_pipe = NULL;
2285			odm_pipe->bottom_pipe = NULL;
2286			odm_pipe->prev_odm_pipe = NULL;
2287			odm_pipe->next_odm_pipe = NULL;
2288#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2289			if (odm_pipe->stream_res.dsc)
2290				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
2291#endif
2292			/* Clear plane_res and stream_res */
2293			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
2294			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
2295			odm_pipe = next_odm_pipe;
2296		}
2297		if (pipe->plane_state)
2298			resource_build_scaling_params(pipe);
2299	}
2300
2301	/* merge previously mpc split pipes since mode support needs to make the decision */
2302	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2303		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2304		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
2305
2306		if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
2307			continue;
2308
2309		pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
2310		if (hsplit_pipe->bottom_pipe)
2311			hsplit_pipe->bottom_pipe->top_pipe = pipe;
2312		hsplit_pipe->plane_state = NULL;
2313		hsplit_pipe->stream = NULL;
2314		hsplit_pipe->top_pipe = NULL;
2315		hsplit_pipe->bottom_pipe = NULL;
2316
2317		/* Clear plane_res and stream_res */
2318		memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
2319		memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
2320		if (pipe->plane_state)
2321			resource_build_scaling_params(pipe);
2322	}
2323
2324	if (dc->res_pool->funcs->populate_dml_pipes)
2325		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
2326			&context->res_ctx, pipes);
2327	else
2328		pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
2329			&context->res_ctx, pipes);
2330
2331	*pipe_cnt_out = pipe_cnt;
2332
2333	if (!pipe_cnt) {
2334		out = true;
2335		goto validate_out;
2336	}
2337
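	/* First DML pass with ODM combine disabled; retried with ODM enabled below if needed */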
2338	context->bw_ctx.dml.ip.odm_capable = 0;
2339
2340	vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2341
2342	context->bw_ctx.dml.ip.odm_capable = odm_capable;
2343
2344#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2345	/* 1 dsc per stream dsc validation */
2346	if (vlevel <= context->bw_ctx.dml.soc.num_states)
2347		if (!dcn20_validate_dsc(dc, context)) {
2348			failed_non_odm_dsc = true;
2349			vlevel = context->bw_ctx.dml.soc.num_states + 1;
2350		}
2351#endif
2352
2353	if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
2354		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2355
2356	if (vlevel > context->bw_ctx.dml.soc.num_states)
2357		goto validate_fail;
2358
2359	if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
2360		|| (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
2361		context->commit_hints.full_update_needed = true;
2362
2363	/* initialize pipe_split_from to invalid index */
2364	for (i = 0; i < MAX_PIPES; i++)
2365		pipe_split_from[i] = -1;
2366
2367	/* Single-display-only conditionals get set here */
2368	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2369		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2370		bool exit_loop = false;
2371
2372		if (!pipe->stream || pipe->top_pipe)
2373			continue;
2374
2375		if (dc->debug.force_single_disp_pipe_split) {
2376			if (!force_split)
2377				force_split = true;
2378			else {
2379				force_split = false;
2380				exit_loop = true;
2381			}
2382		}
2383		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
2384			if (avoid_split)
2385				avoid_split = false;
2386			else {
2387				avoid_split = true;
2388				exit_loop = true;
2389			}
2390		}
2391		if (exit_loop)
2392			break;
2393	}
2394
2395	if (context->stream_count > split_threshold)
2396		avoid_split = true;
2397
2398	vlevel_unsplit = vlevel;
2399	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2400		if (!context->res_ctx.pipe_ctx[i].stream)
2401			continue;
2402		for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
2403			if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
2404				break;
2405		pipe_idx++;
2406	}
2407
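	/* Apply the DML split decisions: acquire secondary pipes and split streams for ODM or MPC */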
2408	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
2409		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2410		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
2411		bool need_split = true;
2412		bool need_split3d;
2413
2414		if (!pipe->stream || pipe_split_from[i] >= 0)
2415			continue;
2416
2417		pipe_idx++;
2418
2419		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
2420			force_split = true;
2421			context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
2422			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
2423		}
2424		if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
2425			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
2426		if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
2427			hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
2428			ASSERT(hsplit_pipe);
2429			if (!dcn20_split_stream_for_odm(
2430					&context->res_ctx, dc->res_pool,
2431					pipe, hsplit_pipe))
2432				goto validate_fail;
2433			pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
2434			dcn20_build_mapped_resource(dc, context, pipe->stream);
2435		}
2436
2437		if (!pipe->plane_state)
2438			continue;
2439		/* Skip 2nd half of already split pipe */
2440		if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
2441			continue;
2442
2443		need_split3d = ((pipe->stream->view_format ==
2444				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
2445				pipe->stream->view_format ==
2446				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
2447				(pipe->stream->timing.timing_3d_format ==
2448				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
2449				 pipe->stream->timing.timing_3d_format ==
2450				TIMING_3D_FORMAT_SIDE_BY_SIDE));
2451
2452		if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
2453			need_split = false;
2454			vlevel = vlevel_unsplit;
2455			context->bw_ctx.dml.vba.maxMpcComb = 0;
2456		} else
2457			need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
2458
2459		/* We do not support mpo + odm at the moment */
2460		if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
2461				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
2462			goto validate_fail;
2463
2464		if (need_split3d || need_split || force_split) {
2465			if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
2466				/* a pipe that was not previously split now needs a split */
2467				hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
2468				ASSERT(hsplit_pipe || force_split);
2469				if (!hsplit_pipe)
2470					continue;
2471
2472				if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
2473					if (!dcn20_split_stream_for_odm(
2474							&context->res_ctx, dc->res_pool,
2475							pipe, hsplit_pipe))
2476						goto validate_fail;
2477				} else
2478					dcn20_split_stream_for_mpc(
2479						&context->res_ctx, dc->res_pool,
2480						pipe, hsplit_pipe);
2481				pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
2482			}
2483		} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
2484			/* merge should already have been done */
2485			ASSERT(0);
2486		}
2487	}
2488#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2489	/* Actual per-stream DSC count validation */
2490	if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
2491		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
2492				DML_FAIL_DSC_VALIDATION_FAILURE;
2493		goto validate_fail;
2494	}
2495#endif
2496
2497	*vlevel_out = vlevel;
2498
2499	out = true;
2500	goto validate_out;
2501
2502validate_fail:
2503	out = false;
2504
2505validate_out:
2506	return out;
2507}
2508
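/*
 * Fill in per-pipe clock configuration from the DML results and compute the
 * four watermark sets (A-D) used for DCHUB programming.
 */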
2509void dcn20_calculate_wm(
2510		struct dc *dc, struct dc_state *context,
2511		display_e2e_pipe_params_st *pipes,
2512		int *out_pipe_cnt,
2513		int *pipe_split_from,
2514		int vlevel)
2515{
2516	int pipe_cnt, i, pipe_idx;
2517
2518	for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
2519		if (!context->res_ctx.pipe_ctx[i].stream)
2520			continue;
2521
2522		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
2523		pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2524
2525		if (pipe_split_from[i] < 0) {
2526			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2527					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
2528			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
2529				pipes[pipe_cnt].pipe.dest.odm_combine =
2530						context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
2531			else
2532				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2533			pipe_idx++;
2534		} else {
2535			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2536					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
2537			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
2538				pipes[pipe_cnt].pipe.dest.odm_combine =
2539						context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
2540			else
2541				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2542		}
2543
2544		if (dc->config.forced_clocks) {
2545			pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
2546			pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2547		}
2548		if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000)
2549			pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
2550		if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000)
2551			pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
2552
2553		pipe_cnt++;
2554	}
2555
2556	if (pipe_cnt != pipe_idx) {
2557		if (dc->res_pool->funcs->populate_dml_pipes)
2558			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
2559				&context->res_ctx, pipes);
2560		else
2561			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
2562				&context->res_ctx, pipes);
2563	}
2564
2565	*out_pipe_cnt = pipe_cnt;
2566
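	/*
	 * Watermark sets B, C and D are computed with the clocks clamped to
	 * successively higher minimum voltage levels; set A uses the clocks of
	 * the validated level itself.
	 */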
2567	pipes[0].clks_cfg.voltage = vlevel;
2568	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2569	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2570
2571	/* only pipe 0 is read for voltage and dcf/soc clocks */
2572	if (vlevel < 1) {
2573		pipes[0].clks_cfg.voltage = 1;
2574		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
2575		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
2576	}
2577	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2578	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2579	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2580	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2581	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2582
2583	if (vlevel < 2) {
2584		pipes[0].clks_cfg.voltage = 2;
2585		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
2586		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
2587	}
2588	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2589	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2590	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2591	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2592	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2593
2594	if (vlevel < 3) {
2595		pipes[0].clks_cfg.voltage = 3;
2596		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].dcfclk_mhz;
2597		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].socclk_mhz;
2598	}
2599	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2600	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2601	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2602	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2603	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2604
2605	pipes[0].clks_cfg.voltage = vlevel;
2606	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2607	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2608	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2609	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2610	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2611	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2612	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2613}
2614
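/*
 * Extract clocks, p-state support and per-pipe DLG/TTU/RQ register values
 * from the completed DML calculation into the dc_state.
 */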
2615void dcn20_calculate_dlg_params(
2616		struct dc *dc, struct dc_state *context,
2617		display_e2e_pipe_params_st *pipes,
2618		int pipe_cnt,
2619		int vlevel)
2620{
2621	int i, j, pipe_idx, pipe_idx_unsplit;
2622	bool visited[MAX_PIPES] = { 0 };
2623
2624	/* Writeback MCIF_WB arbitration parameters */
2625	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
2626
2627	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
2628	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
2629	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
2630	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
2631	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
2632	context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
2633	context->bw_ctx.bw.dcn.clk.p_state_change_support =
2634		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
2635							!= dm_dram_clock_change_unsupported;
2636	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
2637
2638	/*
2639	 * An artifact of DML pipe split/ODM is that pipes get merged back together
2640	 * for calculation. Therefore we only need to extract the values for the
2641	 * first pipe, in ascending index order, and copy them into the other split half.
2642	 */
2643	for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
2644		if (!context->res_ctx.pipe_ctx[i].stream)
2645			continue;
2646
2647		if (!visited[pipe_idx]) {
2648			display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src;
2649			display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest;
2650
2651			dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
2652			dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
2653			dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
2654			dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
2655			/*
2656			 * j iterates over the pipes array, unlike i, which iterates
2657			 * over the pipe_ctx array.
2658			 */
2659			if (src->is_hsplit)
2660				for (j = pipe_idx + 1; j < pipe_cnt; j++) {
2661					display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
2662					display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;
2663
2664					if (src_j->is_hsplit && !visited[j]
2665							&& src->hsplit_grp == src_j->hsplit_grp) {
2666						dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
2667						dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
2668						dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
2669						dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
2670						visited[j] = true;
2671					}
2672				}
2673			visited[pipe_idx] = true;
2674			pipe_idx_unsplit++;
2675		}
2676		pipe_idx++;
2677	}
2678
2679	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2680		if (!context->res_ctx.pipe_ctx[i].stream)
2681			continue;
2682		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
2683			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
2684		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
2685						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
2686		ASSERT(visited[pipe_idx]);
2687		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
2688		pipe_idx++;
2689	}
2690	/* save an original dppclk copy */
2691	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
2692	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
2693	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
2694	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
2695
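	/* Compute DLG, TTU and RQ register values for each active pipe */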
2696	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2697		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
2698
2699		if (!context->res_ctx.pipe_ctx[i].stream)
2700			continue;
2701
2702		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
2703				&context->res_ctx.pipe_ctx[i].dlg_regs,
2704				&context->res_ctx.pipe_ctx[i].ttu_regs,
2705				pipes,
2706				pipe_cnt,
2707				pipe_idx,
2708				cstate_en,
2709				context->bw_ctx.bw.dcn.clk.p_state_change_support,
2710				false, false, false);
2711
2712		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
2713				&context->res_ctx.pipe_ctx[i].rq_regs,
2714				pipes[pipe_idx].pipe);
2715		pipe_idx++;
2716	}
2717}
2718
2719static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
2720		bool fast_validate)
2721{
2722	bool out = false;
2723
2724	BW_VAL_TRACE_SETUP();
2725
2726	int vlevel = 0;
2727	int pipe_split_from[MAX_PIPES];
2728	int pipe_cnt = 0;
2729	display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count, sizeof(*pipes), GFP_KERNEL);
2730	DC_LOGGER_INIT(dc->ctx->logger);
2731
2732	BW_VAL_TRACE_COUNT();
2733
2734	out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
2735
2736	if (pipe_cnt == 0)
2737		goto validate_out;
2738
2739	if (!out)
2740		goto validate_fail;
2741
2742	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
2743
2744	if (fast_validate) {
2745		BW_VAL_TRACE_SKIP(fast);
2746		goto validate_out;
2747	}
2748
2749	dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel);
2750	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
2751
2752	BW_VAL_TRACE_END_WATERMARKS();
2753
2754	goto validate_out;
2755
2756validate_fail:
2757	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
2758		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
2759
2760	BW_VAL_TRACE_SKIP(fail);
2761	out = false;
2762
2763validate_out:
2764	kfree(pipes);
2765
2766	BW_VAL_TRACE_FINISH();
2767
2768	return out;
2769}
2770
2771
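/*
 * Full validation is done in two passes: first with the real p-state (UCLK
 * switch) latency, and if that fails, again with the dummy p-state latency;
 * p_state_change_support is reported accordingly and the DML state restored.
 */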
2772bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
2773		bool fast_validate)
2774{
2775	bool voltage_supported = false;
2776	bool full_pstate_supported = false;
2777	bool dummy_pstate_supported = false;
2778	double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
2779
2780	if (fast_validate)
2781		return dcn20_validate_bandwidth_internal(dc, context, true);
2782
2783
2784	// Best case, we support full UCLK switch latency
2785	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
2786	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2787
2788	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
2789		(voltage_supported && full_pstate_supported)) {
2790		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
2791		goto restore_dml_state;
2792	}
2793
2794	// Fallback: Try to only support G6 temperature read latency
2795	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
2796
2797	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
2798	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2799
2800	if (voltage_supported && dummy_pstate_supported) {
2801		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
2802		goto restore_dml_state;
2803	}
2804
2805	// ERROR: fallback is supposed to always work.
2806	ASSERT(false);
2807
2808restore_dml_state:
2809	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2810	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
2811
2812	return voltage_supported;
2813}
2814
2815struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
2816		struct dc_state *state,
2817		const struct resource_pool *pool,
2818		struct dc_stream_state *stream)
2819{
2820	struct resource_context *res_ctx = &state->res_ctx;
2821	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
2822	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
2823
2824	if (!head_pipe)
2825		ASSERT(0);
2826
2827	if (!idle_pipe)
2828		return NULL;
2829
2830	idle_pipe->stream = head_pipe->stream;
2831	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
2832	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
2833
2834	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
2835	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
2836	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
2837	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
2838
2839	return idle_pipe;
2840}
2841
2842bool dcn20_get_dcc_compression_cap(const struct dc *dc,
2843		const struct dc_dcc_surface_param *input,
2844		struct dc_surface_dcc_cap *output)
2845{
2846	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
2847			dc->res_pool->hubbub,
2848			input,
2849			output);
2850}
2851
2852static void dcn20_destroy_resource_pool(struct resource_pool **pool)
2853{
2854	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
2855
2856	destruct(dcn20_pool);
2857	kfree(dcn20_pool);
2858	*pool = NULL;
2859}
2860
2861
2862static struct dc_cap_funcs cap_funcs = {
2863	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
2864};
2865
2866
2867enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
2868{
2869	enum dc_status result = DC_OK;
2870
2871	enum surface_pixel_format surf_pix_format = plane_state->format;
2872	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
2873
2874	enum swizzle_mode_values swizzle = DC_SW_LINEAR;
2875
2876	if (bpp == 64)
2877		swizzle = DC_SW_64KB_D;
2878	else
2879		swizzle = DC_SW_64KB_S;
2880
2881	plane_state->tiling_info.gfx9.swizzle = swizzle;
2882	return result;
2883}
2884
2885static struct resource_funcs dcn20_res_pool_funcs = {
2886	.destroy = dcn20_destroy_resource_pool,
2887	.link_enc_create = dcn20_link_encoder_create,
2888	.validate_bandwidth = dcn20_validate_bandwidth,
2889	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
2890	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
2891	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
2892	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
2893	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
2894	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
2895	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
2896};
2897
2898bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
2899{
2900	int i;
2901	uint32_t pipe_count = pool->res_cap->num_dwb;
2902
2903	ASSERT(pipe_count > 0);
2904
2905	for (i = 0; i < pipe_count; i++) {
2906		struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
2907						    GFP_KERNEL);
2908
2909		if (!dwbc20) {
2910			dm_error("DC: failed to create dwbc20!\n");
2911			return false;
2912		}
2913		dcn20_dwbc_construct(dwbc20, ctx,
2914				&dwbc20_regs[i],
2915				&dwbc20_shift,
2916				&dwbc20_mask,
2917				i);
2918		pool->dwbc[i] = &dwbc20->base;
2919	}
2920	return true;
2921}
2922
2923bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
2924{
2925	int i;
2926	uint32_t pipe_count = pool->res_cap->num_dwb;
2927
2928	ASSERT(pipe_count > 0);
2929
2930	for (i = 0; i < pipe_count; i++) {
2931		struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
2932						    GFP_KERNEL);
2933
2934		if (!mcif_wb20) {
2935			dm_error("DC: failed to create mcif_wb20!\n");
2936			return false;
2937		}
2938
2939		dcn20_mmhubbub_construct(mcif_wb20, ctx,
2940				&mcif_wb20_regs[i],
2941				&mcif_wb20_shift,
2942				&mcif_wb20_mask,
2943				i);
2944
2945		pool->mcif_wb[i] = &mcif_wb20->base;
2946	}
2947	return true;
2948}
2949
2950struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
2951{
2952	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
2953
2954	if (!pp_smu)
2955		return pp_smu;
2956
2957	dm_pp_get_funcs(ctx, pp_smu);
2958
2959	if (pp_smu->ctx.ver != PP_SMU_VER_NV)
2960		pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
2961
2962	return pp_smu;
2963}
2964
2965void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
2966{
2967	if (pp_smu && *pp_smu) {
2968		kfree(*pp_smu);
2969		*pp_smu = NULL;
2970	}
2971}
2972
2973static void cap_soc_clocks(
2974		struct _vcs_dpi_soc_bounding_box_st *bb,
2975		struct pp_smu_nv_clock_table max_clocks)
2976{
2977	int i;
2978
2979	// First pass - cap all clocks higher than the reported max
2980	for (i = 0; i < bb->num_states; i++) {
2981		if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
2982				&& max_clocks.dcfClockInKhz != 0)
2983			bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
2984
2985		if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
2986						&& max_clocks.uClockInKhz != 0)
2987			bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
2988
2989		if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
2990						&& max_clocks.fabricClockInKhz != 0)
2991			bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
2992
2993		if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
2994						&& max_clocks.displayClockInKhz != 0)
2995			bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
2996
2997		if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
2998						&& max_clocks.dppClockInKhz != 0)
2999			bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
3000
3001		if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
3002						&& max_clocks.phyClockInKhz != 0)
3003			bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
3004
3005		if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
3006						&& max_clocks.socClockInKhz != 0)
3007			bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
3008
3009		if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
3010						&& max_clocks.dscClockInKhz != 0)
3011			bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
3012	}
3013
3014	// Second pass - remove all duplicate clock states
3015	for (i = bb->num_states - 1; i > 1; i--) {
3016		bool duplicate = true;
3017
3018		if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
3019			duplicate = false;
3020		if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
3021			duplicate = false;
3022		if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
3023			duplicate = false;
3024		if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
3025			duplicate = false;
3026		if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
3027			duplicate = false;
3028		if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
3029			duplicate = false;
3030		if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
3031			duplicate = false;
3032		if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
3033			duplicate = false;
3034
3035		if (duplicate)
3036			bb->num_states--;
3037	}
3038}
3039
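/*
 * Rebuild the bounding-box clock table from the UCLK DPM states reported by
 * the SMU, deriving FCLK/DCFCLK/SOCCLK per state and capping them to the
 * maximum sustainable clocks.
 */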
3040static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
3041		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
3042{
3043	struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
3044	int i;
3045	int num_calculated_states = 0;
3046	int min_dcfclk = 0;
3047
3048	if (num_states == 0)
3049		return;
3050
3051	if (dc->bb_overrides.min_dcfclk_mhz > 0)
3052		min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
3053	else
3054		// Accounting for SOC/DCF relationship, we can go as high as
3055		// 506 MHz in Vmin. We need to code 507 since the SMU will round down to 506.
3056		min_dcfclk = 507;
3057
3058	for (i = 0; i < num_states; i++) {
3059		int min_fclk_required_by_uclk;
3060		calculated_states[i].state = i;
3061		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
3062
3063		// FCLK:UCLK ratio is 1.08
3064		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
3065
3066		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
3067				min_dcfclk : min_fclk_required_by_uclk;
3068
3069		calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
3070				max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
3071
3072		calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
3073				max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
3074
3075		calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
3076		calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
3077		calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
3078
3079		calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
3080
3081		num_calculated_states++;
3082	}
3083
3084	calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
3085	calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
3086	calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;
3087
3088	memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
3089	bb->num_states = num_calculated_states;
3090
3091	// Duplicate the last state; DML always needs an extra state identical to the max state to work
3092	memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
3093	bb->clock_limits[num_calculated_states].state = bb->num_states;
3094}
3095
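/* Apply any dc->bb_overrides latency values to the bounding box (FPU context required for the double math) */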
3096static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
3097{
3098	kernel_fpu_begin();
3099	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
3100			&& dc->bb_overrides.sr_exit_time_ns) {
3101		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
3102	}
3103
3104	if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
3105				!= dc->bb_overrides.sr_enter_plus_exit_time_ns
3106			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
3107		bb->sr_enter_plus_exit_time_us =
3108				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
3109	}
3110
3111	if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
3112			&& dc->bb_overrides.urgent_latency_ns) {
3113		bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
3114	}
3115
3116	if ((int)(bb->dram_clock_change_latency_us * 1000)
3117				!= dc->bb_overrides.dram_clock_change_latency_ns
3118			&& dc->bb_overrides.dram_clock_change_latency_ns) {
3119		bb->dram_clock_change_latency_us =
3120				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
3121	}
3122	kernel_fpu_end();
3123}
3124
3125static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
3126	uint32_t hw_internal_rev)
3127{
3128	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
3129		return &dcn2_0_nv12_soc;
3130
3131	return &dcn2_0_soc;
3132}
3133
3134static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
3135	uint32_t hw_internal_rev)
3136{
3137	/* NV12 and NV10 */
3138	return &dcn2_0_ip;
3139}
3140
3141static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
3142{
3143	return DML_PROJECT_NAVI10v2;
3144}
3145
3146#define fixed16_to_double(x) (((double)(x)) / ((double)(1 << 16)))
3147#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
3148
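/*
 * Populate the SOC bounding box: copy the firmware-provided values (little-
 * endian, 16.16 fixed point) into the NV12 table when the built-in box is not
 * valid, then refine the clock states with information from the SMU.
 */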
3149static bool init_soc_bounding_box(struct dc *dc,
3150				  struct dcn20_resource_pool *pool)
3151{
3152	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
3153	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
3154			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
3155	struct _vcs_dpi_ip_params_st *loaded_ip =
3156			get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
3157
3158	DC_LOGGER_INIT(dc->ctx->logger);
3159
3160	if (!bb && !SOC_BOUNDING_BOX_VALID) {
3161		DC_LOG_ERROR("%s: invalid SOC bounding box\n", __func__);
3162		return false;
3163	}
3164
3165	if (bb && !SOC_BOUNDING_BOX_VALID) {
3166		int i;
3167
3168		dcn2_0_nv12_soc.sr_exit_time_us =
3169				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
3170		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
3171				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
3172		dcn2_0_nv12_soc.urgent_latency_us =
3173				fixed16_to_double_to_cpu(bb->urgent_latency_us);
3174		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
3175				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
3176		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
3177				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
3178		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
3179				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
3180		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
3181				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
3182		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
3183				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
3184		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
3185				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
3186		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
3187				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
3188		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
3189				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
3190		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
3191				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
3192		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
3193				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
3194		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
3195				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
3196		dcn2_0_nv12_soc.writeback_latency_us =
3197				fixed16_to_double_to_cpu(bb->writeback_latency_us);
3198		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
3199				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
3200		dcn2_0_nv12_soc.max_request_size_bytes =
3201				le32_to_cpu(bb->max_request_size_bytes);
3202		dcn2_0_nv12_soc.dram_channel_width_bytes =
3203				le32_to_cpu(bb->dram_channel_width_bytes);
3204		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
3205				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
3206		dcn2_0_nv12_soc.dcn_downspread_percent =
3207				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
3208		dcn2_0_nv12_soc.downspread_percent =
3209				fixed16_to_double_to_cpu(bb->downspread_percent);
3210		dcn2_0_nv12_soc.dram_page_open_time_ns =
3211				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
3212		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
3213				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
3214		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
3215				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
3216		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
3217				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
3218		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
3219				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
3220		dcn2_0_nv12_soc.channel_interleave_bytes =
3221				le32_to_cpu(bb->channel_interleave_bytes);
3222		dcn2_0_nv12_soc.num_banks =
3223				le32_to_cpu(bb->num_banks);
3224		dcn2_0_nv12_soc.num_chans =
3225				le32_to_cpu(bb->num_chans);
3226		dcn2_0_nv12_soc.vmm_page_size_bytes =
3227				le32_to_cpu(bb->vmm_page_size_bytes);
3228		dcn2_0_nv12_soc.dram_clock_change_latency_us =
3229				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
3230		// HACK!! Lower the uclk switch latency so we don't switch
3231		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
3232		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
3233				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
3234		dcn2_0_nv12_soc.return_bus_width_bytes =
3235				le32_to_cpu(bb->return_bus_width_bytes);
3236		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
3237				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
3238		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
3239				le32_to_cpu(bb->xfc_bus_transport_time_us);
3240		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
3241				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
3242		dcn2_0_nv12_soc.use_urgent_burst_bw =
3243				le32_to_cpu(bb->use_urgent_burst_bw);
3244		dcn2_0_nv12_soc.num_states =
3245				le32_to_cpu(bb->num_states);
3246
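		/*
		 * Per-state clock limits arrive from firmware as little-endian
		 * fixed-point values; convert each one for use by DML.
		 */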
3247		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
3248			dcn2_0_nv12_soc.clock_limits[i].state =
3249					le32_to_cpu(bb->clock_limits[i].state);
3250			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
3251					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
3252			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
3253					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
3254			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
3255					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
3256			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
3257					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
3258			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
3259					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
3260			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
3261					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
3262			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
3263					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
3264			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
3265					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
3266		}
3267	}
3268
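	/*
	 * If a PP/SMU interface is present, query the UCLK DPM states and the
	 * maximum sustainable clocks.  When both are available the bounding box
	 * is rebuilt around them; if only the max clocks are known, the SOC
	 * clock states are simply capped.
	 */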
3269	if (pool->base.pp_smu) {
3270		struct pp_smu_nv_clock_table max_clocks = {0};
3271		unsigned int uclk_states[8] = {0};
3272		unsigned int num_states = 0;
3273		enum pp_smu_status status;
3274		bool clock_limits_available = false;
3275		bool uclk_states_available = false;
3276
3277		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
3278			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
3279				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
3280
3281			uclk_states_available = (status == PP_SMU_RESULT_OK);
3282		}
3283
3284		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
3285			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
3286					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
3287			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
3288			 */
3289			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
3290				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
3291			clock_limits_available = (status == PP_SMU_RESULT_OK);
3292		}
3293
3294		if (clock_limits_available && uclk_states_available && num_states)
3295			update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
3296		else if (clock_limits_available)
3297			cap_soc_clocks(loaded_bb, max_clocks);
3298	}
3299
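	/*
	 * Clamp the DML IP parameters to what this pool actually exposes (OTG
	 * and pipe counts), then let patch_bounding_box() apply any final
	 * adjustments.
	 */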
3300	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
3301	loaded_ip->max_num_dpp = pool->base.pipe_count;
3302	patch_bounding_box(dc, loaded_bb);
3303
3304	return true;
3305}
3306
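/*
 * Build the DCN 2.0 resource pool: pick the per-ASIC caps, IP parameters and
 * SOC bounding box, create the clock sources, DCCG, DMCU, ABM, the HUBP/IPP/
 * DPP pipes, AUX/I2C engines, OPPs, OTGs, MPC, HUBBUB, DSC (when enabled) and
 * DWB/MMHUBBUB blocks, and (unless disabled) report watermark ranges to
 * PP/SMU.  On any failure the partially built pool is destructed and false is
 * returned.
 */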
3307static bool construct(
3308	uint8_t num_virtual_links,
3309	struct dc *dc,
3310	struct dcn20_resource_pool *pool)
3311{
3312	int i;
3313	struct dc_context *ctx = dc->ctx;
3314	struct irq_service_init_data init_data;
3315	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
3316			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
3317	struct _vcs_dpi_ip_params_st *loaded_ip =
3318			get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
3319	enum dml_project dml_project_version =
3320			get_dml_project_version(ctx->asic_id.hw_internal_rev);
3321
3322	ctx->dc_bios->regs = &bios_regs;
3323	pool->base.funcs = &dcn20_res_pool_funcs;
3324
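	/* Navi14 exposes 5 pipes/MPCCs; other ASICs handled here use the Navi10
	 * resource caps with 6. */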
3325	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
3326		pool->base.res_cap = &res_cap_nv14;
3327		pool->base.pipe_count = 5;
3328		pool->base.mpcc_count = 5;
3329	} else {
3330		pool->base.res_cap = &res_cap_nv10;
3331		pool->base.pipe_count = 6;
3332		pool->base.mpcc_count = 6;
3333	}
3334	/*************************************************
3335	 *  Resource + asic cap hardcoding               *
3336	 *************************************************/
3337	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
3338
3339	dc->caps.max_downscale_ratio = 200;
3340	dc->caps.i2c_speed_in_khz = 100;
3341	dc->caps.max_cursor_size = 256;
3342	dc->caps.dmdata_alloc_size = 2048;
3343
3344	dc->caps.max_slave_planes = 1;
3345	dc->caps.post_blend_color_processing = true;
3346	dc->caps.force_dp_tps4_for_cp2520 = true;
3347	dc->caps.hw_3d_lut = true;
3348
3349	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
3350		dc->debug = debug_defaults_drv;
3351	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
3352		pool->base.pipe_count = 4;
3353		pool->base.mpcc_count = pool->base.pipe_count;
3354		dc->debug = debug_defaults_diags;
3355	} else {
3356		dc->debug = debug_defaults_diags;
3357	}
3358	// dcn2.0x: enable the dedcn20_305 workaround
3359	dc->work_arounds.dedcn20_305_wa = true;
3360
3361	// Init the vm_helper
3362	if (dc->vm_helper)
3363		vm_helper_init(dc->vm_helper, 16);
3364
3365	/*************************************************
3366	 *  Create resources                             *
3367	 *************************************************/
3368
3369	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
3370			dcn20_clock_source_create(ctx, ctx->dc_bios,
3371				CLOCK_SOURCE_COMBO_PHY_PLL0,
3372				&clk_src_regs[0], false);
3373	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
3374			dcn20_clock_source_create(ctx, ctx->dc_bios,
3375				CLOCK_SOURCE_COMBO_PHY_PLL1,
3376				&clk_src_regs[1], false);
3377	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
3378			dcn20_clock_source_create(ctx, ctx->dc_bios,
3379				CLOCK_SOURCE_COMBO_PHY_PLL2,
3380				&clk_src_regs[2], false);
3381	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
3382			dcn20_clock_source_create(ctx, ctx->dc_bios,
3383				CLOCK_SOURCE_COMBO_PHY_PLL3,
3384				&clk_src_regs[3], false);
3385	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
3386			dcn20_clock_source_create(ctx, ctx->dc_bios,
3387				CLOCK_SOURCE_COMBO_PHY_PLL4,
3388				&clk_src_regs[4], false);
3389	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
3390			dcn20_clock_source_create(ctx, ctx->dc_bios,
3391				CLOCK_SOURCE_COMBO_PHY_PLL5,
3392				&clk_src_regs[5], false);
3393	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
3394	/* TODO: do not reuse the PHY PLL register set for the DP DTO clock source */
3395	pool->base.dp_clock_source =
3396			dcn20_clock_source_create(ctx, ctx->dc_bios,
3397				CLOCK_SOURCE_ID_DP_DTO,
3398				&clk_src_regs[0], true);
3399
3400	for (i = 0; i < pool->base.clk_src_count; i++) {
3401		if (pool->base.clock_sources[i] == NULL) {
3402			dm_error("DC: failed to create clock sources!\n");
3403			BREAK_TO_DEBUGGER();
3404			goto create_fail;
3405		}
3406	}
3407
3408	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
3409	if (pool->base.dccg == NULL) {
3410		dm_error("DC: failed to create dccg!\n");
3411		BREAK_TO_DEBUGGER();
3412		goto create_fail;
3413	}
3414
3415	pool->base.dmcu = dcn20_dmcu_create(ctx,
3416			&dmcu_regs,
3417			&dmcu_shift,
3418			&dmcu_mask);
3419	if (pool->base.dmcu == NULL) {
3420		dm_error("DC: failed to create dmcu!\n");
3421		BREAK_TO_DEBUGGER();
3422		goto create_fail;
3423	}
3424
3425	pool->base.abm = dce_abm_create(ctx,
3426			&abm_regs,
3427			&abm_shift,
3428			&abm_mask);
3429	if (pool->base.abm == NULL) {
3430		dm_error("DC: failed to create abm!\n");
3431		BREAK_TO_DEBUGGER();
3432		goto create_fail;
3433	}
3434
3435	pool->base.pp_smu = dcn20_pp_smu_create(ctx);
3436
3437
3438	if (!init_soc_bounding_box(dc, pool)) {
3439		dm_error("DC: failed to initialize soc bounding box!\n");
3440		BREAK_TO_DEBUGGER();
3441		goto create_fail;
3442	}
3443
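	/* Initialize DML with the selected bounding box, IP params and project
	 * version. */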
3444	dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
3445
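	/*
	 * Build the reader/writer watermark range sets and report them to
	 * PP/SMU.  Reader sets are split by memory speed (dram_speed_mts / 16
	 * is used as the fill-clock boundary in MHz); the first set's lower
	 * bound and the last set's upper bound are left unconstrained.
	 */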
3446	if (!dc->debug.disable_pplib_wm_range) {
3447		struct pp_smu_wm_range_sets ranges = {0};
3448		int i = 0;
3449
3450		ranges.num_reader_wm_sets = 0;
3451
3452		if (loaded_bb->num_states == 1) {
3453			ranges.reader_wm_sets[0].wm_inst = i;
3454			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3455			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3456			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3457			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3458
3459			ranges.num_reader_wm_sets = 1;
3460		} else if (loaded_bb->num_states > 1) {
3461			for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
3462				ranges.reader_wm_sets[i].wm_inst = i;
3463				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3464				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3465				ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
3466				ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
3467
3468				ranges.num_reader_wm_sets = i + 1;
3469			}
3470
3471			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3472			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3473		}
3474
3475		ranges.num_writer_wm_sets = 1;
3476
3477		ranges.writer_wm_sets[0].wm_inst = 0;
3478		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3479		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3480		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3481		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3482
3483		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
3484		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
3485			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
3486	}
3487
3488	init_data.ctx = dc->ctx;
3489	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
3490	if (!pool->base.irqs)
3491		goto create_fail;
3492
3493	/* mem input -> ipp -> dpp -> opp -> TG */
3494	for (i = 0; i < pool->base.pipe_count; i++) {
3495		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
3496		if (pool->base.hubps[i] == NULL) {
3497			BREAK_TO_DEBUGGER();
3498			dm_error(
3499				"DC: failed to create memory input!\n");
3500			goto create_fail;
3501		}
3502
3503		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
3504		if (pool->base.ipps[i] == NULL) {
3505			BREAK_TO_DEBUGGER();
3506			dm_error(
3507				"DC: failed to create input pixel processor!\n");
3508			goto create_fail;
3509		}
3510
3511		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
3512		if (pool->base.dpps[i] == NULL) {
3513			BREAK_TO_DEBUGGER();
3514			dm_error(
3515				"DC: failed to create dpps!\n");
3516			goto create_fail;
3517		}
3518	}
3519	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
3520		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
3521		if (pool->base.engines[i] == NULL) {
3522			BREAK_TO_DEBUGGER();
3523			dm_error(
3524				"DC:failed to create aux engine!!\n");
3525			goto create_fail;
3526		}
3527		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
3528		if (pool->base.hw_i2cs[i] == NULL) {
3529			BREAK_TO_DEBUGGER();
3530			dm_error(
3531				"DC:failed to create hw i2c!!\n");
3532			goto create_fail;
3533		}
3534		pool->base.sw_i2cs[i] = NULL;
3535	}
3536
3537	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
3538		pool->base.opps[i] = dcn20_opp_create(ctx, i);
3539		if (pool->base.opps[i] == NULL) {
3540			BREAK_TO_DEBUGGER();
3541			dm_error(
3542				"DC: failed to create output pixel processor!\n");
3543			goto create_fail;
3544		}
3545	}
3546
3547	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
3548		pool->base.timing_generators[i] = dcn20_timing_generator_create(
3549				ctx, i);
3550		if (pool->base.timing_generators[i] == NULL) {
3551			BREAK_TO_DEBUGGER();
3552			dm_error("DC: failed to create tg!\n");
3553			goto create_fail;
3554		}
3555	}
3556
3557	pool->base.timing_generator_count = i;
3558
3559	pool->base.mpc = dcn20_mpc_create(ctx);
3560	if (pool->base.mpc == NULL) {
3561		BREAK_TO_DEBUGGER();
3562		dm_error("DC: failed to create mpc!\n");
3563		goto create_fail;
3564	}
3565
3566	pool->base.hubbub = dcn20_hubbub_create(ctx);
3567	if (pool->base.hubbub == NULL) {
3568		BREAK_TO_DEBUGGER();
3569		dm_error("DC: failed to create hubbub!\n");
3570		goto create_fail;
3571	}
3572
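	/* Create one DSC engine per DSC resource when DSC support is compiled in. */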
3573#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3574	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
3575		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
3576		if (pool->base.dscs[i] == NULL) {
3577			BREAK_TO_DEBUGGER();
3578			dm_error("DC: failed to create display stream compressor %d!\n", i);
3579			goto create_fail;
3580		}
3581	}
3582#endif
3583
3584	if (!dcn20_dwbc_create(ctx, &pool->base)) {
3585		BREAK_TO_DEBUGGER();
3586		dm_error("DC: failed to create dwbc!\n");
3587		goto create_fail;
3588	}
3589	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
3590		BREAK_TO_DEBUGGER();
3591		dm_error("DC: failed to create mcif_wb!\n");
3592		goto create_fail;
3593	}
3594
3595	if (!resource_construct(num_virtual_links, dc, &pool->base,
3596			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
3597			&res_create_funcs : &res_create_maximus_funcs)))
3598		goto create_fail;
3599
3600	dcn20_hw_sequencer_construct(dc);
3601
3602	dc->caps.max_planes = pool->base.pipe_count;
3603
3604	for (i = 0; i < dc->caps.max_planes; ++i)
3605		dc->caps.planes[i] = plane_cap;
3606
3607	dc->cap_funcs = cap_funcs;
3608
3609	return true;
3610
3611create_fail:
3612
3613	destruct(pool);
3614
3615	return false;
3616}
3617
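/*
 * Allocate and construct a DCN 2.0 resource pool.  Returns the embedded base
 * resource_pool on success, or NULL (after freeing the allocation) on failure.
 */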
3618struct resource_pool *dcn20_create_resource_pool(
3619		const struct dc_init_data *init_data,
3620		struct dc *dc)
3621{
3622	struct dcn20_resource_pool *pool =
3623		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
3624
3625	if (!pool)
3626		return NULL;
3627
3628	if (construct(init_data->num_virtual_links, dc, pool))
3629		return &pool->base;
3630
3631	BREAK_TO_DEBUGGER();
3632	kfree(pool);
3633	return NULL;
3634}