Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.9.4.
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26
 27#include "dm_services.h"
 28#include "dm_helpers.h"
 29#include "core_types.h"
 30#include "resource.h"
 31#include "dccg.h"
 32#include "dce/dce_hwseq.h"
 33#include "clk_mgr.h"
 34#include "reg_helper.h"
 35#include "abm.h"
 36#include "hubp.h"
 37#include "dchubbub.h"
 38#include "timing_generator.h"
 39#include "opp.h"
 40#include "ipp.h"
 41#include "mpc.h"
 42#include "mcif_wb.h"
 43#include "dc_dmub_srv.h"
 44#include "dcn31_hwseq.h"
 45#include "link_hwss.h"
 46#include "dpcd_defs.h"
 47#include "dce/dmub_outbox.h"
 48#include "dc_link_dp.h"
 49#include "inc/link_dpcd.h"
 50#include "dcn10/dcn10_hw_sequencer.h"
 51#include "inc/link_enc_cfg.h"
 52#include "dcn30/dcn30_vpg.h"
 53#include "dce/dce_i2c_hw.h"
 54
 55#define DC_LOGGER_INIT(logger)
 56
 57#define CTX \
 58	hws->ctx
 59#define REG(reg)\
 60	hws->regs->reg
 61#define DC_LOGGER \
 62		dc->ctx->logger
 63
 64
 65#undef FN
 66#define FN(reg_name, field_name) \
 67	hws->shifts->field_name, hws->masks->field_name
 68
/*
 * enable_memory_low_power - put idle DCN on-chip memories into low-power
 * default states at init time.
 *
 * Each memory class (DMCU ERAM, OPTC/ODM, VGA, MPC, VPG) is gated by its
 * own bit in dc->debug.enable_mem_low_power so individual blocks can be
 * kept powered for debug.
 */
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	// Delegate MPC RAM low-power programming to the MPC block, if supported
	if (dc->debug.enable_mem_low_power.bits.mpc &&
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);


	/* NOTE(review): the vpg_powerdown hook is probed only on stream_enc[0];
	 * all stream encoders are assumed to share the same vtable — confirm. */
	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
108
/*
 * dcn31_init_hw - one-time hardware bring-up for DCN 3.1.
 *
 * Sequence: clock manager init, (FPGA short-path), VBIOS golden init /
 * VGA disable when not in accelerated mode, DCCG init, memory low-power
 * defaults, reference-clock discovery, link encoder init, outbox enable,
 * DP blanking, pipe init, audio/backlight/ABM init, clock gating, and
 * watermark/CRB programming.  Ordering here mirrors the hardware's
 * bring-up requirements and should not be rearranged casually.
 */
void dcn31_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	int i;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* FPGA emulation takes a reduced init path (timers, memory power,
	 * gating) and returns early — no VBIOS or link bring-up. */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	/* Taking over from VBIOS: run golden init and kill VGA mode. */
	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		hws->funcs.bios_golden_init(dc);
		if (hws->funcs.disable_vga)
			hws->funcs.disable_vga(dc->hwseq);
	}
	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	enable_memory_low_power(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		/* Only physical PHY endpoints own a link encoder to init. */
		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* Enables outbox notifications for usb4 dpia */
	if (dc->res_pool->usb4_dpia_count)
		dmub_enable_outbox_notification(dc->ctx->dmub_srv);

	/* we want to turn off all dp displays before doing detection */
	dc_link_blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {

		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				/* num_opps > 1 means VBIOS left ODM combine enabled */
				if (num_opps > 1) {
					dc_link_blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Panel controller init also reports the current backlight level,
	 * which seeds the ABM init below. */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl)
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	/* Watermarks only need programming when the driver boots the display
	 * itself (non-accelerated mode). */
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
#endif
}
300
/*
 * dcn31_dsc_pg_control - power-gate or ungate a single DSC instance.
 * @hws: hardware sequencer state
 * @dsc_inst: DSC index 0-2, mapped to power-gating domains 16-18
 * @power_on: true to power the block up, false to gate it off
 *
 * The PGFSM status values used by the REG_WAITs below are 0 (powered on)
 * and 2 (powered off).  IP_REQUEST_EN is temporarily asserted so the PG
 * config registers accept writes, and restored to its prior value after.
 */
void dcn31_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;

	/* When DSC root-clock gating is enabled, ungate the DSC clock
	 * before powering the block up. */
	if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
		hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
		power_on)
		hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
			hws->ctx->dc->res_pool->dccg, dsc_inst);

	/* Save and assert IP_REQUEST_EN to unlock PG register writes. */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* Restore IP_REQUEST_EN if we asserted it above. */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);

	/* Symmetrically, gate the DSC clock again after powering down. */
	if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
		if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
			hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
				hws->ctx->dc->res_pool->dccg, dsc_inst);
	}

}
363
364
/*
 * dcn31_enable_power_gating_plane - allow or forbid power gating of the
 * HUBP/DPP and DSC power domains.
 * @hws: hardware sequencer state
 * @enable: true to permit gating (clear DOMAIN_POWER_FORCEON), false to
 *          force the domains permanently on
 *
 * The per-block debug knobs (disable_hubp_power_gate,
 * disable_dsc_power_gate) override @enable and keep the domains forced on.
 */
void dcn31_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */
	uint32_t org_ip_request_cntl = 0;

	if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
		force_on = false;

	/* Save and assert IP_REQUEST_EN to unlock PG register writes. */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	/* DSC gating has its own debug override. */
	force_on = true; /* disable power gating */
	if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
		force_on = false;

	/* DSC0/1/2 (domains 16-18) */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	/* Restore IP_REQUEST_EN if we asserted it above. */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
397
398void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
399{
400	bool is_hdmi_tmds;
401	bool is_dp;
402
403	ASSERT(pipe_ctx->stream);
404
405	if (pipe_ctx->stream_res.stream_enc == NULL)
406		return;  /* this is not root pipe */
407
408	is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
409	is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
410
411	if (!is_hdmi_tmds && !is_dp)
412		return;
413
414	if (is_hdmi_tmds)
415		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
416			pipe_ctx->stream_res.stream_enc,
417			&pipe_ctx->stream_res.encoder_info_frame);
418	else {
419		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
420			pipe_ctx->stream_res.stream_enc,
421			&pipe_ctx->stream_res.encoder_info_frame);
422	}
423}
/*
 * dcn31_z10_save_init - ask DMUB firmware to capture DCN init state so it
 * can be reapplied after a Z10 power-gate cycle.
 *
 * Fire-and-wait: the command is queued, executed, and the function blocks
 * until the firmware is idle again.
 */
void dcn31_z10_save_init(struct dc *dc)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
	cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
436
/*
 * dcn31_z10_restore - ask DMUB firmware to restore the DCN state captured
 * by dcn31_z10_save_init() after exiting Z10.
 *
 * Skipped entirely when the firmware reports no restore is pending.
 */
void dcn31_z10_restore(const struct dc *dc)
{
	union dmub_rb_cmd cmd;

	/*
	 * DMUB notifies whether restore is required.
	 * Optimization to avoid sending commands when not required.
	 */
	if (!dc_dmub_srv_is_restore_required(dc->ctx->dmub_srv))
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
	cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
456
/*
 * dcn31_hubp_pg_control - power-gate or ungate one HUBP instance.
 * @hws: hardware sequencer state
 * @hubp_inst: HUBP index 0-3, mapped to power-gating domains 0-3
 * @power_on: true to power the block up, false to gate it off
 *
 * PGFSM status values: 0 = powered on, 2 = powered off.  IP_REQUEST_EN
 * is temporarily asserted so the PG config registers accept writes.
 */
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;

	/* A zero register offset means the PG domain is not wired up on
	 * this variant — nothing to program. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (hubp_inst) {
	case 0:
		REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 1:
		REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 2:
		REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 3:
		REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
	/* Restore IP_REQUEST_EN if we asserted it above. */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
495
496int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
497{
498	struct dcn_hubbub_phys_addr_config config;
499
500	config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
501	config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
502	config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
503	config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
504	config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
505	config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
506	config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
507	config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
508
509	if (pa_config->gart_config.base_addr_is_mc_addr) {
510		/* Convert from MC address to offset into FB */
511		config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr -
512				pa_config->system_aperture.fb_base +
513				pa_config->system_aperture.fb_offset;
514	} else
515		config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
516
517	return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
518}
519
/*
 * dcn31_reset_back_end_for_pipe - tear down the display back end owned by
 * a root pipe: disable ABM, DSC config, the CRTC and its OPTC clock, DRR,
 * then the link/stream and any acquired audio resource.
 *
 * Only root pipes (those with a stream encoder) carry back-end state; for
 * others the stream pointer is simply cleared.  The teardown order here
 * follows hardware requirements and must not be rearranged.
 */
static void dcn31_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link;

	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}
	ASSERT(!pipe_ctx->top_pipe);

	dc->hwss.set_abm_immediate_disable(pipe_ctx);

	pipe_ctx->stream_res.tg->funcs->set_dsc_config(
			pipe_ctx->stream_res.tg,
			OPTC_DSC_DISABLED, 0, 0);
	pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
	if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
	/* OTG no longer references the link's symclk. */
	pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;

	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
				pipe_ctx->stream_res.tg, NULL);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		/* free acquired resources */
		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	} else if (pipe_ctx->stream_res.dsc) {
			dp_set_dsc_enable(pipe_ctx, false);
	}

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
585
/*
 * dcn31_reset_hw_ctx_wrap - reset back ends for pipes whose stream is
 * going away or needs reprogramming in the new state.
 *
 * Iterates pipes in reverse index order, skipping secondary (top/ODM)
 * pipes, and powers down each affected pipe's old clock source after the
 * back-end reset.
 */
void dcn31_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only head pipes own a back end to reset. */
		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}

	/* New dc_state in the process of being applied to hardware. */
	link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
}
620
621void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
622{
623	if (hws->ctx->dc->debug.hpo_optimization)
624		REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
625}
626void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
627		int num_pipes, struct dc_crtc_timing_adjust adjust)
628{
629	int i = 0;
630	struct drr_params params = {0};
631	unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/
632	unsigned int num_frames = 2;
633	params.vertical_total_max = adjust.v_total_max;
634	params.vertical_total_min = adjust.v_total_min;
635	params.vertical_total_mid = adjust.v_total_mid;
636	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
637	for (i = 0; i < num_pipes; i++) {
638		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
639			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
640				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
641					pipe_ctx[i]->stream_res.tg, &params);
642			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
643				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
644					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
645						pipe_ctx[i]->stream_res.tg,
646						event_triggers, num_frames);
647		}
648	}
649}
650void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
651		int num_pipes, const struct dc_static_screen_params *params)
652{
653	unsigned int i;
654	unsigned int triggers = 0;
655	if (params->triggers.surface_update)
656		triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/
657	if (params->triggers.cursor_update)
658		triggers |= 0x10;/*bit4*/
659	if (params->triggers.force_trigger)
660		triggers |= 0x1;
661	for (i = 0; i < num_pipes; i++)
662		pipe_ctx[i]->stream_res.tg->funcs->
663			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
664					triggers, params->num_frames);
665}