    1/*
    2 * Copyright 2015 Advanced Micro Devices, Inc.
    3 *
    4 * Permission is hereby granted, free of charge, to any person obtaining a
    5 * copy of this software and associated documentation files (the "Software"),
    6 * to deal in the Software without restriction, including without limitation
    7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
    8 * and/or sell copies of the Software, and to permit persons to whom the
    9 * Software is furnished to do so, subject to the following conditions:
   10 *
   11 * The above copyright notice and this permission notice shall be included in
   12 * all copies or substantial portions of the Software.
   13 *
   14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
   18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
   19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
   20 * OTHER DEALINGS IN THE SOFTWARE.
   21 *
   22 * Authors: AMD
   23 *
   24 */
   25
   26/* The caprices of the preprocessor require that this be declared right here */
   27#define CREATE_TRACE_POINTS
   28
   29#include "dm_services_types.h"
   30#include "dc.h"
   31#include "link_enc_cfg.h"
   32#include "dc/inc/core_types.h"
   33#include "dal_asic_id.h"
   34#include "dmub/dmub_srv.h"
   35#include "dc/inc/hw/dmcu.h"
   36#include "dc/inc/hw/abm.h"
   37#include "dc/dc_dmub_srv.h"
   38#include "dc/dc_edid_parser.h"
   39#include "dc/dc_stat.h"
   40#include "dc/dc_state.h"
   41#include "amdgpu_dm_trace.h"
   42#include "dpcd_defs.h"
   43#include "link/protocols/link_dpcd.h"
   44#include "link_service_types.h"
   45#include "link/protocols/link_dp_capability.h"
   46#include "link/protocols/link_ddc.h"
   47
   48#include "vid.h"
   49#include "amdgpu.h"
   50#include "amdgpu_display.h"
   51#include "amdgpu_ucode.h"
   52#include "atom.h"
   53#include "amdgpu_dm.h"
   54#include "amdgpu_dm_plane.h"
   55#include "amdgpu_dm_crtc.h"
   56#include "amdgpu_dm_hdcp.h"
   57#include <drm/display/drm_hdcp_helper.h>
   58#include "amdgpu_dm_wb.h"
   59#include "amdgpu_pm.h"
   60#include "amdgpu_atombios.h"
   61
   62#include "amd_shared.h"
   63#include "amdgpu_dm_irq.h"
   64#include "dm_helpers.h"
   65#include "amdgpu_dm_mst_types.h"
   66#if defined(CONFIG_DEBUG_FS)
   67#include "amdgpu_dm_debugfs.h"
   68#endif
   69#include "amdgpu_dm_psr.h"
   70#include "amdgpu_dm_replay.h"
   71
   72#include "ivsrcid/ivsrcid_vislands30.h"
   73
   74#include <linux/backlight.h>
   75#include <linux/module.h>
   76#include <linux/moduleparam.h>
   77#include <linux/types.h>
   78#include <linux/pm_runtime.h>
   79#include <linux/pci.h>
   80#include <linux/power_supply.h>
   81#include <linux/firmware.h>
   82#include <linux/component.h>
   83#include <linux/dmi.h>
   84#include <linux/sort.h>
   85
   86#include <drm/display/drm_dp_mst_helper.h>
   87#include <drm/display/drm_hdmi_helper.h>
   88#include <drm/drm_atomic.h>
   89#include <drm/drm_atomic_uapi.h>
   90#include <drm/drm_atomic_helper.h>
   91#include <drm/drm_blend.h>
   92#include <drm/drm_fixed.h>
   93#include <drm/drm_fourcc.h>
   94#include <drm/drm_edid.h>
   95#include <drm/drm_eld.h>
   96#include <drm/drm_utils.h>
   97#include <drm/drm_vblank.h>
   98#include <drm/drm_audio_component.h>
   99#include <drm/drm_gem_atomic_helper.h>
  100
  101#include <acpi/video.h>
  102
  103#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
  104
  105#include "dcn/dcn_1_0_offset.h"
  106#include "dcn/dcn_1_0_sh_mask.h"
  107#include "soc15_hw_ip.h"
  108#include "soc15_common.h"
  109#include "vega10_ip_offset.h"
  110
  111#include "gc/gc_11_0_0_offset.h"
  112#include "gc/gc_11_0_0_sh_mask.h"
  113
  114#include "modules/inc/mod_freesync.h"
  115#include "modules/power/power_helpers.h"
  116
  117#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
  118MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
  119#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
  120MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
  121#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
  122MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
  123#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
  124MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
  125#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
  126MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
  127#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
  128MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
  129#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
  130MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
  131#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
  132MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
  133#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
  134MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
  135#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
  136MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
  137#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
  138MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
  139
  140#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
  141MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
  142#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
  143MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
  144
  145#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
  146MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
  147
  148#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
  149MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
  150
  151#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
  152MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
  153
  154#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
  155MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
  156
  157#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
  158MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
  159
  160/* Number of bytes in PSP header for firmware. */
  161#define PSP_HEADER_BYTES 0x100
  162
  163/* Number of bytes in PSP footer for firmware. */
  164#define PSP_FOOTER_BYTES 0x100
  165
  166/**
  167 * DOC: overview
  168 *
  169 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
  170 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
  171 * requests into DC requests, and DC responses into DRM responses.
  172 *
  173 * The root control structure is &struct amdgpu_display_manager.
  174 */
  175
  176/* basic init/fini API */
  177static int amdgpu_dm_init(struct amdgpu_device *adev);
  178static void amdgpu_dm_fini(struct amdgpu_device *adev);
  179static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
  180static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
  181
  182static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
  183{
  184	switch (link->dpcd_caps.dongle_type) {
  185	case DISPLAY_DONGLE_NONE:
  186		return DRM_MODE_SUBCONNECTOR_Native;
  187	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
  188		return DRM_MODE_SUBCONNECTOR_VGA;
  189	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
  190	case DISPLAY_DONGLE_DP_DVI_DONGLE:
  191		return DRM_MODE_SUBCONNECTOR_DVID;
  192	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
  193	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
  194		return DRM_MODE_SUBCONNECTOR_HDMIA;
  195	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
  196	default:
  197		return DRM_MODE_SUBCONNECTOR_Unknown;
  198	}
  199}
  200
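      /*
       * Update the connector's DP subconnector property to match the dongle
       * type reported in the link's DPCD caps. Applies only to DisplayPort
       * connectors; the property is reported as "Unknown" when no sink is
       * attached.
       */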
  201static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
  202{
  203	struct dc_link *link = aconnector->dc_link;
  204	struct drm_connector *connector = &aconnector->base;
  205	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
  206
  207	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
  208		return;
  209
  210	if (aconnector->dc_sink)
  211		subconnector = get_subconnector_type(link);
  212
  213	drm_object_property_set_value(&connector->base,
  214			connector->dev->mode_config.dp_subconnector_property,
  215			subconnector);
  216}
  217
  218/*
  219 * initializes drm_device display related structures, based on the information
   220 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
  221 * drm_encoder, drm_mode_config
  222 *
  223 * Returns 0 on success
  224 */
  225static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
  226/* removes and deallocates the drm structures, created by the above function */
  227static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
  228
  229static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
  230				    struct amdgpu_dm_connector *amdgpu_dm_connector,
  231				    u32 link_index,
  232				    struct amdgpu_encoder *amdgpu_encoder);
  233static int amdgpu_dm_encoder_init(struct drm_device *dev,
  234				  struct amdgpu_encoder *aencoder,
  235				  uint32_t link_index);
  236
  237static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
  238
  239static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
  240
  241static int amdgpu_dm_atomic_check(struct drm_device *dev,
  242				  struct drm_atomic_state *state);
  243
  244static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
  245static void handle_hpd_rx_irq(void *param);
  246
  247static bool
  248is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
  249				 struct drm_crtc_state *new_crtc_state);
  250/*
  251 * dm_vblank_get_counter
  252 *
  253 * @brief
  254 * Get counter for number of vertical blanks
  255 *
  256 * @param
  257 * struct amdgpu_device *adev - [in] desired amdgpu device
  258 * int disp_idx - [in] which CRTC to get the counter from
  259 *
  260 * @return
  261 * Counter for vertical blanks
  262 */
  263static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
  264{
  265	struct amdgpu_crtc *acrtc = NULL;
  266
  267	if (crtc >= adev->mode_info.num_crtc)
  268		return 0;
  269
  270	acrtc = adev->mode_info.crtcs[crtc];
  271
  272	if (!acrtc->dm_irq_params.stream) {
  273		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
  274			  crtc);
  275		return 0;
  276	}
  277
  278	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
  279}
  280
  281static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
  282				  u32 *vbl, u32 *position)
  283{
  284	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
  285	struct amdgpu_crtc *acrtc = NULL;
  286	struct dc *dc = adev->dm.dc;
  287
  288	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
  289		return -EINVAL;
  290
  291	acrtc = adev->mode_info.crtcs[crtc];
  292
  293	if (!acrtc->dm_irq_params.stream) {
  294		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
  295			  crtc);
  296		return 0;
  297	}
  298
  299	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
  300		dc_allow_idle_optimizations(dc, false);
  301
  302	/*
  303	 * TODO rework base driver to use values directly.
  304	 * for now parse it back into reg-format
  305	 */
  306	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
  307				 &v_blank_start,
  308				 &v_blank_end,
  309				 &h_position,
  310				 &v_position);
  311
  312	*position = v_position | (h_position << 16);
  313	*vbl = v_blank_start | (v_blank_end << 16);
  314
  315	return 0;
  316}
  317
  318static bool dm_is_idle(void *handle)
  319{
  320	/* XXX todo */
  321	return true;
  322}
  323
  324static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
  325{
  326	/* XXX todo */
  327	return 0;
  328}
  329
  330static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
  331{
  332	return false;
  333}
  334
  335static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
  336{
  337	/* XXX todo */
  338	return 0;
  339}
  340
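      /*
       * Look up the amdgpu_crtc whose OTG instance matches otg_inst. Warns and
       * falls back to the first CRTC when otg_inst is -1, and returns NULL if
       * no CRTC matches.
       */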
  341static struct amdgpu_crtc *
  342get_crtc_by_otg_inst(struct amdgpu_device *adev,
  343		     int otg_inst)
  344{
  345	struct drm_device *dev = adev_to_drm(adev);
  346	struct drm_crtc *crtc;
  347	struct amdgpu_crtc *amdgpu_crtc;
  348
  349	if (WARN_ON(otg_inst == -1))
  350		return adev->mode_info.crtcs[0];
  351
  352	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
  353		amdgpu_crtc = to_amdgpu_crtc(crtc);
  354
  355		if (amdgpu_crtc->otg_inst == otg_inst)
  356			return amdgpu_crtc;
  357	}
  358
  359	return NULL;
  360}
  361
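      /*
       * A DC timing adjustment is needed when fixed-rate VRR is requested or
       * when the VRR active state differs between the old and new CRTC state.
       */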
  362static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
  363					      struct dm_crtc_state *new_state)
  364{
  365	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
  366		return true;
  367	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
  368		return true;
  369	else
  370		return false;
  371}
  372
  373/*
  374 * DC will program planes with their z-order determined by their ordering
  375 * in the dc_surface_updates array. This comparator is used to sort them
  376 * by descending zpos.
  377 */
  378static int dm_plane_layer_index_cmp(const void *a, const void *b)
  379{
  380	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
  381	const struct dc_surface_update *sb = (struct dc_surface_update *)b;
  382
  383	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
  384	return sb->surface->layer_index - sa->surface->layer_index;
  385}
  386
  387/**
  388 * update_planes_and_stream_adapter() - Send planes to be updated in DC
  389 *
  390 * DC has a generic way to update planes and stream via
  391 * dc_update_planes_and_stream function; however, DM might need some
  392 * adjustments and preparation before calling it. This function is a wrapper
  393 * for the dc_update_planes_and_stream that does any required configuration
  394 * before passing control to DC.
  395 *
  396 * @dc: Display Core control structure
  397 * @update_type: specify whether it is FULL/MEDIUM/FAST update
  398 * @planes_count: planes count to update
  399 * @stream: stream state
  400 * @stream_update: stream update
  401 * @array_of_surface_update: dc surface update pointer
  402 *
  403 */
  404static inline bool update_planes_and_stream_adapter(struct dc *dc,
  405						    int update_type,
  406						    int planes_count,
  407						    struct dc_stream_state *stream,
  408						    struct dc_stream_update *stream_update,
  409						    struct dc_surface_update *array_of_surface_update)
  410{
  411	sort(array_of_surface_update, planes_count,
  412	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
  413
  414	/*
  415	 * Previous frame finished and HW is ready for optimization.
  416	 */
  417	if (update_type == UPDATE_TYPE_FAST)
  418		dc_post_update_surfaces_to_stream(dc);
  419
  420	return dc_update_planes_and_stream(dc,
  421					   array_of_surface_update,
  422					   planes_count,
  423					   stream,
  424					   stream_update);
  425}
  426
  427/**
  428 * dm_pflip_high_irq() - Handle pageflip interrupt
   429 * @interrupt_params: used for determining the CRTC instance
  430 *
  431 * Handles the pageflip interrupt by notifying all interested parties
  432 * that the pageflip has been completed.
  433 */
  434static void dm_pflip_high_irq(void *interrupt_params)
  435{
  436	struct amdgpu_crtc *amdgpu_crtc;
  437	struct common_irq_params *irq_params = interrupt_params;
  438	struct amdgpu_device *adev = irq_params->adev;
  439	struct drm_device *dev = adev_to_drm(adev);
  440	unsigned long flags;
  441	struct drm_pending_vblank_event *e;
  442	u32 vpos, hpos, v_blank_start, v_blank_end;
  443	bool vrr_active;
  444
  445	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
  446
  447	/* IRQ could occur when in initial stage */
  448	/* TODO work and BO cleanup */
  449	if (amdgpu_crtc == NULL) {
  450		drm_dbg_state(dev, "CRTC is null, returning.\n");
  451		return;
  452	}
  453
  454	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
  455
  456	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
  457		drm_dbg_state(dev,
  458			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
  459			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
  460			      amdgpu_crtc->crtc_id, amdgpu_crtc);
  461		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  462		return;
  463	}
  464
  465	/* page flip completed. */
  466	e = amdgpu_crtc->event;
  467	amdgpu_crtc->event = NULL;
  468
  469	WARN_ON(!e);
  470
  471	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
  472
  473	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
  474	if (!vrr_active ||
  475	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
  476				      &v_blank_end, &hpos, &vpos) ||
  477	    (vpos < v_blank_start)) {
  478		/* Update to correct count and vblank timestamp if racing with
  479		 * vblank irq. This also updates to the correct vblank timestamp
  480		 * even in VRR mode, as scanout is past the front-porch atm.
  481		 */
  482		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
  483
  484		/* Wake up userspace by sending the pageflip event with proper
  485		 * count and timestamp of vblank of flip completion.
  486		 */
  487		if (e) {
  488			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
  489
  490			/* Event sent, so done with vblank for this flip */
  491			drm_crtc_vblank_put(&amdgpu_crtc->base);
  492		}
  493	} else if (e) {
  494		/* VRR active and inside front-porch: vblank count and
  495		 * timestamp for pageflip event will only be up to date after
  496		 * drm_crtc_handle_vblank() has been executed from late vblank
  497		 * irq handler after start of back-porch (vline 0). We queue the
  498		 * pageflip event for send-out by drm_crtc_handle_vblank() with
  499		 * updated timestamp and count, once it runs after us.
  500		 *
  501		 * We need to open-code this instead of using the helper
  502		 * drm_crtc_arm_vblank_event(), as that helper would
  503		 * call drm_crtc_accurate_vblank_count(), which we must
  504		 * not call in VRR mode while we are in front-porch!
  505		 */
  506
  507		/* sequence will be replaced by real count during send-out. */
  508		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
  509		e->pipe = amdgpu_crtc->crtc_id;
  510
  511		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
  512		e = NULL;
  513	}
  514
  515	/* Keep track of vblank of this flip for flip throttling. We use the
   516	 * cooked hw counter, as that one is incremented at the start of this vblank
  517	 * of pageflip completion, so last_flip_vblank is the forbidden count
  518	 * for queueing new pageflips if vsync + VRR is enabled.
  519	 */
  520	amdgpu_crtc->dm_irq_params.last_flip_vblank =
  521		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
  522
  523	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
  524	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  525
  526	drm_dbg_state(dev,
  527		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
  528		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
  529}
  530
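      /*
       * Handles the VUPDATE interrupt: traces the measured frame duration for
       * refresh-rate tracking and, in VRR mode, performs core vblank handling
       * after the end of front-porch, plus BTR processing on pre-DCE12 ASICs.
       */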
  531static void dm_vupdate_high_irq(void *interrupt_params)
  532{
  533	struct common_irq_params *irq_params = interrupt_params;
  534	struct amdgpu_device *adev = irq_params->adev;
  535	struct amdgpu_crtc *acrtc;
  536	struct drm_device *drm_dev;
  537	struct drm_vblank_crtc *vblank;
  538	ktime_t frame_duration_ns, previous_timestamp;
  539	unsigned long flags;
  540	int vrr_active;
  541
  542	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
  543
  544	if (acrtc) {
  545		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
  546		drm_dev = acrtc->base.dev;
  547		vblank = drm_crtc_vblank_crtc(&acrtc->base);
  548		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
  549		frame_duration_ns = vblank->time - previous_timestamp;
  550
  551		if (frame_duration_ns > 0) {
  552			trace_amdgpu_refresh_rate_track(acrtc->base.index,
  553						frame_duration_ns,
  554						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
  555			atomic64_set(&irq_params->previous_timestamp, vblank->time);
  556		}
  557
  558		drm_dbg_vbl(drm_dev,
  559			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
  560			    vrr_active);
  561
   562		/* Core vblank handling is done here after the end of front-porch in
   563		 * VRR mode, as vblank timestamping only gives valid results once
   564		 * scanout is past the front-porch. This will also deliver
  565		 * page-flip completion events that have been queued to us
  566		 * if a pageflip happened inside front-porch.
  567		 */
  568		if (vrr_active) {
  569			amdgpu_dm_crtc_handle_vblank(acrtc);
  570
  571			/* BTR processing for pre-DCE12 ASICs */
  572			if (acrtc->dm_irq_params.stream &&
  573			    adev->family < AMDGPU_FAMILY_AI) {
  574				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
  575				mod_freesync_handle_v_update(
  576				    adev->dm.freesync_module,
  577				    acrtc->dm_irq_params.stream,
  578				    &acrtc->dm_irq_params.vrr_params);
  579
  580				dc_stream_adjust_vmin_vmax(
  581				    adev->dm.dc,
  582				    acrtc->dm_irq_params.stream,
  583				    &acrtc->dm_irq_params.vrr_params.adjust);
  584				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  585			}
  586		}
  587	}
  588}
  589
  590/**
  591 * dm_crtc_high_irq() - Handles CRTC interrupt
  592 * @interrupt_params: used for determining the CRTC instance
  593 *
   594 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
  595 * event handler.
  596 */
  597static void dm_crtc_high_irq(void *interrupt_params)
  598{
  599	struct common_irq_params *irq_params = interrupt_params;
  600	struct amdgpu_device *adev = irq_params->adev;
  601	struct drm_writeback_job *job;
  602	struct amdgpu_crtc *acrtc;
  603	unsigned long flags;
  604	int vrr_active;
  605
  606	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
  607	if (!acrtc)
  608		return;
  609
  610	if (acrtc->wb_conn) {
  611		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
  612
  613		if (acrtc->wb_pending) {
  614			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
  615						       struct drm_writeback_job,
  616						       list_entry);
  617			acrtc->wb_pending = false;
  618			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
  619
  620			if (job) {
  621				unsigned int v_total, refresh_hz;
  622				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
  623
  624				v_total = stream->adjust.v_total_max ?
  625					  stream->adjust.v_total_max : stream->timing.v_total;
  626				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
  627					     100LL, (v_total * stream->timing.h_total));
  628				mdelay(1000 / refresh_hz);
  629
  630				drm_writeback_signal_completion(acrtc->wb_conn, 0);
  631				dc_stream_fc_disable_writeback(adev->dm.dc,
  632							       acrtc->dm_irq_params.stream, 0);
  633			}
  634		} else
  635			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
  636	}
  637
  638	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
  639
  640	drm_dbg_vbl(adev_to_drm(adev),
  641		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
  642		    vrr_active, acrtc->dm_irq_params.active_planes);
  643
  644	/**
  645	 * Core vblank handling at start of front-porch is only possible
   646	 * in non-vrr mode, as only then does vblank timestamping give
   647	 * valid results while still inside front-porch. Otherwise defer it
  648	 * to dm_vupdate_high_irq after end of front-porch.
  649	 */
  650	if (!vrr_active)
  651		amdgpu_dm_crtc_handle_vblank(acrtc);
  652
  653	/**
  654	 * Following stuff must happen at start of vblank, for crc
  655	 * computation and below-the-range btr support in vrr mode.
  656	 */
  657	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
  658
  659	/* BTR updates need to happen before VUPDATE on Vega and above. */
  660	if (adev->family < AMDGPU_FAMILY_AI)
  661		return;
  662
  663	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
  664
  665	if (acrtc->dm_irq_params.stream &&
  666	    acrtc->dm_irq_params.vrr_params.supported &&
  667	    acrtc->dm_irq_params.freesync_config.state ==
  668		    VRR_STATE_ACTIVE_VARIABLE) {
  669		mod_freesync_handle_v_update(adev->dm.freesync_module,
  670					     acrtc->dm_irq_params.stream,
  671					     &acrtc->dm_irq_params.vrr_params);
  672
  673		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
  674					   &acrtc->dm_irq_params.vrr_params.adjust);
  675	}
  676
  677	/*
  678	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
  679	 * In that case, pageflip completion interrupts won't fire and pageflip
  680	 * completion events won't get delivered. Prevent this by sending
  681	 * pending pageflip events from here if a flip is still pending.
  682	 *
  683	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
  684	 * avoid race conditions between flip programming and completion,
  685	 * which could cause too early flip completion events.
  686	 */
  687	if (adev->family >= AMDGPU_FAMILY_RV &&
  688	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
  689	    acrtc->dm_irq_params.active_planes == 0) {
  690		if (acrtc->event) {
  691			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
  692			acrtc->event = NULL;
  693			drm_crtc_vblank_put(&acrtc->base);
  694		}
  695		acrtc->pflip_status = AMDGPU_FLIP_NONE;
  696	}
  697
  698	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  699}
  700
  701#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
  702/**
  703 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
  704 * DCN generation ASICs
  705 * @interrupt_params: interrupt parameters
  706 *
  707 * Used to set crc window/read out crc value at vertical line 0 position
  708 */
  709static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
  710{
  711	struct common_irq_params *irq_params = interrupt_params;
  712	struct amdgpu_device *adev = irq_params->adev;
  713	struct amdgpu_crtc *acrtc;
  714
  715	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
  716
  717	if (!acrtc)
  718		return;
  719
  720	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
  721}
  722#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
  723
  724/**
  725 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
  726 * @adev: amdgpu_device pointer
  727 * @notify: dmub notification structure
  728 *
  729 * Dmub AUX or SET_CONFIG command completion processing callback
   730 * Copies the dmub notification to DM, to be read by the AUX command
   731 * issuing thread, and signals the event to wake up that thread.
  732 */
  733static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
  734					struct dmub_notification *notify)
  735{
  736	if (adev->dm.dmub_notify)
  737		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
  738	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
  739		complete(&adev->dm.dmub_aux_transfer_done);
  740}
  741
  742/**
  743 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
  744 * @adev: amdgpu_device pointer
  745 * @notify: dmub notification structure
  746 *
   747 * Dmub Hpd interrupt processing callback. Gets the display index through the
   748 * link index and calls the helper to do the processing.
  749 */
  750static void dmub_hpd_callback(struct amdgpu_device *adev,
  751			      struct dmub_notification *notify)
  752{
  753	struct amdgpu_dm_connector *aconnector;
  754	struct amdgpu_dm_connector *hpd_aconnector = NULL;
  755	struct drm_connector *connector;
  756	struct drm_connector_list_iter iter;
  757	struct dc_link *link;
  758	u8 link_index = 0;
  759	struct drm_device *dev;
  760
  761	if (adev == NULL)
  762		return;
  763
  764	if (notify == NULL) {
  765		DRM_ERROR("DMUB HPD callback notification was NULL");
  766		return;
  767	}
  768
  769	if (notify->link_index > adev->dm.dc->link_count) {
   770		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
  771		return;
  772	}
  773
  774	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
  775	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
  776		DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
  777		return;
  778	}
  779
  780	link_index = notify->link_index;
  781	link = adev->dm.dc->links[link_index];
  782	dev = adev->dm.ddev;
  783
  784	drm_connector_list_iter_begin(dev, &iter);
  785	drm_for_each_connector_iter(connector, &iter) {
  786
  787		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
  788			continue;
  789
  790		aconnector = to_amdgpu_dm_connector(connector);
  791		if (link && aconnector->dc_link == link) {
  792			if (notify->type == DMUB_NOTIFICATION_HPD)
  793				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
  794			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
  795				DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
  796			else
  797				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
  798						notify->type, link_index);
  799
  800			hpd_aconnector = aconnector;
  801			break;
  802		}
  803	}
  804	drm_connector_list_iter_end(&iter);
  805
  806	if (hpd_aconnector) {
  807		if (notify->type == DMUB_NOTIFICATION_HPD) {
  808			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
  809				DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
  810			handle_hpd_irq_helper(hpd_aconnector);
  811		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
  812			handle_hpd_rx_irq(hpd_aconnector);
  813		}
  814	}
  815}
  816
  817/**
  818 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
  819 * @adev: amdgpu_device pointer
  820 * @notify: dmub notification structure
  821 *
  822 * HPD sense changes can occur during low power states and need to be
  823 * notified from firmware to driver.
  824 */
  825static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
  826			      struct dmub_notification *notify)
  827{
  828	DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
  829}
  830
  831/**
  832 * register_dmub_notify_callback - Sets callback for DMUB notify
  833 * @adev: amdgpu_device pointer
  834 * @type: Type of dmub notification
  835 * @callback: Dmub interrupt callback function
  836 * @dmub_int_thread_offload: offload indicator
  837 *
  838 * API to register a dmub callback handler for a dmub notification
   839 * Also sets an indicator whether callback processing is to be offloaded
   840 * to the dmub interrupt handling thread.
   841 * Return: true if successfully registered, false if the callback is NULL or the notification type is invalid
  842 */
  843static bool register_dmub_notify_callback(struct amdgpu_device *adev,
  844					  enum dmub_notification_type type,
  845					  dmub_notify_interrupt_callback_t callback,
  846					  bool dmub_int_thread_offload)
  847{
  848	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
  849		adev->dm.dmub_callback[type] = callback;
  850		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
  851	} else
  852		return false;
  853
  854	return true;
  855}
  856
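      /*
       * Deferred work handler that dispatches a queued DMUB notification to
       * the registered callback and then frees the work item.
       */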
  857static void dm_handle_hpd_work(struct work_struct *work)
  858{
  859	struct dmub_hpd_work *dmub_hpd_wrk;
  860
  861	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
  862
  863	if (!dmub_hpd_wrk->dmub_notify) {
  864		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
  865		return;
  866	}
  867
  868	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
  869		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
  870		dmub_hpd_wrk->dmub_notify);
  871	}
  872
  873	kfree(dmub_hpd_wrk->dmub_notify);
  874	kfree(dmub_hpd_wrk);
  875
  876}
  877
  878#define DMUB_TRACE_MAX_READ 64
  879/**
  880 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
  881 * @interrupt_params: used for determining the Outbox instance
  882 *
   883 * Handles the Outbox interrupt by processing DMCUB trace log entries and
   884 * dispatching DMUB notifications to their registered handlers.
  885 */
  886static void dm_dmub_outbox1_low_irq(void *interrupt_params)
  887{
  888	struct dmub_notification notify = {0};
  889	struct common_irq_params *irq_params = interrupt_params;
  890	struct amdgpu_device *adev = irq_params->adev;
  891	struct amdgpu_display_manager *dm = &adev->dm;
  892	struct dmcub_trace_buf_entry entry = { 0 };
  893	u32 count = 0;
  894	struct dmub_hpd_work *dmub_hpd_wrk;
  895	static const char *const event_type[] = {
  896		"NO_DATA",
  897		"AUX_REPLY",
  898		"HPD",
  899		"HPD_IRQ",
   900		"SET_CONFIG_REPLY",
  901		"DPIA_NOTIFICATION",
  902		"HPD_SENSE_NOTIFY",
  903	};
  904
  905	do {
  906		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
  907			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
  908							entry.param0, entry.param1);
  909
  910			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
  911				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
  912		} else
  913			break;
  914
  915		count++;
  916
  917	} while (count <= DMUB_TRACE_MAX_READ);
  918
  919	if (count > DMUB_TRACE_MAX_READ)
   920		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
  921
  922	if (dc_enable_dmub_notifications(adev->dm.dc) &&
  923		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
  924
  925		do {
  926			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
  927			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
  928				DRM_ERROR("DM: notify type %d invalid!", notify.type);
  929				continue;
  930			}
  931			if (!dm->dmub_callback[notify.type]) {
  932				DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
  933					event_type[notify.type]);
  934				continue;
  935			}
  936			if (dm->dmub_thread_offload[notify.type] == true) {
  937				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
  938				if (!dmub_hpd_wrk) {
  939					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
  940					return;
  941				}
  942				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
  943								    GFP_ATOMIC);
  944				if (!dmub_hpd_wrk->dmub_notify) {
  945					kfree(dmub_hpd_wrk);
  946					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
  947					return;
  948				}
  949				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
  950				dmub_hpd_wrk->adev = adev;
  951				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
  952			} else {
  953				dm->dmub_callback[notify.type](adev, &notify);
  954			}
  955		} while (notify.pending_notification);
  956	}
  957}
  958
  959static int dm_set_clockgating_state(void *handle,
  960		  enum amd_clockgating_state state)
  961{
  962	return 0;
  963}
  964
  965static int dm_set_powergating_state(void *handle,
  966		  enum amd_powergating_state state)
  967{
  968	return 0;
  969}
  970
  971/* Prototypes of private functions */
  972static int dm_early_init(struct amdgpu_ip_block *ip_block);
  973
  974/* Allocate memory for FBC compressed data  */
  975static void amdgpu_dm_fbc_init(struct drm_connector *connector)
  976{
  977	struct amdgpu_device *adev = drm_to_adev(connector->dev);
  978	struct dm_compressor_info *compressor = &adev->dm.compressor;
  979	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
  980	struct drm_display_mode *mode;
  981	unsigned long max_size = 0;
  982
  983	if (adev->dm.dc->fbc_compressor == NULL)
  984		return;
  985
  986	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
  987		return;
  988
  989	if (compressor->bo_ptr)
  990		return;
  991
  992
  993	list_for_each_entry(mode, &connector->modes, head) {
  994		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
  995			max_size = (unsigned long) mode->htotal * mode->vtotal;
  996	}
  997
  998	if (max_size) {
  999		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
 1000			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
 1001			    &compressor->gpu_addr, &compressor->cpu_addr);
 1002
 1003		if (r)
 1004			DRM_ERROR("DM: Failed to initialize FBC\n");
 1005		else {
 1006			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
 1007			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
 1008		}
 1009
 1010	}
 1011
 1012}
 1013
 1014static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
 1015					  int pipe, bool *enabled,
 1016					  unsigned char *buf, int max_bytes)
 1017{
 1018	struct drm_device *dev = dev_get_drvdata(kdev);
 1019	struct amdgpu_device *adev = drm_to_adev(dev);
 1020	struct drm_connector *connector;
 1021	struct drm_connector_list_iter conn_iter;
 1022	struct amdgpu_dm_connector *aconnector;
 1023	int ret = 0;
 1024
 1025	*enabled = false;
 1026
 1027	mutex_lock(&adev->dm.audio_lock);
 1028
 1029	drm_connector_list_iter_begin(dev, &conn_iter);
 1030	drm_for_each_connector_iter(connector, &conn_iter) {
 1031
 1032		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 1033			continue;
 1034
 1035		aconnector = to_amdgpu_dm_connector(connector);
 1036		if (aconnector->audio_inst != port)
 1037			continue;
 1038
 1039		*enabled = true;
 1040		mutex_lock(&connector->eld_mutex);
 1041		ret = drm_eld_size(connector->eld);
 1042		memcpy(buf, connector->eld, min(max_bytes, ret));
 1043		mutex_unlock(&connector->eld_mutex);
 1044
 1045		break;
 1046	}
 1047	drm_connector_list_iter_end(&conn_iter);
 1048
 1049	mutex_unlock(&adev->dm.audio_lock);
 1050
 1051	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
 1052
 1053	return ret;
 1054}
 1055
 1056static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
 1057	.get_eld = amdgpu_dm_audio_component_get_eld,
 1058};
 1059
 1060static int amdgpu_dm_audio_component_bind(struct device *kdev,
 1061				       struct device *hda_kdev, void *data)
 1062{
 1063	struct drm_device *dev = dev_get_drvdata(kdev);
 1064	struct amdgpu_device *adev = drm_to_adev(dev);
 1065	struct drm_audio_component *acomp = data;
 1066
 1067	acomp->ops = &amdgpu_dm_audio_component_ops;
 1068	acomp->dev = kdev;
 1069	adev->dm.audio_component = acomp;
 1070
 1071	return 0;
 1072}
 1073
 1074static void amdgpu_dm_audio_component_unbind(struct device *kdev,
 1075					  struct device *hda_kdev, void *data)
 1076{
 1077	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
 1078	struct drm_audio_component *acomp = data;
 1079
 1080	acomp->ops = NULL;
 1081	acomp->dev = NULL;
 1082	adev->dm.audio_component = NULL;
 1083}
 1084
 1085static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
 1086	.bind	= amdgpu_dm_audio_component_bind,
 1087	.unbind	= amdgpu_dm_audio_component_unbind,
 1088};
 1089
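      /*
       * Register the DRM audio component and initialize the audio pins from
       * the DC resource pool so the audio driver can query ELDs.
       */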
 1090static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
 1091{
 1092	int i, ret;
 1093
 1094	if (!amdgpu_audio)
 1095		return 0;
 1096
 1097	adev->mode_info.audio.enabled = true;
 1098
 1099	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
 1100
 1101	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
 1102		adev->mode_info.audio.pin[i].channels = -1;
 1103		adev->mode_info.audio.pin[i].rate = -1;
 1104		adev->mode_info.audio.pin[i].bits_per_sample = -1;
 1105		adev->mode_info.audio.pin[i].status_bits = 0;
 1106		adev->mode_info.audio.pin[i].category_code = 0;
 1107		adev->mode_info.audio.pin[i].connected = false;
 1108		adev->mode_info.audio.pin[i].id =
 1109			adev->dm.dc->res_pool->audios[i]->inst;
 1110		adev->mode_info.audio.pin[i].offset = 0;
 1111	}
 1112
 1113	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
 1114	if (ret < 0)
 1115		return ret;
 1116
 1117	adev->dm.audio_registered = true;
 1118
 1119	return 0;
 1120}
 1121
 1122static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
 1123{
 1124	if (!amdgpu_audio)
 1125		return;
 1126
 1127	if (!adev->mode_info.audio.enabled)
 1128		return;
 1129
 1130	if (adev->dm.audio_registered) {
 1131		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
 1132		adev->dm.audio_registered = false;
 1133	}
 1134
 1135	/* TODO: Disable audio? */
 1136
 1137	adev->mode_info.audio.enabled = false;
 1138}
 1139
 1140static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
 1141{
 1142	struct drm_audio_component *acomp = adev->dm.audio_component;
 1143
 1144	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
 1145		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
 1146
 1147		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
 1148						 pin, -1);
 1149	}
 1150}
 1151
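      /*
       * Copy the DMUB firmware, BSS data and VBIOS into their framebuffer
       * windows, program the hardware parameters and bring up the DMCUB.
       * Also initializes DMCU and ABM when present.
       */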
 1152static int dm_dmub_hw_init(struct amdgpu_device *adev)
 1153{
 1154	const struct dmcub_firmware_header_v1_0 *hdr;
 1155	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
 1156	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
 1157	const struct firmware *dmub_fw = adev->dm.dmub_fw;
 1158	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
 1159	struct abm *abm = adev->dm.dc->res_pool->abm;
 1160	struct dc_context *ctx = adev->dm.dc->ctx;
 1161	struct dmub_srv_hw_params hw_params;
 1162	enum dmub_status status;
 1163	const unsigned char *fw_inst_const, *fw_bss_data;
 1164	u32 i, fw_inst_const_size, fw_bss_data_size;
 1165	bool has_hw_support;
 1166
 1167	if (!dmub_srv)
 1168		/* DMUB isn't supported on the ASIC. */
 1169		return 0;
 1170
 1171	if (!fb_info) {
 1172		DRM_ERROR("No framebuffer info for DMUB service.\n");
 1173		return -EINVAL;
 1174	}
 1175
 1176	if (!dmub_fw) {
 1177		/* Firmware required for DMUB support. */
 1178		DRM_ERROR("No firmware provided for DMUB.\n");
 1179		return -EINVAL;
 1180	}
 1181
 1182	/* initialize register offsets for ASICs with runtime initialization available */
 1183	if (dmub_srv->hw_funcs.init_reg_offsets)
 1184		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
 1185
 1186	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
 1187	if (status != DMUB_STATUS_OK) {
 1188		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
 1189		return -EINVAL;
 1190	}
 1191
 1192	if (!has_hw_support) {
 1193		DRM_INFO("DMUB unsupported on ASIC\n");
 1194		return 0;
 1195	}
 1196
 1197	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
 1198	status = dmub_srv_hw_reset(dmub_srv);
 1199	if (status != DMUB_STATUS_OK)
 1200		DRM_WARN("Error resetting DMUB HW: %d\n", status);
 1201
 1202	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
 1203
 1204	fw_inst_const = dmub_fw->data +
 1205			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 1206			PSP_HEADER_BYTES;
 1207
 1208	fw_bss_data = dmub_fw->data +
 1209		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 1210		      le32_to_cpu(hdr->inst_const_bytes);
 1211
 1212	/* Copy firmware and bios info into FB memory. */
 1213	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
 1214			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
 1215
 1216	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
 1217
 1218	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
 1219	 * amdgpu_ucode_init_single_fw will load dmub firmware
 1220	 * fw_inst_const part to cw0; otherwise, the firmware back door load
 1221	 * will be done by dm_dmub_hw_init
 1222	 */
 1223	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 1224		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
 1225				fw_inst_const_size);
 1226	}
 1227
 1228	if (fw_bss_data_size)
 1229		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
 1230		       fw_bss_data, fw_bss_data_size);
 1231
 1232	/* Copy firmware bios info into FB memory. */
 1233	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
 1234	       adev->bios_size);
 1235
 1236	/* Reset regions that need to be reset. */
 1237	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
 1238	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
 1239
 1240	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
 1241	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
 1242
 1243	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
 1244	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
 1245
 1246	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
 1247	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);
 1248
 1249	/* Initialize hardware. */
 1250	memset(&hw_params, 0, sizeof(hw_params));
 1251	hw_params.fb_base = adev->gmc.fb_start;
 1252	hw_params.fb_offset = adev->vm_manager.vram_base_offset;
 1253
 1254	/* backdoor load firmware and trigger dmub running */
 1255	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 1256		hw_params.load_inst_const = true;
 1257
 1258	if (dmcu)
 1259		hw_params.psp_version = dmcu->psp_version;
 1260
 1261	for (i = 0; i < fb_info->num_fb; ++i)
 1262		hw_params.fb[i] = &fb_info->fb[i];
 1263
 1264	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1265	case IP_VERSION(3, 1, 3):
 1266	case IP_VERSION(3, 1, 4):
 1267	case IP_VERSION(3, 5, 0):
 1268	case IP_VERSION(3, 5, 1):
 1269	case IP_VERSION(4, 0, 1):
 1270		hw_params.dpia_supported = true;
 1271		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
 1272		break;
 1273	default:
 1274		break;
 1275	}
 1276
 1277	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1278	case IP_VERSION(3, 5, 0):
 1279	case IP_VERSION(3, 5, 1):
 1280		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
 1281		break;
 1282	default:
 1283		break;
 1284	}
 1285
 1286	status = dmub_srv_hw_init(dmub_srv, &hw_params);
 1287	if (status != DMUB_STATUS_OK) {
 1288		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
 1289		return -EINVAL;
 1290	}
 1291
 1292	/* Wait for firmware load to finish. */
 1293	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
 1294	if (status != DMUB_STATUS_OK)
 1295		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
 1296
 1297	/* Init DMCU and ABM if available. */
 1298	if (dmcu && abm) {
 1299		dmcu->funcs->dmcu_init(dmcu);
 1300		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
 1301	}
 1302
 1303	if (!adev->dm.dc->ctx->dmub_srv)
 1304		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
 1305	if (!adev->dm.dc->ctx->dmub_srv) {
 1306		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
 1307		return -ENOMEM;
 1308	}
 1309
 1310	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
 1311		 adev->dm.dmcub_fw_version);
 1312
 1313	/* Keeping sanity checks off if
 1314	 * DCN31 >= 4.0.59.0
 1315	 * DCN314 >= 8.0.16.0
 1316	 * Otherwise, turn on sanity checks
 1317	 */
 1318	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1319	case IP_VERSION(3, 1, 2):
 1320	case IP_VERSION(3, 1, 3):
 1321		if (adev->dm.dmcub_fw_version &&
 1322			adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
 1323			adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
 1324				adev->dm.dc->debug.sanity_checks = true;
 1325		break;
 1326	case IP_VERSION(3, 1, 4):
 1327		if (adev->dm.dmcub_fw_version &&
 1328			adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
 1329			adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
 1330				adev->dm.dc->debug.sanity_checks = true;
 1331		break;
 1332	default:
 1333		break;
 1334	}
 1335
 1336	return 0;
 1337}
 1338
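      /*
       * On resume, only wait for DMUB auto-load if the hardware is already
       * initialized; otherwise perform a full dm_dmub_hw_init().
       */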
 1339static void dm_dmub_hw_resume(struct amdgpu_device *adev)
 1340{
 1341	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
 1342	enum dmub_status status;
 1343	bool init;
 1344	int r;
 1345
 1346	if (!dmub_srv) {
 1347		/* DMUB isn't supported on the ASIC. */
 1348		return;
 1349	}
 1350
 1351	status = dmub_srv_is_hw_init(dmub_srv, &init);
 1352	if (status != DMUB_STATUS_OK)
 1353		DRM_WARN("DMUB hardware init check failed: %d\n", status);
 1354
 1355	if (status == DMUB_STATUS_OK && init) {
 1356		/* Wait for firmware load to finish. */
 1357		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
 1358		if (status != DMUB_STATUS_OK)
 1359			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
 1360	} else {
 1361		/* Perform the full hardware initialization. */
 1362		r = dm_dmub_hw_init(adev);
 1363		if (r)
 1364			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 1365	}
 1366}
 1367
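      /*
       * Fill pa_config with the system/AGP aperture ranges, framebuffer
       * addresses and GART page-table addresses that DC uses to set up its
       * physical address space configuration.
       */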
 1368static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 1369{
 1370	u64 pt_base;
 1371	u32 logical_addr_low;
 1372	u32 logical_addr_high;
 1373	u32 agp_base, agp_bot, agp_top;
 1374	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 1375
 1376	memset(pa_config, 0, sizeof(*pa_config));
 1377
 1378	agp_base = 0;
 1379	agp_bot = adev->gmc.agp_start >> 24;
 1380	agp_top = adev->gmc.agp_end >> 24;
 1381
 1382	/* AGP aperture is disabled */
 1383	if (agp_bot > agp_top) {
 1384		logical_addr_low = adev->gmc.fb_start >> 18;
 1385		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
 1386				       AMD_APU_IS_RENOIR |
 1387				       AMD_APU_IS_GREEN_SARDINE))
 1388			/*
  1389			 * Raven2 has a HW issue that makes it unable to use vram which lies
  1390			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
  1391			 * increase the system aperture high address (add 1)
  1392			 * to get rid of the VM fault and hardware hang.
 1393			 */
 1394			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
 1395		else
 1396			logical_addr_high = adev->gmc.fb_end >> 18;
 1397	} else {
 1398		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
 1399		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
 1400				       AMD_APU_IS_RENOIR |
 1401				       AMD_APU_IS_GREEN_SARDINE))
 1402			/*
  1403			 * Raven2 has a HW issue that makes it unable to use vram which lies
  1404			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
  1405			 * increase the system aperture high address (add 1)
  1406			 * to get rid of the VM fault and hardware hang.
 1407			 */
 1408			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
 1409		else
 1410			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
 1411	}
 1412
 1413	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 1414
 1415	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
 1416						   AMDGPU_GPU_PAGE_SHIFT);
 1417	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
 1418						  AMDGPU_GPU_PAGE_SHIFT);
 1419	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
 1420						 AMDGPU_GPU_PAGE_SHIFT);
 1421	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
 1422						AMDGPU_GPU_PAGE_SHIFT);
 1423	page_table_base.high_part = upper_32_bits(pt_base);
 1424	page_table_base.low_part = lower_32_bits(pt_base);
 1425
 1426	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
 1427	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 1428
 1429	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
 1430	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
 1431	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 1432
 1433	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
 1434	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
 1435	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
 1436
 1437	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
 1438	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
 1439	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
 1440
 1441	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
 1442
 1443}
 1444
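      /* Force the connector into the given state and generate a hotplug event. */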
 1445static void force_connector_state(
 1446	struct amdgpu_dm_connector *aconnector,
 1447	enum drm_connector_force force_state)
 1448{
 1449	struct drm_connector *connector = &aconnector->base;
 1450
 1451	mutex_lock(&connector->dev->mode_config.mutex);
 1452	aconnector->base.force = force_state;
 1453	mutex_unlock(&connector->dev->mode_config.mutex);
 1454
 1455	mutex_lock(&aconnector->hpd_lock);
 1456	drm_kms_helper_connector_hotplug_event(connector);
 1457	mutex_unlock(&aconnector->hpd_lock);
 1458}
 1459
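      /*
       * Deferred handler for HPD RX (short pulse) interrupts: re-detects the
       * connector and, based on the IRQ data, services MST sideband messages,
       * automated test requests, or link loss.
       */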
 1460static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 1461{
 1462	struct hpd_rx_irq_offload_work *offload_work;
 1463	struct amdgpu_dm_connector *aconnector;
 1464	struct dc_link *dc_link;
 1465	struct amdgpu_device *adev;
 1466	enum dc_connection_type new_connection_type = dc_connection_none;
 1467	unsigned long flags;
 1468	union test_response test_response;
 1469
 1470	memset(&test_response, 0, sizeof(test_response));
 1471
 1472	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
 1473	aconnector = offload_work->offload_wq->aconnector;
 1474
 1475	if (!aconnector) {
 1476		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
 1477		goto skip;
 1478	}
 1479
 1480	adev = drm_to_adev(aconnector->base.dev);
 1481	dc_link = aconnector->dc_link;
 1482
 1483	mutex_lock(&aconnector->hpd_lock);
 1484	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
 1485		DRM_ERROR("KMS: Failed to detect connector\n");
 1486	mutex_unlock(&aconnector->hpd_lock);
 1487
 1488	if (new_connection_type == dc_connection_none)
 1489		goto skip;
 1490
 1491	if (amdgpu_in_reset(adev))
 1492		goto skip;
 1493
 1494	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
 1495		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
 1496		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
 1497		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
 1498		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
 1499		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
 1500		goto skip;
 1501	}
 1502
 1503	mutex_lock(&adev->dm.dc_lock);
 1504	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
 1505		dc_link_dp_handle_automated_test(dc_link);
 1506
 1507		if (aconnector->timing_changed) {
 1508			/* force connector disconnect and reconnect */
 1509			force_connector_state(aconnector, DRM_FORCE_OFF);
 1510			msleep(100);
 1511			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
 1512		}
 1513
 1514		test_response.bits.ACK = 1;
 1515
 1516		core_link_write_dpcd(
 1517		dc_link,
 1518		DP_TEST_RESPONSE,
 1519		&test_response.raw,
 1520		sizeof(test_response));
 1521	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
 1522			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
 1523			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
  1524		/* offload_work->data comes from handle_hpd_rx_irq->
  1525		 * schedule_hpd_rx_offload_work; this is the deferred handler
  1526		 * for an hpd short pulse. By now, the link status may have
  1527		 * changed, so get the latest link status from the dpcd
  1528		 * registers. If the link status is good, skip running link
  1529		 * training again.
 1530		 */
 1531		union hpd_irq_data irq_data;
 1532
 1533		memset(&irq_data, 0, sizeof(irq_data));
 1534
  1535		/* Before dc_link_dp_handle_link_loss, allow a new link-loss handling
  1536		 * request to be added to the work queue in case the link is lost again
  1537		 * at the end of dc_link_dp_handle_link_loss.
 1538		 */
 1539		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
 1540		offload_work->offload_wq->is_handling_link_loss = false;
 1541		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
 1542
 1543		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
 1544			dc_link_check_link_loss_status(dc_link, &irq_data))
 1545			dc_link_dp_handle_link_loss(dc_link);
 1546	}
 1547	mutex_unlock(&adev->dm.dc_lock);
 1548
 1549skip:
 1550	kfree(offload_work);
 1551
 1552}
 1553
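      /*
       * Allocate one single-threaded workqueue per link for offloading HPD RX
       * interrupt handling. Returns NULL on allocation failure.
       */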
 1554static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
 1555{
 1556	int max_caps = dc->caps.max_links;
 1557	int i = 0;
 1558	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
 1559
 1560	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
 1561
 1562	if (!hpd_rx_offload_wq)
 1563		return NULL;
 1564
 1565
 1566	for (i = 0; i < max_caps; i++) {
 1567		hpd_rx_offload_wq[i].wq =
 1568				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
 1569
 1570		if (hpd_rx_offload_wq[i].wq == NULL) {
 1571			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
 1572			goto out_err;
 1573		}
 1574
 1575		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
 1576	}
 1577
 1578	return hpd_rx_offload_wq;
 1579
 1580out_err:
 1581	for (i = 0; i < max_caps; i++) {
 1582		if (hpd_rx_offload_wq[i].wq)
 1583			destroy_workqueue(hpd_rx_offload_wq[i].wq);
 1584	}
 1585	kfree(hpd_rx_offload_wq);
 1586	return NULL;
 1587}
 1588
 1589struct amdgpu_stutter_quirk {
 1590	u16 chip_vendor;
 1591	u16 chip_device;
 1592	u16 subsys_vendor;
 1593	u16 subsys_device;
 1594	u8 revision;
 1595};
 1596
 1597static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
 1598	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
 1599	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
 1600	{ 0, 0, 0, 0, 0 },
 1601};
 1602
 1603static bool dm_should_disable_stutter(struct pci_dev *pdev)
 1604{
 1605	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
 1606
 1607	while (p && p->chip_device != 0) {
 1608		if (pdev->vendor == p->chip_vendor &&
 1609		    pdev->device == p->chip_device &&
 1610		    pdev->subsystem_vendor == p->subsys_vendor &&
 1611		    pdev->subsystem_device == p->subsys_device &&
 1612		    pdev->revision == p->revision) {
 1613			return true;
 1614		}
 1615		++p;
 1616	}
 1617	return false;
 1618}
 1619
 1620struct amdgpu_dm_quirks {
 1621	bool aux_hpd_discon;
 1622	bool support_edp0_on_dp1;
 1623};
 1624
 1625static struct amdgpu_dm_quirks quirk_entries = {
 1626	.aux_hpd_discon = false,
 1627	.support_edp0_on_dp1 = false
 1628};
 1629
 1630static int edp0_on_dp1_callback(const struct dmi_system_id *id)
 1631{
 1632	quirk_entries.support_edp0_on_dp1 = true;
 1633	return 0;
 1634}
 1635
 1636static int aux_hpd_discon_callback(const struct dmi_system_id *id)
 1637{
 1638	quirk_entries.aux_hpd_discon = true;
 1639	return 0;
 1640}
 1641
 1642static const struct dmi_system_id dmi_quirk_table[] = {
 1643	{
 1644		.callback = aux_hpd_discon_callback,
 1645		.matches = {
 1646			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1647			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
 1648		},
 1649	},
 1650	{
 1651		.callback = aux_hpd_discon_callback,
 1652		.matches = {
 1653			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1654			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
 1655		},
 1656	},
 1657	{
 1658		.callback = aux_hpd_discon_callback,
 1659		.matches = {
 1660			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1661			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
 1662		},
 1663	},
 1664	{
 1665		.callback = aux_hpd_discon_callback,
 1666		.matches = {
 1667			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1668			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
 1669		},
 1670	},
 1671	{
 1672		.callback = aux_hpd_discon_callback,
 1673		.matches = {
 1674			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1675			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
 1676		},
 1677	},
 1678	{
 1679		.callback = aux_hpd_discon_callback,
 1680		.matches = {
 1681			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1682			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
 1683		},
 1684	},
 1685	{
 1686		.callback = aux_hpd_discon_callback,
 1687		.matches = {
 1688			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1689			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
 1690		},
 1691	},
 1692	{
 1693		.callback = aux_hpd_discon_callback,
 1694		.matches = {
 1695			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1696			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
 1697		},
 1698	},
 1699	{
 1700		.callback = aux_hpd_discon_callback,
 1701		.matches = {
 1702			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1703			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
 1704		},
 1705	},
 1706	{
 1707		.callback = edp0_on_dp1_callback,
 1708		.matches = {
 1709			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 1710			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
 1711		},
 1712	},
 1713	{
 1714		.callback = edp0_on_dp1_callback,
 1715		.matches = {
 1716			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 1717			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
 1718		},
 1719	},
 1720	{}
 1721	/* TODO: refactor this from a fixed table to a dynamic option */
 1722};
 1723
 1724static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
 1725{
 1726	int dmi_id;
 1727	struct drm_device *dev = dm->ddev;
 1728
 1729	dm->aux_hpd_discon_quirk = false;
 1730	init_data->flags.support_edp0_on_dp1 = false;
 1731
 1732	dmi_id = dmi_check_system(dmi_quirk_table);
 1733
 1734	if (!dmi_id)
 1735		return;
 1736
 1737	if (quirk_entries.aux_hpd_discon) {
 1738		dm->aux_hpd_discon_quirk = true;
 1739		drm_info(dev, "aux_hpd_discon_quirk attached\n");
 1740	}
 1741	if (quirk_entries.support_edp0_on_dp1) {
 1742		init_data->flags.support_edp0_on_dp1 = true;
 1743		drm_info(dev, "support_edp0_on_dp1 attached\n");
 1744	}
 1745}
 1746
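/* Allocate a kernel BO in GTT or VRAM on behalf of DC, return its CPU
 * pointer, and report the GPU address through @addr. The allocation is
 * tracked on adev->dm.da_list so dm_free_gpu_mem() can find it later.
 */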
 1747void*
 1748dm_allocate_gpu_mem(
 1749		struct amdgpu_device *adev,
 1750		enum dc_gpu_mem_alloc_type type,
 1751		size_t size,
 1752		long long *addr)
 1753{
 1754	struct dal_allocation *da;
 1755	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
 1756		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
 1757	int ret;
 1758
 1759	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
 1760	if (!da)
 1761		return NULL;
 1762
 1763	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 1764				      domain, &da->bo,
 1765				      &da->gpu_addr, &da->cpu_ptr);
 1766
 1767	*addr = da->gpu_addr;
 1768
 1769	if (ret) {
 1770		kfree(da);
 1771		return NULL;
 1772	}
 1773
 1774	/* add da to list in dm */
 1775	list_add(&da->list, &adev->dm.da_list);
 1776
 1777	return da->cpu_ptr;
 1778}
 1779
 1780void
 1781dm_free_gpu_mem(
 1782		struct amdgpu_device *adev,
 1783		enum dc_gpu_mem_alloc_type type,
 1784		void *pvMem)
 1785{
 1786	struct dal_allocation *da;
 1787
 1788	/* walk the da list in DM */
 1789	list_for_each_entry(da, &adev->dm.da_list, list) {
 1790		if (pvMem == da->cpu_ptr) {
 1791			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
 1792			list_del(&da->list);
 1793			kfree(da);
 1794			break;
 1795		}
 1796	}
 1797
 1798}
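
/* Illustrative pairing (a sketch, mirroring how dm_dmub_get_vbios_bounding_box()
 * below uses these helpers): every dm_allocate_gpu_mem() call is expected to be
 * balanced by a dm_free_gpu_mem() of the same allocation type, e.g.
 *
 *	long long gpu_addr;
 *	void *cpu_ptr = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART,
 *					    sizeof(struct dml2_soc_bb), &gpu_addr);
 *	...
 *	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, cpu_ptr);
 */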
 1799
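/* Send a GPINT command to the VBIOS-loaded DMUB: write the command code and
 * parameter with the status bit set, then poll (1us per iteration, up to
 * @timeout_us) for DMUB to clear the status bit as the acknowledgement.
 */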
 1800static enum dmub_status
 1801dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
 1802				 enum dmub_gpint_command command_code,
 1803				 uint16_t param,
 1804				 uint32_t timeout_us)
 1805{
 1806	union dmub_gpint_data_register reg, test;
 1807	uint32_t i;
 1808
 1809	/* Assume that VBIOS DMUB is ready to take commands */
 1810
 1811	reg.bits.status = 1;
 1812	reg.bits.command_code = command_code;
 1813	reg.bits.param = param;
 1814
 1815	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
 1816
 1817	for (i = 0; i < timeout_us; ++i) {
 1818		udelay(1);
 1819
 1820		/* Check if our GPINT got acked */
 1821		reg.bits.status = 0;
 1822		test = (union dmub_gpint_data_register)
 1823			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
 1824
 1825		if (test.all == reg.all)
 1826			return DMUB_STATUS_OK;
 1827	}
 1828
 1829	return DMUB_STATUS_TIMEOUT;
 1830}
 1831
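/* Retrieve the SoC bounding box carried by the VBIOS-loaded DMUB: allocate a
 * GART buffer, hand its GPU address to DMUB 16 bits at a time via GPINT, then
 * ask DMUB to copy the bounding box into that buffer. Only DCN 4.0.1 supports
 * this here; other ASICs return NULL.
 */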
 1832static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
 1833{
 1834	struct dml2_soc_bb *bb;
 1835	long long addr;
 1836	int i = 0;
 1837	uint16_t chunk;
 1838	enum dmub_gpint_command send_addrs[] = {
 1839		DMUB_GPINT__SET_BB_ADDR_WORD0,
 1840		DMUB_GPINT__SET_BB_ADDR_WORD1,
 1841		DMUB_GPINT__SET_BB_ADDR_WORD2,
 1842		DMUB_GPINT__SET_BB_ADDR_WORD3,
 1843	};
 1844	enum dmub_status ret;
 1845
 1846	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1847	case IP_VERSION(4, 0, 1):
 1848		break;
 1849	default:
 1850		return NULL;
 1851	}
 1852
 1853	bb =  dm_allocate_gpu_mem(adev,
 1854				  DC_MEM_ALLOC_TYPE_GART,
 1855				  sizeof(struct dml2_soc_bb),
 1856				  &addr);
 1857	if (!bb)
 1858		return NULL;
 1859
 1860	for (i = 0; i < 4; i++) {
 1861		/* Extract 16-bit chunk */
 1862		chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
 1863		/* Send the chunk */
 1864		ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
 1865		if (ret != DMUB_STATUS_OK)
 1866			goto free_bb;
 1867	}
 1868
 1869	/* Now ask DMUB to copy the bb */
 1870	ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
 1871	if (ret != DMUB_STATUS_OK)
 1872		goto free_bb;
 1873
 1874	return bb;
 1875
 1876free_bb:
 1877	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
 1878	return NULL;
 1879
 1880}
 1881
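/* Pick the default IPS (idle power state) policy for this ASIC; used when the
 * user has not forced a policy through amdgpu_dc_debug_mask.
 */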
 1882static enum dmub_ips_disable_type dm_get_default_ips_mode(
 1883	struct amdgpu_device *adev)
 1884{
 1885	enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
 1886
 1887	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1888	case IP_VERSION(3, 5, 0):
 1889		/*
 1890		 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
 1891		 * cause a hard hang. A fix exists for newer PMFW.
 1892		 *
 1893		 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
 1894		 * IPS state in all cases, except for s0ix and all displays off (DPMS),
 1895		 * where IPS2 is allowed.
 1896		 *
 1897		 * When checking pmfw version, use the major and minor only.
 1898		 */
 1899		if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
 1900			ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 1901		else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
 1902			/*
 1903			 * Other ASICs with DCN35 that have residency issues with
 1904			 * IPS2 in idle.
 1905			 * We want them to use IPS2 only in display off cases.
 1906			 */
 1907			ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 1908		break;
 1909	case IP_VERSION(3, 5, 1):
 1910		ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 1911		break;
 1912	default:
 1913		/* ASICs older than DCN35 do not have IPS */
 1914		if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
 1915			ret = DMUB_IPS_DISABLE_ALL;
 1916		break;
 1917	}
 1918
 1919	return ret;
 1920}
 1921
 1922static int amdgpu_dm_init(struct amdgpu_device *adev)
 1923{
 1924	struct dc_init_data init_data;
 1925	struct dc_callback_init init_params;
 1926	int r;
 1927
 1928	adev->dm.ddev = adev_to_drm(adev);
 1929	adev->dm.adev = adev;
 1930
 1931	/* Zero all the fields */
 1932	memset(&init_data, 0, sizeof(init_data));
 1933	memset(&init_params, 0, sizeof(init_params));
 1934
 1935	mutex_init(&adev->dm.dpia_aux_lock);
 1936	mutex_init(&adev->dm.dc_lock);
 1937	mutex_init(&adev->dm.audio_lock);
 1938
 1939	if (amdgpu_dm_irq_init(adev)) {
 1940		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
 1941		goto error;
 1942	}
 1943
 1944	init_data.asic_id.chip_family = adev->family;
 1945
 1946	init_data.asic_id.pci_revision_id = adev->pdev->revision;
 1947	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
 1948	init_data.asic_id.chip_id = adev->pdev->device;
 1949
 1950	init_data.asic_id.vram_width = adev->gmc.vram_width;
 1951	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
 1952	init_data.asic_id.atombios_base_address =
 1953		adev->mode_info.atom_context->bios;
 1954
 1955	init_data.driver = adev;
 1956
 1957	/* cgs_device was created in dm_sw_init() */
 1958	init_data.cgs_device = adev->dm.cgs_device;
 1959
 1960	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
 1961
 1962	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 1963	case IP_VERSION(2, 1, 0):
 1964		switch (adev->dm.dmcub_fw_version) {
 1965		case 0: /* development */
 1966		case 0x1: /* linux-firmware.git hash 6d9f399 */
 1967		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
 1968			init_data.flags.disable_dmcu = false;
 1969			break;
 1970		default:
 1971			init_data.flags.disable_dmcu = true;
 1972		}
 1973		break;
 1974	case IP_VERSION(2, 0, 3):
 1975		init_data.flags.disable_dmcu = true;
 1976		break;
 1977	default:
 1978		break;
 1979	}
 1980
 1981	/* APUs support S/G display by default, except:
 1982	 * ASICs before Carrizo,
 1983	 * RAVEN1 (users reported stability issues)
 1984	 */
 1985
 1986	if (adev->asic_type < CHIP_CARRIZO) {
 1987		init_data.flags.gpu_vm_support = false;
 1988	} else if (adev->asic_type == CHIP_RAVEN) {
 1989		if (adev->apu_flags & AMD_APU_IS_RAVEN)
 1990			init_data.flags.gpu_vm_support = false;
 1991		else
 1992			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
 1993	} else {
 1994		if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
 1995			init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
 1996		else
 1997			init_data.flags.gpu_vm_support =
 1998				(amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
 1999	}
 2000
 2001	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
 2002
 2003	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
 2004		init_data.flags.fbc_support = true;
 2005
 2006	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
 2007		init_data.flags.multi_mon_pp_mclk_switch = true;
 2008
 2009	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
 2010		init_data.flags.disable_fractional_pwm = true;
 2011
 2012	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
 2013		init_data.flags.edp_no_power_sequencing = true;
 2014
 2015	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
 2016		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
 2017	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
 2018		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
 2019
 2020	init_data.flags.seamless_boot_edp_requested = false;
 2021
 2022	if (amdgpu_device_seamless_boot_supported(adev)) {
 2023		init_data.flags.seamless_boot_edp_requested = true;
 2024		init_data.flags.allow_seamless_boot_optimization = true;
 2025		DRM_INFO("Seamless boot condition check passed\n");
 2026	}
 2027
 2028	init_data.flags.enable_mipi_converter_optimization = true;
 2029
 2030	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
 2031	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
 2032	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 2033
 2034	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
 2035		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
 2036	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
 2037		init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
 2038	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
 2039		init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 2040	else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
 2041		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
 2042	else
 2043		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
 2044
 2045	init_data.flags.disable_ips_in_vpb = 0;
 2046
 2047	/* Enable DWB for tested platforms only */
 2048	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
 2049		init_data.num_virtual_links = 1;
 2050
 2051	retrieve_dmi_info(&adev->dm, &init_data);
 2052
 2053	if (adev->dm.bb_from_dmub)
 2054		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
 2055	else
 2056		init_data.bb_from_dmub = NULL;
 2057
 2058	/* Display Core create. */
 2059	adev->dm.dc = dc_create(&init_data);
 2060
 2061	if (adev->dm.dc) {
 2062		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
 2063			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
 2064	} else {
 2065		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
 2066		goto error;
 2067	}
 2068
 2069	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
 2070		adev->dm.dc->debug.force_single_disp_pipe_split = false;
 2071		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
 2072	}
 2073
 2074	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
 2075		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
 2076	if (dm_should_disable_stutter(adev->pdev))
 2077		adev->dm.dc->debug.disable_stutter = true;
 2078
 2079	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 2080		adev->dm.dc->debug.disable_stutter = true;
 2081
 2082	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
 2083		adev->dm.dc->debug.disable_dsc = true;
 2084
 2085	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 2086		adev->dm.dc->debug.disable_clock_gate = true;
 2087
 2088	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
 2089		adev->dm.dc->debug.force_subvp_mclk_switch = true;
 2090
 2091	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
 2092		adev->dm.dc->debug.using_dml2 = true;
 2093		adev->dm.dc->debug.using_dml21 = true;
 2094	}
 2095
 2096	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 2097
 2098	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
 2099	adev->dm.dc->debug.ignore_cable_id = true;
 2100
 2101	if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
 2102		DRM_INFO("DP-HDMI FRL PCON supported\n");
 2103
 2104	r = dm_dmub_hw_init(adev);
 2105	if (r) {
 2106		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 2107		goto error;
 2108	}
 2109
 2110	dc_hardware_init(adev->dm.dc);
 2111
 2112	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
 2113	if (!adev->dm.hpd_rx_offload_wq) {
 2114		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
 2115		goto error;
 2116	}
 2117
 2118	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
 2119		struct dc_phy_addr_space_config pa_config;
 2120
 2121		mmhub_read_system_context(adev, &pa_config);
 2122
 2123		// Call the DC init_memory func
 2124		dc_setup_system_context(adev->dm.dc, &pa_config);
 2125	}
 2126
 2127	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
 2128	if (!adev->dm.freesync_module) {
 2129		DRM_ERROR(
 2130		"amdgpu: failed to initialize freesync_module.\n");
 2131	} else
 2132		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
 2133				adev->dm.freesync_module);
 2134
 2135	amdgpu_dm_init_color_mod();
 2136
 2137	if (adev->dm.dc->caps.max_links > 0) {
 2138		adev->dm.vblank_control_workqueue =
 2139			create_singlethread_workqueue("dm_vblank_control_workqueue");
 2140		if (!adev->dm.vblank_control_workqueue)
 2141			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
 2142	}
 2143
 2144	if (adev->dm.dc->caps.ips_support &&
 2145	    adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
 2146		adev->dm.idle_workqueue = idle_create_workqueue(adev);
 2147
 2148	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
 2149		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 2150
 2151		if (!adev->dm.hdcp_workqueue)
 2152			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
 2153		else
 2154			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
 2155
 2156		dc_init_callbacks(adev->dm.dc, &init_params);
 2157	}
 2158	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 2159		init_completion(&adev->dm.dmub_aux_transfer_done);
 2160		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
 2161		if (!adev->dm.dmub_notify) {
 2162			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
 2163			goto error;
 2164		}
 2165
 2166		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
 2167		if (!adev->dm.delayed_hpd_wq) {
 2168			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
 2169			goto error;
 2170		}
 2171
 2172		amdgpu_dm_outbox_init(adev);
 2173		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 2174			dmub_aux_setconfig_callback, false)) {
 2175			DRM_ERROR("amdgpu: failed to register dmub aux callback");
 2176			goto error;
 2177		}
 2178		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
 2179		 * It is expected that DMUB will resend any pending notifications at this point. Note
 2180		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
 2181		 * align with the legacy interface initialization sequence. Connection status will be
 2182		 * proactively detected once in amdgpu_dm_initialize_drm_device().
 2183		 */
 2184		dc_enable_dmub_outbox(adev->dm.dc);
 2185
 2186		/* DPIA trace goes to dmesg logs only if outbox is enabled */
 2187		if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
 2188			dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
 2189	}
 2190
 2191	if (amdgpu_dm_initialize_drm_device(adev)) {
 2192		DRM_ERROR(
 2193		"amdgpu: failed to initialize sw for display support.\n");
 2194		goto error;
 2195	}
 2196
 2197	/* create fake encoders for MST */
 2198	dm_dp_create_fake_mst_encoders(adev);
 2199
 2200	/* TODO: Add_display_info? */
 2201
 2202	/* TODO use dynamic cursor width */
 2203	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
 2204	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
 2205
 2206	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
 2207		DRM_ERROR(
 2208		"amdgpu: failed to initialize vblank for display support.\n");
 2209		goto error;
 2210	}
 2211
 2212#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 2213	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
 2214	if (!adev->dm.secure_display_ctxs)
 2215		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
 2216#endif
 2217
 2218	DRM_DEBUG_DRIVER("KMS initialized.\n");
 2219
 2220	return 0;
 2221error:
 2222	amdgpu_dm_fini(adev);
 2223
 2224	return -EINVAL;
 2225}
 2226
 2227static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
 2228{
 2229	struct amdgpu_device *adev = ip_block->adev;
 2230
 2231	amdgpu_dm_audio_fini(adev);
 2232
 2233	return 0;
 2234}
 2235
 2236static void amdgpu_dm_fini(struct amdgpu_device *adev)
 2237{
 2238	int i;
 2239
 2240	if (adev->dm.vblank_control_workqueue) {
 2241		destroy_workqueue(adev->dm.vblank_control_workqueue);
 2242		adev->dm.vblank_control_workqueue = NULL;
 2243	}
 2244
 2245	if (adev->dm.idle_workqueue) {
 2246		if (adev->dm.idle_workqueue->running) {
 2247			adev->dm.idle_workqueue->enable = false;
 2248			flush_work(&adev->dm.idle_workqueue->work);
 2249		}
 2250
 2251		kfree(adev->dm.idle_workqueue);
 2252		adev->dm.idle_workqueue = NULL;
 2253	}
 2254
 2255	amdgpu_dm_destroy_drm_device(&adev->dm);
 2256
 2257#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 2258	if (adev->dm.secure_display_ctxs) {
 2259		for (i = 0; i < adev->mode_info.num_crtc; i++) {
 2260			if (adev->dm.secure_display_ctxs[i].crtc) {
 2261				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
 2262				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
 2263			}
 2264		}
 2265		kfree(adev->dm.secure_display_ctxs);
 2266		adev->dm.secure_display_ctxs = NULL;
 2267	}
 2268#endif
 2269	if (adev->dm.hdcp_workqueue) {
 2270		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
 2271		adev->dm.hdcp_workqueue = NULL;
 2272	}
 2273
 2274	if (adev->dm.dc) {
 2275		dc_deinit_callbacks(adev->dm.dc);
 2276		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 2277		if (dc_enable_dmub_notifications(adev->dm.dc)) {
 2278			kfree(adev->dm.dmub_notify);
 2279			adev->dm.dmub_notify = NULL;
 2280			destroy_workqueue(adev->dm.delayed_hpd_wq);
 2281			adev->dm.delayed_hpd_wq = NULL;
 2282		}
 2283	}
 2284
 2285	if (adev->dm.dmub_bo)
 2286		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
 2287				      &adev->dm.dmub_bo_gpu_addr,
 2288				      &adev->dm.dmub_bo_cpu_addr);
 2289
 2290	if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
 2291		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
 2292			if (adev->dm.hpd_rx_offload_wq[i].wq) {
 2293				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
 2294				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
 2295			}
 2296		}
 2297
 2298		kfree(adev->dm.hpd_rx_offload_wq);
 2299		adev->dm.hpd_rx_offload_wq = NULL;
 2300	}
 2301
 2302	/* DC Destroy TODO: Replace destroy DAL */
 2303	if (adev->dm.dc)
 2304		dc_destroy(&adev->dm.dc);
 2305	/*
 2306	 * TODO: pageflip, vblank interrupt
 2307	 *
 2308	 * amdgpu_dm_irq_fini(adev);
 2309	 */
 2310
 2311	if (adev->dm.cgs_device) {
 2312		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
 2313		adev->dm.cgs_device = NULL;
 2314	}
 2315	if (adev->dm.freesync_module) {
 2316		mod_freesync_destroy(adev->dm.freesync_module);
 2317		adev->dm.freesync_module = NULL;
 2318	}
 2319
 2320	mutex_destroy(&adev->dm.audio_lock);
 2321	mutex_destroy(&adev->dm.dc_lock);
 2322	mutex_destroy(&adev->dm.dpia_aux_lock);
 2323}
 2324
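/* Request the DMCU firmware for ASICs that still need it and register its
 * ERAM and INTV sections with the PSP firmware loader. ASICs without a DMCU
 * (or that use DMCUB instead) simply return 0 here.
 */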
 2325static int load_dmcu_fw(struct amdgpu_device *adev)
 2326{
 2327	const char *fw_name_dmcu = NULL;
 2328	int r;
 2329	const struct dmcu_firmware_header_v1_0 *hdr;
 2330
 2331	switch (adev->asic_type) {
 2332#if defined(CONFIG_DRM_AMD_DC_SI)
 2333	case CHIP_TAHITI:
 2334	case CHIP_PITCAIRN:
 2335	case CHIP_VERDE:
 2336	case CHIP_OLAND:
 2337#endif
 2338	case CHIP_BONAIRE:
 2339	case CHIP_HAWAII:
 2340	case CHIP_KAVERI:
 2341	case CHIP_KABINI:
 2342	case CHIP_MULLINS:
 2343	case CHIP_TONGA:
 2344	case CHIP_FIJI:
 2345	case CHIP_CARRIZO:
 2346	case CHIP_STONEY:
 2347	case CHIP_POLARIS11:
 2348	case CHIP_POLARIS10:
 2349	case CHIP_POLARIS12:
 2350	case CHIP_VEGAM:
 2351	case CHIP_VEGA10:
 2352	case CHIP_VEGA12:
 2353	case CHIP_VEGA20:
 2354		return 0;
 2355	case CHIP_NAVI12:
 2356		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
 2357		break;
 2358	case CHIP_RAVEN:
 2359		if (ASICREV_IS_PICASSO(adev->external_rev_id))
 2360			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
 2361		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
 2362			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
 2363		else
 2364			return 0;
 2365		break;
 2366	default:
 2367		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 2368		case IP_VERSION(2, 0, 2):
 2369		case IP_VERSION(2, 0, 3):
 2370		case IP_VERSION(2, 0, 0):
 2371		case IP_VERSION(2, 1, 0):
 2372		case IP_VERSION(3, 0, 0):
 2373		case IP_VERSION(3, 0, 2):
 2374		case IP_VERSION(3, 0, 3):
 2375		case IP_VERSION(3, 0, 1):
 2376		case IP_VERSION(3, 1, 2):
 2377		case IP_VERSION(3, 1, 3):
 2378		case IP_VERSION(3, 1, 4):
 2379		case IP_VERSION(3, 1, 5):
 2380		case IP_VERSION(3, 1, 6):
 2381		case IP_VERSION(3, 2, 0):
 2382		case IP_VERSION(3, 2, 1):
 2383		case IP_VERSION(3, 5, 0):
 2384		case IP_VERSION(3, 5, 1):
 2385		case IP_VERSION(4, 0, 1):
 2386			return 0;
 2387		default:
 2388			break;
 2389		}
 2390		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 2391		return -EINVAL;
 2392	}
 2393
 2394	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 2395		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
 2396		return 0;
 2397	}
 2398
 2399	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
 2400	if (r == -ENODEV) {
 2401		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
 2402		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
 2403		adev->dm.fw_dmcu = NULL;
 2404		return 0;
 2405	}
 2406	if (r) {
 2407		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
 2408			fw_name_dmcu);
 2409		amdgpu_ucode_release(&adev->dm.fw_dmcu);
 2410		return r;
 2411	}
 2412
 2413	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
 2414	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
 2415	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
 2416	adev->firmware.fw_size +=
 2417		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
 2418
 2419	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
 2420	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
 2421	adev->firmware.fw_size +=
 2422		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
 2423
 2424	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
 2425
 2426	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
 2427
 2428	return 0;
 2429}
 2430
 2431static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
 2432{
 2433	struct amdgpu_device *adev = ctx;
 2434
 2435	return dm_read_reg(adev->dm.dc->ctx, address);
 2436}
 2437
 2438static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
 2439				     uint32_t value)
 2440{
 2441	struct amdgpu_device *adev = ctx;
 2442
 2443	return dm_write_reg(adev->dm.dc->ctx, address, value);
 2444}
 2445
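/* Software-side DMUB setup: map the DCN IP version to a DMUB ASIC enum,
 * publish the DMCUB firmware to the PSP loader when applicable, create the
 * DMUB service, size its memory regions from the firmware header, back them
 * with a kernel BO, and finally fetch the VBIOS bounding box.
 */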
 2446static int dm_dmub_sw_init(struct amdgpu_device *adev)
 2447{
 2448	struct dmub_srv_create_params create_params;
 2449	struct dmub_srv_region_params region_params;
 2450	struct dmub_srv_region_info region_info;
 2451	struct dmub_srv_memory_params memory_params;
 2452	struct dmub_srv_fb_info *fb_info;
 2453	struct dmub_srv *dmub_srv;
 2454	const struct dmcub_firmware_header_v1_0 *hdr;
 2455	enum dmub_asic dmub_asic;
 2456	enum dmub_status status;
 2457	static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
 2458		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_0_INST_CONST
 2459		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_1_STACK
 2460		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_2_BSS_DATA
 2461		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_3_VBIOS
 2462		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_4_MAILBOX
 2463		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_5_TRACEBUFF
 2464		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_6_FW_STATE
 2465		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_7_SCRATCH_MEM
 2466		DMUB_WINDOW_MEMORY_TYPE_FB,		//DMUB_WINDOW_SHARED_STATE
 2467	};
 2468	int r;
 2469
 2470	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 2471	case IP_VERSION(2, 1, 0):
 2472		dmub_asic = DMUB_ASIC_DCN21;
 2473		break;
 2474	case IP_VERSION(3, 0, 0):
 2475		dmub_asic = DMUB_ASIC_DCN30;
 2476		break;
 2477	case IP_VERSION(3, 0, 1):
 2478		dmub_asic = DMUB_ASIC_DCN301;
 2479		break;
 2480	case IP_VERSION(3, 0, 2):
 2481		dmub_asic = DMUB_ASIC_DCN302;
 2482		break;
 2483	case IP_VERSION(3, 0, 3):
 2484		dmub_asic = DMUB_ASIC_DCN303;
 2485		break;
 2486	case IP_VERSION(3, 1, 2):
 2487	case IP_VERSION(3, 1, 3):
 2488		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
 2489		break;
 2490	case IP_VERSION(3, 1, 4):
 2491		dmub_asic = DMUB_ASIC_DCN314;
 2492		break;
 2493	case IP_VERSION(3, 1, 5):
 2494		dmub_asic = DMUB_ASIC_DCN315;
 2495		break;
 2496	case IP_VERSION(3, 1, 6):
 2497		dmub_asic = DMUB_ASIC_DCN316;
 2498		break;
 2499	case IP_VERSION(3, 2, 0):
 2500		dmub_asic = DMUB_ASIC_DCN32;
 2501		break;
 2502	case IP_VERSION(3, 2, 1):
 2503		dmub_asic = DMUB_ASIC_DCN321;
 2504		break;
 2505	case IP_VERSION(3, 5, 0):
 2506	case IP_VERSION(3, 5, 1):
 2507		dmub_asic = DMUB_ASIC_DCN35;
 2508		break;
 2509	case IP_VERSION(4, 0, 1):
 2510		dmub_asic = DMUB_ASIC_DCN401;
 2511		break;
 2512
 2513	default:
 2514		/* ASIC doesn't support DMUB. */
 2515		return 0;
 2516	}
 2517
 2518	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
 2519	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 2520
 2521	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 2522		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
 2523			AMDGPU_UCODE_ID_DMCUB;
 2524		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
 2525			adev->dm.dmub_fw;
 2526		adev->firmware.fw_size +=
 2527			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
 2528
 2529		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
 2530			 adev->dm.dmcub_fw_version);
 2531	}
 2532
 2533
 2534	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
 2535	dmub_srv = adev->dm.dmub_srv;
 2536
 2537	if (!dmub_srv) {
 2538		DRM_ERROR("Failed to allocate DMUB service!\n");
 2539		return -ENOMEM;
 2540	}
 2541
 2542	memset(&create_params, 0, sizeof(create_params));
 2543	create_params.user_ctx = adev;
 2544	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
 2545	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
 2546	create_params.asic = dmub_asic;
 2547
 2548	/* Create the DMUB service. */
 2549	status = dmub_srv_create(dmub_srv, &create_params);
 2550	if (status != DMUB_STATUS_OK) {
 2551		DRM_ERROR("Error creating DMUB service: %d\n", status);
 2552		return -EINVAL;
 2553	}
 2554
 2555	/* Calculate the size of all the regions for the DMUB service. */
 2556	memset(&region_params, 0, sizeof(region_params));
 2557
 2558	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
 2559					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
 2560	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
 2561	region_params.vbios_size = adev->bios_size;
 2562	region_params.fw_bss_data = region_params.bss_data_size ?
 2563		adev->dm.dmub_fw->data +
 2564		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 2565		le32_to_cpu(hdr->inst_const_bytes) : NULL;
 2566	region_params.fw_inst_const =
 2567		adev->dm.dmub_fw->data +
 2568		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 2569		PSP_HEADER_BYTES;
 2570	region_params.window_memory_type = window_memory_type;
 2571
 2572	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
 2573					   &region_info);
 2574
 2575	if (status != DMUB_STATUS_OK) {
 2576		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
 2577		return -EINVAL;
 2578	}
 2579
 2580	/*
 2581	 * Allocate a framebuffer based on the total size of all the regions.
 2582	 * TODO: Move this into GART.
 2583	 */
 2584	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
 2585				    AMDGPU_GEM_DOMAIN_VRAM |
 2586				    AMDGPU_GEM_DOMAIN_GTT,
 2587				    &adev->dm.dmub_bo,
 2588				    &adev->dm.dmub_bo_gpu_addr,
 2589				    &adev->dm.dmub_bo_cpu_addr);
 2590	if (r)
 2591		return r;
 2592
 2593	/* Rebase the regions on the framebuffer address. */
 2594	memset(&memory_params, 0, sizeof(memory_params));
 2595	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
 2596	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
 2597	memory_params.region_info = &region_info;
 2598	memory_params.window_memory_type = window_memory_type;
 2599
 2600	adev->dm.dmub_fb_info =
 2601		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
 2602	fb_info = adev->dm.dmub_fb_info;
 2603
 2604	if (!fb_info) {
 2605		DRM_ERROR(
 2606			"Failed to allocate framebuffer info for DMUB service!\n");
 2607		return -ENOMEM;
 2608	}
 2609
 2610	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
 2611	if (status != DMUB_STATUS_OK) {
 2612		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
 2613		return -EINVAL;
 2614	}
 2615
 2616	adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
 2617
 2618	return 0;
 2619}
 2620
 2621static int dm_sw_init(struct amdgpu_ip_block *ip_block)
 2622{
 2623	struct amdgpu_device *adev = ip_block->adev;
 2624	int r;
 2625
 2626	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
 2627
 2628	if (!adev->dm.cgs_device) {
 2629		DRM_ERROR("amdgpu: failed to create cgs device.\n");
 2630		return -EINVAL;
 2631	}
 2632
 2633	/* Moved from dm init since we need to use allocations for storing bounding box data */
 2634	INIT_LIST_HEAD(&adev->dm.da_list);
 2635
 2636	r = dm_dmub_sw_init(adev);
 2637	if (r)
 2638		return r;
 2639
 2640	return load_dmcu_fw(adev);
 2641}
 2642
 2643static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
 2644{
 2645	struct amdgpu_device *adev = ip_block->adev;
 2646	struct dal_allocation *da;
 2647
 2648	list_for_each_entry(da, &adev->dm.da_list, list) {
 2649		if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
 2650			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
 2651			list_del(&da->list);
 2652			kfree(da);
 2653			adev->dm.bb_from_dmub = NULL;
 2654			break;
 2655		}
 2656	}
 2657
 2658
 2659	kfree(adev->dm.dmub_fb_info);
 2660	adev->dm.dmub_fb_info = NULL;
 2661
 2662	if (adev->dm.dmub_srv) {
 2663		dmub_srv_destroy(adev->dm.dmub_srv);
 2664		kfree(adev->dm.dmub_srv);
 2665		adev->dm.dmub_srv = NULL;
 2666	}
 2667
 2668	amdgpu_ucode_release(&adev->dm.dmub_fw);
 2669	amdgpu_ucode_release(&adev->dm.fw_dmcu);
 2670
 2671	return 0;
 2672}
 2673
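/* Walk all DM connectors and (re)start the MST topology manager on every link
 * that detected an MST branch, falling back to single-stream mode if starting
 * MST fails.
 */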
 2674static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 2675{
 2676	struct amdgpu_dm_connector *aconnector;
 2677	struct drm_connector *connector;
 2678	struct drm_connector_list_iter iter;
 2679	int ret = 0;
 2680
 2681	drm_connector_list_iter_begin(dev, &iter);
 2682	drm_for_each_connector_iter(connector, &iter) {
 2683
 2684		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 2685			continue;
 2686
 2687		aconnector = to_amdgpu_dm_connector(connector);
 2688		if (aconnector->dc_link->type == dc_connection_mst_branch &&
 2689		    aconnector->mst_mgr.aux) {
 2690			drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
 2691					 aconnector,
 2692					 aconnector->base.base.id);
 2693
 2694			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
 2695			if (ret < 0) {
 2696				drm_err(dev, "DM_MST: Failed to start MST\n");
 2697				aconnector->dc_link->type =
 2698					dc_connection_single;
 2699				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
 2700								     aconnector->dc_link);
 2701				break;
 2702			}
 2703		}
 2704	}
 2705	drm_connector_list_iter_end(&iter);
 2706
 2707	return ret;
 2708}
 2709
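/* Late init: program the ABM backlight ramping parameters into DMCU IRAM (or
 * into DMCUB per eDP link when ABM lives in DMCUB), then kick off MST link
 * detection for all connectors.
 */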
 2710static int dm_late_init(struct amdgpu_ip_block *ip_block)
 2711{
 2712	struct amdgpu_device *adev = ip_block->adev;
 2713
 2714	struct dmcu_iram_parameters params;
 2715	unsigned int linear_lut[16];
 2716	int i;
 2717	struct dmcu *dmcu = NULL;
 2718
 2719	dmcu = adev->dm.dc->res_pool->dmcu;
 2720
 2721	for (i = 0; i < 16; i++)
 2722		linear_lut[i] = 0xFFFF * i / 15;
 2723
 2724	params.set = 0;
 2725	params.backlight_ramping_override = false;
 2726	params.backlight_ramping_start = 0xCCCC;
 2727	params.backlight_ramping_reduction = 0xCCCCCCCC;
 2728	params.backlight_lut_array_size = 16;
 2729	params.backlight_lut_array = linear_lut;
 2730
 2731	/* Min backlight level after ABM reduction; don't allow it below 1%:
 2732	 * 0xFFFF x 0.01 = 0x28F
 2733	 */
 2734	params.min_abm_backlight = 0x28F;
 2735	/* In the case where ABM is implemented on DMCUB,
 2736	 * the dmcu object will be NULL.
 2737	 * ABM 2.4 and up are implemented on DMCUB.
 2738	 */
 2739	if (dmcu) {
 2740		if (!dmcu_load_iram(dmcu, params))
 2741			return -EINVAL;
 2742	} else if (adev->dm.dc->ctx->dmub_srv) {
 2743		struct dc_link *edp_links[MAX_NUM_EDP];
 2744		int edp_num;
 2745
 2746		dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
 2747		for (i = 0; i < edp_num; i++) {
 2748			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
 2749				return -EINVAL;
 2750		}
 2751	}
 2752
 2753	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 2754}
 2755
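/* After resume, bring the MST primary branch device back to a usable state:
 * re-read its DPCD caps, re-enable MST and upstream request handling, and
 * restore (or regenerate) the branch GUID, since some hubs forget it.
 */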
 2756static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
 2757{
 2758	u8 buf[UUID_SIZE];
 2759	guid_t guid;
 2760	int ret;
 2761
 2762	mutex_lock(&mgr->lock);
 2763	if (!mgr->mst_primary)
 2764		goto out_fail;
 2765
 2766	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
 2767		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
 2768		goto out_fail;
 2769	}
 2770
 2771	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
 2772				 DP_MST_EN |
 2773				 DP_UP_REQ_EN |
 2774				 DP_UPSTREAM_IS_SRC);
 2775	if (ret < 0) {
 2776		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
 2777		goto out_fail;
 2778	}
 2779
 2780	/* Some hubs forget their GUIDs after they resume */
 2781	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
 2782	if (ret != sizeof(buf)) {
 2783		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
 2784		goto out_fail;
 2785	}
 2786
 2787	import_guid(&guid, buf);
 2788
 2789	if (guid_is_null(&guid)) {
 2790		guid_gen(&guid);
 2791		export_guid(buf, &guid);
 2792
 2793		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));
 2794
 2795		if (ret != sizeof(buf)) {
 2796			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
 2797			goto out_fail;
 2798		}
 2799	}
 2800
 2801	guid_copy(&mgr->mst_primary->guid, &guid);
 2802
 2803out_fail:
 2804	mutex_unlock(&mgr->lock);
 2805}
 2806
 2807static void s3_handle_mst(struct drm_device *dev, bool suspend)
 2808{
 2809	struct amdgpu_dm_connector *aconnector;
 2810	struct drm_connector *connector;
 2811	struct drm_connector_list_iter iter;
 2812	struct drm_dp_mst_topology_mgr *mgr;
 2813
 2814	drm_connector_list_iter_begin(dev, &iter);
 2815	drm_for_each_connector_iter(connector, &iter) {
 2816
 2817		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 2818			continue;
 2819
 2820		aconnector = to_amdgpu_dm_connector(connector);
 2821		if (aconnector->dc_link->type != dc_connection_mst_branch ||
 2822		    aconnector->mst_root)
 2823			continue;
 2824
 2825		mgr = &aconnector->mst_mgr;
 2826
 2827		if (suspend) {
 2828			drm_dp_mst_topology_mgr_suspend(mgr);
 2829		} else {
 2830			/* If extended timeout is supported in hardware,
 2831			 * default to the LTTPR timeout (3.2ms) first as a W/A for the DP link-layer
 2832			 * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
 2833			 */
 2834			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
 2835			if (!dp_is_lttpr_present(aconnector->dc_link))
 2836				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 2837
 2838			/* TODO: move resume_mst_branch_status() back into drm mst resume
 2839			 * once the topology probing work is pulled out of mst resume into an
 2840			 * mst resume 2nd step. The mst resume 2nd step should be called after
 2841			 * the old state has been restored (i.e. drm_atomic_helper_resume()).
 2842			 */
 2843			resume_mst_branch_status(mgr);
 2844		}
 2845	}
 2846	drm_connector_list_iter_end(&iter);
 2847}
 2848
 2849static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
 2850{
 2851	int ret = 0;
 2852
 2853	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 2854	 * on the Windows driver dc implementation.
 2855	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the settings
 2856	 * should be passed to smu during boot up and resume from s3.
 2857	 * boot up: dc calculates the dcn watermark clock settings within dc_create,
 2858	 * dcn20_resource_construct
 2859	 * then calls the pplib functions below to pass the settings to smu:
 2860	 * smu_set_watermarks_for_clock_ranges
 2861	 * smu_set_watermarks_table
 2862	 * navi10_set_watermarks_table
 2863	 * smu_write_watermarks_table
 2864	 *
 2865	 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
 2866	 * dc has implemented a different flow for the Windows driver:
 2867	 * dc_hardware_init / dc_set_power_state
 2868	 * dcn10_init_hw
 2869	 * notify_wm_ranges
 2870	 * set_wm_ranges
 2871	 * -- Linux
 2872	 * smu_set_watermarks_for_clock_ranges
 2873	 * renoir_set_watermarks_table
 2874	 * smu_write_watermarks_table
 2875	 *
 2876	 * For Linux,
 2877	 * dc_hardware_init -> amdgpu_dm_init
 2878	 * dc_set_power_state --> dm_resume
 2879	 *
 2880	 * Therefore, this function applies to navi10/12/14 but not Renoir.
 2881	 *
 2882	 */
 2883	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 2884	case IP_VERSION(2, 0, 2):
 2885	case IP_VERSION(2, 0, 0):
 2886		break;
 2887	default:
 2888		return 0;
 2889	}
 2890
 2891	ret = amdgpu_dpm_write_watermarks_table(adev);
 2892	if (ret) {
 2893		DRM_ERROR("Failed to update WMTABLE!\n");
 2894		return ret;
 2895	}
 2896
 2897	return 0;
 2898}
 2899
 2900/**
 2901 * dm_hw_init() - Initialize DC device
 2902 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 2903 *
 2904 * Initialize the &struct amdgpu_display_manager device. This involves calling
 2905 * the initializers of each DM component, then populating the struct with them.
 2906 *
 2907 * Although the function implies hardware initialization, both hardware and
 2908 * software are initialized here. Splitting them out to their relevant init
 2909 * hooks is a future TODO item.
 2910 *
 2911 * Some notable things that are initialized here:
 2912 *
 2913 * - Display Core, both software and hardware
 2914 * - DC modules that we need (freesync and color management)
 2915 * - DRM software states
 2916 * - Interrupt sources and handlers
 2917 * - Vblank support
 2918 * - Debug FS entries, if enabled
 2919 */
 2920static int dm_hw_init(struct amdgpu_ip_block *ip_block)
 2921{
 2922	struct amdgpu_device *adev = ip_block->adev;
 2923	int r;
 2924
 2925	/* Create DAL display manager */
 2926	r = amdgpu_dm_init(adev);
 2927	if (r)
 2928		return r;
 2929	amdgpu_dm_hpd_init(adev);
 2930
 2931	return 0;
 2932}
 2933
 2934/**
 2935 * dm_hw_fini() - Teardown DC device
 2936 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 2937 *
 2938 * Teardown components within &struct amdgpu_display_manager that require
 2939 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 2940 * were loaded. Also flush IRQ workqueues and disable them.
 2941 */
 2942static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
 2943{
 2944	struct amdgpu_device *adev = ip_block->adev;
 2945
 2946	amdgpu_dm_hpd_fini(adev);
 2947
 2948	amdgpu_dm_irq_fini(adev);
 2949	amdgpu_dm_fini(adev);
 2950	return 0;
 2951}
 2952
 2953
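/* Around GPU reset, gate or ungate the per-CRTC pageflip, vupdate and vblank
 * interrupt sources directly through DC for every stream that still has
 * planes, instead of going through amdgpu_irq_get/put() refcounting.
 */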
 2954static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
 2955				 struct dc_state *state, bool enable)
 2956{
 2957	enum dc_irq_source irq_source;
 2958	struct amdgpu_crtc *acrtc;
 2959	int rc = -EBUSY;
 2960	int i = 0;
 2961
 2962	for (i = 0; i < state->stream_count; i++) {
 2963		acrtc = get_crtc_by_otg_inst(
 2964				adev, state->stream_status[i].primary_otg_inst);
 2965
 2966		if (acrtc && state->stream_status[i].plane_count != 0) {
 2967			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
 2968			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 2969			if (rc)
 2970				DRM_WARN("Failed to %s pflip interrupts\n",
 2971					 enable ? "enable" : "disable");
 2972
 2973			if (enable) {
 2974				if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
 2975					rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
 2976			} else
 2977				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
 2978
 2979			if (rc)
 2980				DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
 2981
 2982			irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
 2983			/* During gpu-reset we disable and then enable vblank irq, so
 2984			 * don't use amdgpu_irq_get/put() to avoid refcount change.
 2985			 */
 2986			if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
 2987				DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
 2988		}
 2989	}
 2990
 2991}
 2992
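/* Blank the display by committing a copy of the current DC state with every
 * plane removed and every stream deleted; used when suspending for GPU reset.
 */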
 2993static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 2994{
 2995	struct dc_state *context = NULL;
 2996	enum dc_status res = DC_ERROR_UNEXPECTED;
 2997	int i;
 2998	struct dc_stream_state *del_streams[MAX_PIPES];
 2999	int del_streams_count = 0;
 3000	struct dc_commit_streams_params params = {};
 3001
 3002	memset(del_streams, 0, sizeof(del_streams));
 3003
 3004	context = dc_state_create_current_copy(dc);
 3005	if (context == NULL)
 3006		goto context_alloc_fail;
 3007
 3008	/* First, remove all streams from the context */
 3009	for (i = 0; i < context->stream_count; i++) {
 3010		struct dc_stream_state *stream = context->streams[i];
 3011
 3012		del_streams[del_streams_count++] = stream;
 3013	}
 3014
 3015	/* Remove all planes for removed streams and then remove the streams */
 3016	for (i = 0; i < del_streams_count; i++) {
 3017		if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
 3018			res = DC_FAIL_DETACH_SURFACES;
 3019			goto fail;
 3020		}
 3021
 3022		res = dc_state_remove_stream(dc, context, del_streams[i]);
 3023		if (res != DC_OK)
 3024			goto fail;
 3025	}
 3026
 3027	params.streams = context->streams;
 3028	params.stream_count = context->stream_count;
 3029	res = dc_commit_streams(dc, &params);
 3030
 3031fail:
 3032	dc_state_release(context);
 3033
 3034context_alloc_fail:
 3035	return res;
 3036}
 3037
 3038static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
 3039{
 3040	int i;
 3041
 3042	if (dm->hpd_rx_offload_wq) {
 3043		for (i = 0; i < dm->dc->caps.max_links; i++)
 3044			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
 3045	}
 3046}
 3047
 3048static int dm_suspend(struct amdgpu_ip_block *ip_block)
 3049{
 3050	struct amdgpu_device *adev = ip_block->adev;
 3051	struct amdgpu_display_manager *dm = &adev->dm;
 3052	int ret = 0;
 3053
 3054	if (amdgpu_in_reset(adev)) {
 3055		mutex_lock(&dm->dc_lock);
 3056
 3057		dc_allow_idle_optimizations(adev->dm.dc, false);
 3058
 3059		dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
 3060
 3061		if (dm->cached_dc_state)
 3062			dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 3063
 3064		amdgpu_dm_commit_zero_streams(dm->dc);
 3065
 3066		amdgpu_dm_irq_suspend(adev);
 3067
 3068		hpd_rx_irq_work_suspend(dm);
 3069
 3070		return ret;
 3071	}
 3072
 3073	WARN_ON(adev->dm.cached_state);
 3074	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
 3075	if (IS_ERR(adev->dm.cached_state))
 3076		return PTR_ERR(adev->dm.cached_state);
 3077
 3078	s3_handle_mst(adev_to_drm(adev), true);
 3079
 3080	amdgpu_dm_irq_suspend(adev);
 3081
 3082	hpd_rx_irq_work_suspend(dm);
 3083
 3084	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 3085
 3086	if (dm->dc->caps.ips_support && adev->in_s0ix)
 3087		dc_allow_idle_optimizations(dm->dc, true);
 3088
 3089	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
 3090
 3091	return 0;
 3092}
 3093
 3094struct drm_connector *
 3095amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 3096					     struct drm_crtc *crtc)
 3097{
 3098	u32 i;
 3099	struct drm_connector_state *new_con_state;
 3100	struct drm_connector *connector;
 3101	struct drm_crtc *crtc_from_state;
 3102
 3103	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 3104		crtc_from_state = new_con_state->crtc;
 3105
 3106		if (crtc_from_state == crtc)
 3107			return connector;
 3108	}
 3109
 3110	return NULL;
 3111}
 3112
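/* For connectors that are forced on while nothing is physically attached,
 * fabricate a local sink of the matching signal type and attempt an EDID read
 * so a mode can still be set on the emulated link.
 */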
 3113static void emulated_link_detect(struct dc_link *link)
 3114{
 3115	struct dc_sink_init_data sink_init_data = { 0 };
 3116	struct display_sink_capability sink_caps = { 0 };
 3117	enum dc_edid_status edid_status;
 3118	struct dc_context *dc_ctx = link->ctx;
 3119	struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
 3120	struct dc_sink *sink = NULL;
 3121	struct dc_sink *prev_sink = NULL;
 3122
 3123	link->type = dc_connection_none;
 3124	prev_sink = link->local_sink;
 3125
 3126	if (prev_sink)
 3127		dc_sink_release(prev_sink);
 3128
 3129	switch (link->connector_signal) {
 3130	case SIGNAL_TYPE_HDMI_TYPE_A: {
 3131		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 3132		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
 3133		break;
 3134	}
 3135
 3136	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
 3137		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 3138		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 3139		break;
 3140	}
 3141
 3142	case SIGNAL_TYPE_DVI_DUAL_LINK: {
 3143		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 3144		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
 3145		break;
 3146	}
 3147
 3148	case SIGNAL_TYPE_LVDS: {
 3149		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 3150		sink_caps.signal = SIGNAL_TYPE_LVDS;
 3151		break;
 3152	}
 3153
 3154	case SIGNAL_TYPE_EDP: {
 3155		sink_caps.transaction_type =
 3156			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
 3157		sink_caps.signal = SIGNAL_TYPE_EDP;
 3158		break;
 3159	}
 3160
 3161	case SIGNAL_TYPE_DISPLAY_PORT: {
 3162		sink_caps.transaction_type =
 3163			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
 3164		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
 3165		break;
 3166	}
 3167
 3168	default:
 3169		drm_err(dev, "Invalid connector type! signal:%d\n",
 3170			link->connector_signal);
 3171		return;
 3172	}
 3173
 3174	sink_init_data.link = link;
 3175	sink_init_data.sink_signal = sink_caps.signal;
 3176
 3177	sink = dc_sink_create(&sink_init_data);
 3178	if (!sink) {
 3179		drm_err(dev, "Failed to create sink!\n");
 3180		return;
 3181	}
 3182
 3183	/* dc_sink_create returns a new reference */
 3184	link->local_sink = sink;
 3185
 3186	edid_status = dm_helpers_read_local_edid(
 3187			link->ctx,
 3188			link,
 3189			sink);
 3190
 3191	if (edid_status != EDID_OK)
 3192		drm_err(dev, "Failed to read EDID\n");
 3193
 3194}
 3195
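/* Replay the cached DC state after a GPU reset: for each stream, resubmit all
 * of its plane states with force_full_update set so the hardware is fully
 * reprogrammed.
 */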
 3196static void dm_gpureset_commit_state(struct dc_state *dc_state,
 3197				     struct amdgpu_display_manager *dm)
 3198{
 3199	struct {
 3200		struct dc_surface_update surface_updates[MAX_SURFACES];
 3201		struct dc_plane_info plane_infos[MAX_SURFACES];
 3202		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 3203		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 3204		struct dc_stream_update stream_update;
 3205	} *bundle;
 3206	int k, m;
 3207
 3208	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 3209
 3210	if (!bundle) {
 3211		drm_err(dm->ddev, "Failed to allocate update bundle\n");
 3212		goto cleanup;
 3213	}
 3214
 3215	for (k = 0; k < dc_state->stream_count; k++) {
 3216		bundle->stream_update.stream = dc_state->streams[k];
 3217
 3218		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
 3219			bundle->surface_updates[m].surface =
 3220				dc_state->stream_status->plane_states[m];
 3221			bundle->surface_updates[m].surface->force_full_update =
 3222				true;
 3223		}
 3224
 3225		update_planes_and_stream_adapter(dm->dc,
 3226					 UPDATE_TYPE_FULL,
 3227					 dc_state->stream_status->plane_count,
 3228					 dc_state->streams[k],
 3229					 &bundle->stream_update,
 3230					 bundle->surface_updates);
 3231	}
 3232
 3233cleanup:
 3234	kfree(bundle);
 3235}
 3236
 3237static int dm_resume(struct amdgpu_ip_block *ip_block)
 3238{
 3239	struct amdgpu_device *adev = ip_block->adev;
 3240	struct drm_device *ddev = adev_to_drm(adev);
 3241	struct amdgpu_display_manager *dm = &adev->dm;
 3242	struct amdgpu_dm_connector *aconnector;
 3243	struct drm_connector *connector;
 3244	struct drm_connector_list_iter iter;
 3245	struct drm_crtc *crtc;
 3246	struct drm_crtc_state *new_crtc_state;
 3247	struct dm_crtc_state *dm_new_crtc_state;
 3248	struct drm_plane *plane;
 3249	struct drm_plane_state *new_plane_state;
 3250	struct dm_plane_state *dm_new_plane_state;
 3251	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 3252	enum dc_connection_type new_connection_type = dc_connection_none;
 3253	struct dc_state *dc_state;
 3254	int i, r, j;
 3255	struct dc_commit_streams_params commit_params = {};
 3256
 3257	if (dm->dc->caps.ips_support) {
 3258		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
 3259	}
 3260
 3261	if (amdgpu_in_reset(adev)) {
 3262		dc_state = dm->cached_dc_state;
 3263
 3264		/*
 3265		 * The dc->current_state is backed up into dm->cached_dc_state
 3266		 * before we commit 0 streams.
 3267		 *
 3268		 * DC will clear link encoder assignments on the real state
 3269		 * but the changes won't propagate over to the copy we made
 3270		 * before the 0 streams commit.
 3271		 *
 3272		 * DC expects that link encoder assignments are *not* valid
 3273		 * when committing a state, so as a workaround we can copy
 3274		 * off of the current state.
 3275		 *
 3276		 * We lose the previous assignments, but we had already
 3277		 * committed 0 streams anyway.
 3278		 */
 3279		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 3280
 3281		r = dm_dmub_hw_init(adev);
 3282		if (r)
 3283			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 3284
 3285		dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
 3286		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 3287
 3288		dc_resume(dm->dc);
 3289
 3290		amdgpu_dm_irq_resume_early(adev);
 3291
 3292		for (i = 0; i < dc_state->stream_count; i++) {
 3293			dc_state->streams[i]->mode_changed = true;
 3294			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
 3295				dc_state->stream_status[i].plane_states[j]->update_flags.raw
 3296					= 0xffffffff;
 3297			}
 3298		}
 3299
 3300		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 3301			amdgpu_dm_outbox_init(adev);
 3302			dc_enable_dmub_outbox(adev->dm.dc);
 3303		}
 3304
 3305		commit_params.streams = dc_state->streams;
 3306		commit_params.stream_count = dc_state->stream_count;
 3307		dc_exit_ips_for_hw_access(dm->dc);
 3308		WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 3309
 3310		dm_gpureset_commit_state(dm->cached_dc_state, dm);
 3311
 3312		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
 3313
 3314		dc_state_release(dm->cached_dc_state);
 3315		dm->cached_dc_state = NULL;
 3316
 3317		amdgpu_dm_irq_resume_late(adev);
 3318
 3319		mutex_unlock(&dm->dc_lock);
 3320
 3321		return 0;
 3322	}
 3323	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
 3324	dc_state_release(dm_state->context);
 3325	dm_state->context = dc_state_create(dm->dc, NULL);
 3326	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 3327
 3328	/* Before powering on DC we need to re-initialize DMUB. */
 3329	dm_dmub_hw_resume(adev);
 3330
 3331	/* Re-enable outbox interrupts for DPIA. */
 3332	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 3333		amdgpu_dm_outbox_init(adev);
 3334		dc_enable_dmub_outbox(adev->dm.dc);
 3335	}
 3336
 3337	/* power on hardware */
 3338	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
 3339	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 3340
 3341	/* program HPD filter */
 3342	dc_resume(dm->dc);
 3343
 3344	/*
  3345	 * Enable HPD Rx IRQ early; this must be done before the mode set, as
  3346	 * short pulse interrupts are used for MST.
 3347	 */
 3348	amdgpu_dm_irq_resume_early(adev);
 3349
  3350	/* On resume we need to rewrite the MSTM control bits to enable MST */
 3351	s3_handle_mst(ddev, false);
 3352
  3353	/* Do detection */
 3354	drm_connector_list_iter_begin(ddev, &iter);
 3355	drm_for_each_connector_iter(connector, &iter) {
 3356
 3357		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 3358			continue;
 3359
 3360		aconnector = to_amdgpu_dm_connector(connector);
 3361
 3362		if (!aconnector->dc_link)
 3363			continue;
 3364
 3365		/*
  3366		 * This is the case when traversing through already created end-sink
  3367		 * MST connectors; they should be skipped.
 3368		 */
 3369		if (aconnector->mst_root)
 3370			continue;
 3371
 3372		mutex_lock(&aconnector->hpd_lock);
 3373		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
 3374			DRM_ERROR("KMS: Failed to detect connector\n");
 3375
 3376		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 3377			emulated_link_detect(aconnector->dc_link);
 3378		} else {
 3379			mutex_lock(&dm->dc_lock);
 3380			dc_exit_ips_for_hw_access(dm->dc);
 3381			dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
 3382			mutex_unlock(&dm->dc_lock);
 3383		}
 3384
 3385		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
 3386			aconnector->fake_enable = false;
 3387
 3388		if (aconnector->dc_sink)
 3389			dc_sink_release(aconnector->dc_sink);
 3390		aconnector->dc_sink = NULL;
 3391		amdgpu_dm_update_connector_after_detect(aconnector);
 3392		mutex_unlock(&aconnector->hpd_lock);
 3393	}
 3394	drm_connector_list_iter_end(&iter);
 3395
 3396	/* Force mode set in atomic commit */
 3397	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
 3398		new_crtc_state->active_changed = true;
 3399		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 3400		reset_freesync_config_for_crtc(dm_new_crtc_state);
 3401	}
 3402
 3403	/*
 3404	 * atomic_check is expected to create the dc states. We need to release
 3405	 * them here, since they were duplicated as part of the suspend
 3406	 * procedure.
 3407	 */
 3408	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
 3409		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 3410		if (dm_new_crtc_state->stream) {
 3411			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
 3412			dc_stream_release(dm_new_crtc_state->stream);
 3413			dm_new_crtc_state->stream = NULL;
 3414		}
 3415		dm_new_crtc_state->base.color_mgmt_changed = true;
 3416	}
 3417
 3418	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
 3419		dm_new_plane_state = to_dm_plane_state(new_plane_state);
 3420		if (dm_new_plane_state->dc_state) {
 3421			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
 3422			dc_plane_state_release(dm_new_plane_state->dc_state);
 3423			dm_new_plane_state->dc_state = NULL;
 3424		}
 3425	}
 3426
 3427	drm_atomic_helper_resume(ddev, dm->cached_state);
 3428
 3429	dm->cached_state = NULL;
 3430
  3431	/* Do MST topology probing after resuming the cached state */
 3432	drm_connector_list_iter_begin(ddev, &iter);
 3433	drm_for_each_connector_iter(connector, &iter) {
 3434
 3435		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 3436			continue;
 3437
 3438		aconnector = to_amdgpu_dm_connector(connector);
 3439		if (aconnector->dc_link->type != dc_connection_mst_branch ||
 3440		    aconnector->mst_root)
 3441			continue;
 3442
 3443		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
 3444	}
 3445	drm_connector_list_iter_end(&iter);
 3446
 3447	amdgpu_dm_irq_resume_late(adev);
 3448
 3449	amdgpu_dm_smu_write_watermarks_table(adev);
 3450
 3451	drm_kms_helper_hotplug_event(ddev);
 3452
 3453	return 0;
 3454}
 3455
 3456/**
 3457 * DOC: DM Lifecycle
 3458 *
  3459 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 3460 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 3461 * the base driver's device list to be initialized and torn down accordingly.
 3462 *
 3463 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 3464 */
 3465
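/*
 * A rough sketch of the call flow (simplified; the authoritative sequence
 * lives in amdgpu_device.c): device init walks the registered IP blocks and
 * invokes .early_init, .sw_init and .hw_init in order, while the suspend,
 * resume and teardown paths call the matching .suspend, .resume, .hw_fini
 * and .sw_fini hooks below.
 */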
 3466static const struct amd_ip_funcs amdgpu_dm_funcs = {
 3467	.name = "dm",
 3468	.early_init = dm_early_init,
 3469	.late_init = dm_late_init,
 3470	.sw_init = dm_sw_init,
 3471	.sw_fini = dm_sw_fini,
 3472	.early_fini = amdgpu_dm_early_fini,
 3473	.hw_init = dm_hw_init,
 3474	.hw_fini = dm_hw_fini,
 3475	.suspend = dm_suspend,
 3476	.resume = dm_resume,
 3477	.is_idle = dm_is_idle,
 3478	.wait_for_idle = dm_wait_for_idle,
 3479	.check_soft_reset = dm_check_soft_reset,
 3480	.soft_reset = dm_soft_reset,
 3481	.set_clockgating_state = dm_set_clockgating_state,
 3482	.set_powergating_state = dm_set_powergating_state,
 3483};
 3484
 3485const struct amdgpu_ip_block_version dm_ip_block = {
 3486	.type = AMD_IP_BLOCK_TYPE_DCE,
 3487	.major = 1,
 3488	.minor = 0,
 3489	.rev = 0,
 3490	.funcs = &amdgpu_dm_funcs,
 3491};
 3492
 3493
 3494/**
 3495 * DOC: atomic
 3496 *
 3497 * *WIP*
 3498 */
 3499
 3500static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 3501	.fb_create = amdgpu_display_user_framebuffer_create,
 3502	.get_format_info = amdgpu_dm_plane_get_format_info,
 3503	.atomic_check = amdgpu_dm_atomic_check,
 3504	.atomic_commit = drm_atomic_helper_commit,
 3505};
 3506
 3507static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 3508	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
 3509	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
 3510};
 3511
 3512static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 3513{
 3514	struct amdgpu_dm_backlight_caps *caps;
 3515	struct drm_connector *conn_base;
 3516	struct amdgpu_device *adev;
 3517	struct drm_luminance_range_info *luminance_range;
 3518	int min_input_signal_override;
 3519
 3520	if (aconnector->bl_idx == -1 ||
 3521	    aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
 3522		return;
 3523
 3524	conn_base = &aconnector->base;
 3525	adev = drm_to_adev(conn_base->dev);
 3526
 3527	caps = &adev->dm.backlight_caps[aconnector->bl_idx];
 3528	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 3529	caps->aux_support = false;
 3530
 3531	if (caps->ext_caps->bits.oled == 1
 3532	    /*
 3533	     * ||
 3534	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
 3535	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
 3536	     */)
 3537		caps->aux_support = true;
 3538
 3539	if (amdgpu_backlight == 0)
 3540		caps->aux_support = false;
 3541	else if (amdgpu_backlight == 1)
 3542		caps->aux_support = true;
 3543	if (caps->aux_support)
 3544		aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
 3545
 3546	luminance_range = &conn_base->display_info.luminance_range;
 3547
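	/*
	 * If the panel reports a luminance range, use it for the AUX brightness
	 * limits; otherwise fall back to an assumed 0-512 nits range.
	 */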
 3548	if (luminance_range->max_luminance) {
 3549		caps->aux_min_input_signal = luminance_range->min_luminance;
 3550		caps->aux_max_input_signal = luminance_range->max_luminance;
 3551	} else {
 3552		caps->aux_min_input_signal = 0;
 3553		caps->aux_max_input_signal = 512;
 3554	}
 3555
 3556	min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
 3557	if (min_input_signal_override >= 0)
 3558		caps->min_input_signal = min_input_signal_override;
 3559}
 3560
 3561void amdgpu_dm_update_connector_after_detect(
 3562		struct amdgpu_dm_connector *aconnector)
 3563{
 3564	struct drm_connector *connector = &aconnector->base;
 3565	struct drm_device *dev = connector->dev;
 3566	struct dc_sink *sink;
 3567
 3568	/* MST handled by drm_mst framework */
  3569	if (aconnector->mst_mgr.mst_state)
 3570		return;
 3571
 3572	sink = aconnector->dc_link->local_sink;
 3573	if (sink)
 3574		dc_sink_retain(sink);
 3575
 3576	/*
  3577	 * An EDID-managed connector gets its first update only in the mode_valid hook; then
  3578	 * the connector sink is set to either a fake or a physical sink depending on link status.
 3579	 * Skip if already done during boot.
 3580	 */
 3581	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
 3582			&& aconnector->dc_em_sink) {
 3583
 3584		/*
  3585		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
  3586		 * stream, because connector->sink is set to NULL on resume.
 3587		 */
 3588		mutex_lock(&dev->mode_config.mutex);
 3589
 3590		if (sink) {
 3591			if (aconnector->dc_sink) {
 3592				amdgpu_dm_update_freesync_caps(connector, NULL);
 3593				/*
  3594				 * The retain and release below bump the sink's refcount
  3595				 * because the link no longer points to it after disconnect;
  3596				 * otherwise the next crtc-to-connector reshuffle by the UMD
  3597				 * would trigger an unwanted dc_sink release.
 3598				 */
 3599				dc_sink_release(aconnector->dc_sink);
 3600			}
 3601			aconnector->dc_sink = sink;
 3602			dc_sink_retain(aconnector->dc_sink);
 3603			amdgpu_dm_update_freesync_caps(connector,
 3604					aconnector->drm_edid);
 3605		} else {
 3606			amdgpu_dm_update_freesync_caps(connector, NULL);
 3607			if (!aconnector->dc_sink) {
 3608				aconnector->dc_sink = aconnector->dc_em_sink;
 3609				dc_sink_retain(aconnector->dc_sink);
 3610			}
 3611		}
 3612
 3613		mutex_unlock(&dev->mode_config.mutex);
 3614
 3615		if (sink)
 3616			dc_sink_release(sink);
 3617		return;
 3618	}
 3619
 3620	/*
  3621	 * TODO: temporary guard until a proper fix is found.
  3622	 * If this sink is an MST sink, we should not do anything.
 3623	 */
 3624	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 3625		dc_sink_release(sink);
 3626		return;
 3627	}
 3628
 3629	if (aconnector->dc_sink == sink) {
 3630		/*
 3631		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
 3632		 * Do nothing!!
 3633		 */
 3634		drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
 3635				 aconnector->connector_id);
 3636		if (sink)
 3637			dc_sink_release(sink);
 3638		return;
 3639	}
 3640
 3641	drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
 3642		    aconnector->connector_id, aconnector->dc_sink, sink);
 3643
 3644	mutex_lock(&dev->mode_config.mutex);
 3645
 3646	/*
 3647	 * 1. Update status of the drm connector
 3648	 * 2. Send an event and let userspace tell us what to do
 3649	 */
 3650	if (sink) {
 3651		/*
 3652		 * TODO: check if we still need the S3 mode update workaround.
 3653		 * If yes, put it here.
 3654		 */
 3655		if (aconnector->dc_sink) {
 3656			amdgpu_dm_update_freesync_caps(connector, NULL);
 3657			dc_sink_release(aconnector->dc_sink);
 3658		}
 3659
 3660		aconnector->dc_sink = sink;
 3661		dc_sink_retain(aconnector->dc_sink);
 3662		if (sink->dc_edid.length == 0) {
 3663			aconnector->drm_edid = NULL;
 3664			if (aconnector->dc_link->aux_mode) {
 3665				drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
 3666			}
 3667		} else {
 3668			const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid;
 3669
 3670			aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
 3671			drm_edid_connector_update(connector, aconnector->drm_edid);
 3672
 3673			if (aconnector->dc_link->aux_mode)
 3674				drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
 3675						  connector->display_info.source_physical_address);
 3676		}
 3677
 3678		if (!aconnector->timing_requested) {
 3679			aconnector->timing_requested =
 3680				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
 3681			if (!aconnector->timing_requested)
 3682				drm_err(dev,
  3683					"failed to create aconnector->timing_requested\n");
 3684		}
 3685
 3686		amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
 3687		update_connector_ext_caps(aconnector);
 3688	} else {
 3689		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
 3690		amdgpu_dm_update_freesync_caps(connector, NULL);
 3691		aconnector->num_modes = 0;
 3692		dc_sink_release(aconnector->dc_sink);
 3693		aconnector->dc_sink = NULL;
 3694		drm_edid_free(aconnector->drm_edid);
 3695		aconnector->drm_edid = NULL;
 3696		kfree(aconnector->timing_requested);
 3697		aconnector->timing_requested = NULL;
 3698		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
 3699		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
 3700			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 3701	}
 3702
 3703	mutex_unlock(&dev->mode_config.mutex);
 3704
 3705	update_subconnector_property(aconnector);
 3706
 3707	if (sink)
 3708		dc_sink_release(sink);
 3709}
 3710
 3711static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 3712{
 3713	struct drm_connector *connector = &aconnector->base;
 3714	struct drm_device *dev = connector->dev;
 3715	enum dc_connection_type new_connection_type = dc_connection_none;
 3716	struct amdgpu_device *adev = drm_to_adev(dev);
 3717	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 3718	struct dc *dc = aconnector->dc_link->ctx->dc;
 3719	bool ret = false;
 3720
 3721	if (adev->dm.disable_hpd_irq)
 3722		return;
 3723
 3724	/*
  3725	 * In case of failure, or for MST, there is no need to update the connector
  3726	 * status or notify the OS, since MST handles this in its own context.
 3727	 */
 3728	mutex_lock(&aconnector->hpd_lock);
 3729
 3730	if (adev->dm.hdcp_workqueue) {
 3731		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
 3732		dm_con_state->update_hdcp = true;
 3733	}
 3734	if (aconnector->fake_enable)
 3735		aconnector->fake_enable = false;
 3736
 3737	aconnector->timing_changed = false;
 3738
 3739	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
 3740		DRM_ERROR("KMS: Failed to detect connector\n");
 3741
 3742	if (aconnector->base.force && new_connection_type == dc_connection_none) {
 3743		emulated_link_detect(aconnector->dc_link);
 3744
 3745		drm_modeset_lock_all(dev);
 3746		dm_restore_drm_connector_state(dev, connector);
 3747		drm_modeset_unlock_all(dev);
 3748
 3749		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 3750			drm_kms_helper_connector_hotplug_event(connector);
 3751	} else {
 3752		mutex_lock(&adev->dm.dc_lock);
 3753		dc_exit_ips_for_hw_access(dc);
 3754		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 3755		mutex_unlock(&adev->dm.dc_lock);
 3756		if (ret) {
 3757			amdgpu_dm_update_connector_after_detect(aconnector);
 3758
 3759			drm_modeset_lock_all(dev);
 3760			dm_restore_drm_connector_state(dev, connector);
 3761			drm_modeset_unlock_all(dev);
 3762
 3763			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 3764				drm_kms_helper_connector_hotplug_event(connector);
 3765		}
 3766	}
 3767	mutex_unlock(&aconnector->hpd_lock);
 3768
 3769}
 3770
 3771static void handle_hpd_irq(void *param)
 3772{
 3773	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 3774
 3775	handle_hpd_irq_helper(aconnector);
 3776
 3777}
 3778
 3779static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
 3780							union hpd_irq_data hpd_irq_data)
 3781{
 3782	struct hpd_rx_irq_offload_work *offload_work =
 3783				kzalloc(sizeof(*offload_work), GFP_KERNEL);
 3784
 3785	if (!offload_work) {
 3786		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
 3787		return;
 3788	}
 3789
 3790	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
 3791	offload_work->data = hpd_irq_data;
 3792	offload_work->offload_wq = offload_wq;
 3793
 3794	queue_work(offload_wq->wq, &offload_work->work);
  3795	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
 3796}
 3797
 3798static void handle_hpd_rx_irq(void *param)
 3799{
 3800	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 3801	struct drm_connector *connector = &aconnector->base;
 3802	struct drm_device *dev = connector->dev;
 3803	struct dc_link *dc_link = aconnector->dc_link;
 3804	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
 3805	bool result = false;
 3806	enum dc_connection_type new_connection_type = dc_connection_none;
 3807	struct amdgpu_device *adev = drm_to_adev(dev);
 3808	union hpd_irq_data hpd_irq_data;
 3809	bool link_loss = false;
 3810	bool has_left_work = false;
 3811	int idx = dc_link->link_index;
 3812	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 3813	struct dc *dc = aconnector->dc_link->ctx->dc;
 3814
 3815	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 3816
 3817	if (adev->dm.disable_hpd_irq)
 3818		return;
 3819
 3820	/*
  3821	 * TODO: temporarily take a mutex so the hpd interrupt does not run into a
  3822	 * gpio conflict; once an i2c helper is implemented, this mutex should be
  3823	 * retired.
 3824	 */
 3825	mutex_lock(&aconnector->hpd_lock);
 3826
 3827	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
 3828						&link_loss, true, &has_left_work);
 3829
 3830	if (!has_left_work)
 3831		goto out;
 3832
 3833	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
 3834		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
 3835		goto out;
 3836	}
 3837
 3838	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 3839		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
 3840			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
 3841			bool skip = false;
 3842
 3843			/*
 3844			 * DOWN_REP_MSG_RDY is also handled by polling method
 3845			 * mgr->cbs->poll_hpd_irq()
 3846			 */
 3847			spin_lock(&offload_wq->offload_lock);
 3848			skip = offload_wq->is_handling_mst_msg_rdy_event;
 3849
 3850			if (!skip)
 3851				offload_wq->is_handling_mst_msg_rdy_event = true;
 3852
 3853			spin_unlock(&offload_wq->offload_lock);
 3854
 3855			if (!skip)
 3856				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
 3857
 3858			goto out;
 3859		}
 3860
 3861		if (link_loss) {
 3862			bool skip = false;
 3863
 3864			spin_lock(&offload_wq->offload_lock);
 3865			skip = offload_wq->is_handling_link_loss;
 3866
 3867			if (!skip)
 3868				offload_wq->is_handling_link_loss = true;
 3869
 3870			spin_unlock(&offload_wq->offload_lock);
 3871
 3872			if (!skip)
 3873				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
 3874
 3875			goto out;
 3876		}
 3877	}
 3878
 3879out:
 3880	if (result && !is_mst_root_connector) {
 3881		/* Downstream Port status changed. */
 3882		if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
 3883			DRM_ERROR("KMS: Failed to detect connector\n");
 3884
 3885		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 3886			emulated_link_detect(dc_link);
 3887
 3888			if (aconnector->fake_enable)
 3889				aconnector->fake_enable = false;
 3890
 3891			amdgpu_dm_update_connector_after_detect(aconnector);
 3892
 3893
 3894			drm_modeset_lock_all(dev);
 3895			dm_restore_drm_connector_state(dev, connector);
 3896			drm_modeset_unlock_all(dev);
 3897
 3898			drm_kms_helper_connector_hotplug_event(connector);
 3899		} else {
 3900			bool ret = false;
 3901
 3902			mutex_lock(&adev->dm.dc_lock);
 3903			dc_exit_ips_for_hw_access(dc);
 3904			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
 3905			mutex_unlock(&adev->dm.dc_lock);
 3906
 3907			if (ret) {
 3908				if (aconnector->fake_enable)
 3909					aconnector->fake_enable = false;
 3910
 3911				amdgpu_dm_update_connector_after_detect(aconnector);
 3912
 3913				drm_modeset_lock_all(dev);
 3914				dm_restore_drm_connector_state(dev, connector);
 3915				drm_modeset_unlock_all(dev);
 3916
 3917				drm_kms_helper_connector_hotplug_event(connector);
 3918			}
 3919		}
 3920	}
 3921	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
 3922		if (adev->dm.hdcp_workqueue)
  3923			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
 3924	}
 3925
 3926	if (dc_link->type != dc_connection_mst_branch)
 3927		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
 3928
 3929	mutex_unlock(&aconnector->hpd_lock);
 3930}
 3931
 3932static int register_hpd_handlers(struct amdgpu_device *adev)
 3933{
 3934	struct drm_device *dev = adev_to_drm(adev);
 3935	struct drm_connector *connector;
 3936	struct amdgpu_dm_connector *aconnector;
 3937	const struct dc_link *dc_link;
 3938	struct dc_interrupt_params int_params = {0};
 3939
 3940	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 3941	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 3942
 3943	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 3944		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 3945			dmub_hpd_callback, true)) {
  3946			DRM_ERROR("amdgpu: failed to register DMUB HPD callback");
 3947			return -EINVAL;
 3948		}
 3949
 3950		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
 3951			dmub_hpd_callback, true)) {
  3952			DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback");
 3953			return -EINVAL;
 3954		}
 3955
 3956		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
 3957			dmub_hpd_sense_callback, true)) {
  3958			DRM_ERROR("amdgpu: failed to register DMUB HPD sense callback");
 3959			return -EINVAL;
 3960		}
 3961	}
 3962
 3963	list_for_each_entry(connector,
 3964			&dev->mode_config.connector_list, head)	{
 3965
 3966		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 3967			continue;
 3968
 3969		aconnector = to_amdgpu_dm_connector(connector);
 3970		dc_link = aconnector->dc_link;
 3971
 3972		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
 3973			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 3974			int_params.irq_source = dc_link->irq_source_hpd;
 3975
 3976			if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 3977				int_params.irq_source  < DC_IRQ_SOURCE_HPD1 ||
 3978				int_params.irq_source  > DC_IRQ_SOURCE_HPD6) {
 3979				DRM_ERROR("Failed to register hpd irq!\n");
 3980				return -EINVAL;
 3981			}
 3982
 3983			if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3984				handle_hpd_irq, (void *) aconnector))
 3985				return -ENOMEM;
 3986		}
 3987
 3988		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 3989
 3990			/* Also register for DP short pulse (hpd_rx). */
 3991			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 3992			int_params.irq_source =	dc_link->irq_source_hpd_rx;
 3993
 3994			if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 3995				int_params.irq_source  < DC_IRQ_SOURCE_HPD1RX ||
 3996				int_params.irq_source  > DC_IRQ_SOURCE_HPD6RX) {
 3997				DRM_ERROR("Failed to register hpd rx irq!\n");
 3998				return -EINVAL;
 3999			}
 4000
 4001			if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4002				handle_hpd_rx_irq, (void *) aconnector))
 4003				return -ENOMEM;
 4004		}
 4005	}
 4006	return 0;
 4007}
 4008
 4009#if defined(CONFIG_DRM_AMD_DC_SI)
 4010/* Register IRQ sources and initialize IRQ callbacks */
 4011static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 4012{
 4013	struct dc *dc = adev->dm.dc;
 4014	struct common_irq_params *c_irq_params;
 4015	struct dc_interrupt_params int_params = {0};
 4016	int r;
 4017	int i;
 4018	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 4019
 4020	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 4021	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 4022
 4023	/*
 4024	 * Actions of amdgpu_irq_add_id():
 4025	 * 1. Register a set() function with base driver.
 4026	 *    Base driver will call set() function to enable/disable an
 4027	 *    interrupt in DC hardware.
 4028	 * 2. Register amdgpu_dm_irq_handler().
 4029	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 4030	 *    coming from DC hardware.
 4031	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 4032	 *    for acknowledging and handling.
 4033	 */
 4034
 4035	/* Use VBLANK interrupt */
 4036	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 4037		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
 4038		if (r) {
 4039			DRM_ERROR("Failed to add crtc irq id!\n");
 4040			return r;
 4041		}
 4042
 4043		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4044		int_params.irq_source =
 4045			dc_interrupt_to_irq_source(dc, i + 1, 0);
 4046
 4047		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4048			int_params.irq_source  < DC_IRQ_SOURCE_VBLANK1 ||
 4049			int_params.irq_source  > DC_IRQ_SOURCE_VBLANK6) {
 4050			DRM_ERROR("Failed to register vblank irq!\n");
 4051			return -EINVAL;
 4052		}
 4053
 4054		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 4055
 4056		c_irq_params->adev = adev;
 4057		c_irq_params->irq_src = int_params.irq_source;
 4058
 4059		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4060			dm_crtc_high_irq, c_irq_params))
 4061			return -ENOMEM;
 4062	}
 4063
 4064	/* Use GRPH_PFLIP interrupt */
 4065	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
 4066			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
 4067		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
 4068		if (r) {
 4069			DRM_ERROR("Failed to add page flip irq id!\n");
 4070			return r;
 4071		}
 4072
 4073		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4074		int_params.irq_source =
 4075			dc_interrupt_to_irq_source(dc, i, 0);
 4076
 4077		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4078			int_params.irq_source  < DC_IRQ_SOURCE_PFLIP_FIRST ||
 4079			int_params.irq_source  > DC_IRQ_SOURCE_PFLIP_LAST) {
 4080			DRM_ERROR("Failed to register pflip irq!\n");
 4081			return -EINVAL;
 4082		}
 4083
 4084		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 4085
 4086		c_irq_params->adev = adev;
 4087		c_irq_params->irq_src = int_params.irq_source;
 4088
 4089		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4090			dm_pflip_high_irq, c_irq_params))
 4091			return -ENOMEM;
 4092	}
 4093
 4094	/* HPD */
 4095	r = amdgpu_irq_add_id(adev, client_id,
 4096			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 4097	if (r) {
 4098		DRM_ERROR("Failed to add hpd irq id!\n");
 4099		return r;
 4100	}
 4101
 4102	r = register_hpd_handlers(adev);
 4103
 4104	return r;
 4105}
 4106#endif
 4107
 4108/* Register IRQ sources and initialize IRQ callbacks */
 4109static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 4110{
 4111	struct dc *dc = adev->dm.dc;
 4112	struct common_irq_params *c_irq_params;
 4113	struct dc_interrupt_params int_params = {0};
 4114	int r;
 4115	int i;
 4116	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 4117
 4118	if (adev->family >= AMDGPU_FAMILY_AI)
 4119		client_id = SOC15_IH_CLIENTID_DCE;
 4120
 4121	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 4122	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 4123
 4124	/*
 4125	 * Actions of amdgpu_irq_add_id():
 4126	 * 1. Register a set() function with base driver.
 4127	 *    Base driver will call set() function to enable/disable an
 4128	 *    interrupt in DC hardware.
 4129	 * 2. Register amdgpu_dm_irq_handler().
 4130	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 4131	 *    coming from DC hardware.
 4132	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 4133	 *    for acknowledging and handling.
 4134	 */
 4135
 4136	/* Use VBLANK interrupt */
 4137	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
 4138		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
 4139		if (r) {
 4140			DRM_ERROR("Failed to add crtc irq id!\n");
 4141			return r;
 4142		}
 4143
 4144		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4145		int_params.irq_source =
 4146			dc_interrupt_to_irq_source(dc, i, 0);
 4147
 4148		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4149			int_params.irq_source  < DC_IRQ_SOURCE_VBLANK1 ||
 4150			int_params.irq_source  > DC_IRQ_SOURCE_VBLANK6) {
 4151			DRM_ERROR("Failed to register vblank irq!\n");
 4152			return -EINVAL;
 4153		}
 4154
 4155		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 4156
 4157		c_irq_params->adev = adev;
 4158		c_irq_params->irq_src = int_params.irq_source;
 4159
 4160		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4161			dm_crtc_high_irq, c_irq_params))
 4162			return -ENOMEM;
 4163	}
 4164
 4165	/* Use VUPDATE interrupt */
 4166	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
 4167		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
 4168		if (r) {
 4169			DRM_ERROR("Failed to add vupdate irq id!\n");
 4170			return r;
 4171		}
 4172
 4173		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4174		int_params.irq_source =
 4175			dc_interrupt_to_irq_source(dc, i, 0);
 4176
 4177		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4178			int_params.irq_source  < DC_IRQ_SOURCE_VUPDATE1 ||
 4179			int_params.irq_source  > DC_IRQ_SOURCE_VUPDATE6) {
 4180			DRM_ERROR("Failed to register vupdate irq!\n");
 4181			return -EINVAL;
 4182		}
 4183
 4184		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
 4185
 4186		c_irq_params->adev = adev;
 4187		c_irq_params->irq_src = int_params.irq_source;
 4188
 4189		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4190			dm_vupdate_high_irq, c_irq_params))
 4191			return -ENOMEM;
 4192	}
 4193
 4194	/* Use GRPH_PFLIP interrupt */
 4195	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
 4196			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
 4197		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
 4198		if (r) {
 4199			DRM_ERROR("Failed to add page flip irq id!\n");
 4200			return r;
 4201		}
 4202
 4203		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4204		int_params.irq_source =
 4205			dc_interrupt_to_irq_source(dc, i, 0);
 4206
 4207		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4208			int_params.irq_source  < DC_IRQ_SOURCE_PFLIP_FIRST ||
 4209			int_params.irq_source  > DC_IRQ_SOURCE_PFLIP_LAST) {
 4210			DRM_ERROR("Failed to register pflip irq!\n");
 4211			return -EINVAL;
 4212		}
 4213
 4214		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 4215
 4216		c_irq_params->adev = adev;
 4217		c_irq_params->irq_src = int_params.irq_source;
 4218
 4219		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4220			dm_pflip_high_irq, c_irq_params))
 4221			return -ENOMEM;
 4222	}
 4223
 4224	/* HPD */
 4225	r = amdgpu_irq_add_id(adev, client_id,
 4226			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 4227	if (r) {
 4228		DRM_ERROR("Failed to add hpd irq id!\n");
 4229		return r;
 4230	}
 4231
 4232	r = register_hpd_handlers(adev);
 4233
 4234	return r;
 4235}
 4236
 4237/* Register IRQ sources and initialize IRQ callbacks */
 4238static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 4239{
 4240	struct dc *dc = adev->dm.dc;
 4241	struct common_irq_params *c_irq_params;
 4242	struct dc_interrupt_params int_params = {0};
 4243	int r;
 4244	int i;
 4245#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 4246	static const unsigned int vrtl_int_srcid[] = {
 4247		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
 4248		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
 4249		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
 4250		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
 4251		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
 4252		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
 4253	};
 4254#endif
 4255
 4256	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 4257	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 4258
 4259	/*
 4260	 * Actions of amdgpu_irq_add_id():
 4261	 * 1. Register a set() function with base driver.
 4262	 *    Base driver will call set() function to enable/disable an
 4263	 *    interrupt in DC hardware.
 4264	 * 2. Register amdgpu_dm_irq_handler().
 4265	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 4266	 *    coming from DC hardware.
 4267	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 4268	 *    for acknowledging and handling.
 4269	 */
 4270
 4271	/* Use VSTARTUP interrupt */
 4272	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
 4273			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
 4274			i++) {
 4275		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
 4276
 4277		if (r) {
 4278			DRM_ERROR("Failed to add crtc irq id!\n");
 4279			return r;
 4280		}
 4281
 4282		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4283		int_params.irq_source =
 4284			dc_interrupt_to_irq_source(dc, i, 0);
 4285
 4286		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4287			int_params.irq_source  < DC_IRQ_SOURCE_VBLANK1 ||
 4288			int_params.irq_source  > DC_IRQ_SOURCE_VBLANK6) {
 4289			DRM_ERROR("Failed to register vblank irq!\n");
 4290			return -EINVAL;
 4291		}
 4292
 4293		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 4294
 4295		c_irq_params->adev = adev;
 4296		c_irq_params->irq_src = int_params.irq_source;
 4297
 4298		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4299			dm_crtc_high_irq, c_irq_params))
 4300			return -ENOMEM;
 4301	}
 4302
 4303	/* Use otg vertical line interrupt */
 4304#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 4305	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
 4306		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
 4307				vrtl_int_srcid[i], &adev->vline0_irq);
 4308
 4309		if (r) {
 4310			DRM_ERROR("Failed to add vline0 irq id!\n");
 4311			return r;
 4312		}
 4313
 4314		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4315		int_params.irq_source =
 4316			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
 4317
 4318		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4319			int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
 4320			int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
 4321			DRM_ERROR("Failed to register vline0 irq!\n");
 4322			return -EINVAL;
 4323		}
 4324
 4325		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
 4326					- DC_IRQ_SOURCE_DC1_VLINE0];
 4327
 4328		c_irq_params->adev = adev;
 4329		c_irq_params->irq_src = int_params.irq_source;
 4330
 4331		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4332			dm_dcn_vertical_interrupt0_high_irq,
 4333			c_irq_params))
 4334			return -ENOMEM;
 4335	}
 4336#endif
 4337
 4338	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
 4339	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
 4340	 * to trigger at end of each vblank, regardless of state of the lock,
 4341	 * matching DCE behaviour.
 4342	 */
 4343	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
 4344	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
 4345	     i++) {
 4346		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
 4347
 4348		if (r) {
 4349			DRM_ERROR("Failed to add vupdate irq id!\n");
 4350			return r;
 4351		}
 4352
 4353		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4354		int_params.irq_source =
 4355			dc_interrupt_to_irq_source(dc, i, 0);
 4356
 4357		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4358			int_params.irq_source  < DC_IRQ_SOURCE_VUPDATE1 ||
 4359			int_params.irq_source  > DC_IRQ_SOURCE_VUPDATE6) {
 4360			DRM_ERROR("Failed to register vupdate irq!\n");
 4361			return -EINVAL;
 4362		}
 4363
 4364		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
 4365
 4366		c_irq_params->adev = adev;
 4367		c_irq_params->irq_src = int_params.irq_source;
 4368
 4369		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4370			dm_vupdate_high_irq, c_irq_params))
 4371			return -ENOMEM;
 4372	}
 4373
 4374	/* Use GRPH_PFLIP interrupt */
 4375	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
 4376			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
 4377			i++) {
 4378		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 4379		if (r) {
 4380			DRM_ERROR("Failed to add page flip irq id!\n");
 4381			return r;
 4382		}
 4383
 4384		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 4385		int_params.irq_source =
 4386			dc_interrupt_to_irq_source(dc, i, 0);
 4387
 4388		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
 4389			int_params.irq_source  < DC_IRQ_SOURCE_PFLIP_FIRST ||
 4390			int_params.irq_source  > DC_IRQ_SOURCE_PFLIP_LAST) {
 4391			DRM_ERROR("Failed to register pflip irq!\n");
 4392			return -EINVAL;
 4393		}
 4394
 4395		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 4396
 4397		c_irq_params->adev = adev;
 4398		c_irq_params->irq_src = int_params.irq_source;
 4399
 4400		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4401			dm_pflip_high_irq, c_irq_params))
 4402			return -ENOMEM;
 4403	}
 4404
 4405	/* HPD */
 4406	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
 4407			&adev->hpd_irq);
 4408	if (r) {
 4409		DRM_ERROR("Failed to add hpd irq id!\n");
 4410		return r;
 4411	}
 4412
 4413	r = register_hpd_handlers(adev);
 4414
 4415	return r;
 4416}
 4417/* Register Outbox IRQ sources and initialize IRQ callbacks */
 4418static int register_outbox_irq_handlers(struct amdgpu_device *adev)
 4419{
 4420	struct dc *dc = adev->dm.dc;
 4421	struct common_irq_params *c_irq_params;
 4422	struct dc_interrupt_params int_params = {0};
 4423	int r, i;
 4424
 4425	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 4426	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 4427
 4428	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
 4429			&adev->dmub_outbox_irq);
 4430	if (r) {
 4431		DRM_ERROR("Failed to add outbox irq id!\n");
 4432		return r;
 4433	}
 4434
 4435	if (dc->ctx->dmub_srv) {
 4436		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
 4437		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 4438		int_params.irq_source =
 4439		dc_interrupt_to_irq_source(dc, i, 0);
 4440
 4441		c_irq_params = &adev->dm.dmub_outbox_params[0];
 4442
 4443		c_irq_params->adev = adev;
 4444		c_irq_params->irq_src = int_params.irq_source;
 4445
 4446		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
 4447			dm_dmub_outbox1_low_irq, c_irq_params))
 4448			return -ENOMEM;
 4449	}
 4450
 4451	return 0;
 4452}
 4453
 4454/*
 4455 * Acquires the lock for the atomic state object and returns
 4456 * the new atomic state.
 4457 *
 4458 * This should only be called during atomic check.
 4459 */
 4460int dm_atomic_get_state(struct drm_atomic_state *state,
 4461			struct dm_atomic_state **dm_state)
 4462{
 4463	struct drm_device *dev = state->dev;
 4464	struct amdgpu_device *adev = drm_to_adev(dev);
 4465	struct amdgpu_display_manager *dm = &adev->dm;
 4466	struct drm_private_state *priv_state;
 4467
 4468	if (*dm_state)
 4469		return 0;
 4470
 4471	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
 4472	if (IS_ERR(priv_state))
 4473		return PTR_ERR(priv_state);
 4474
 4475	*dm_state = to_dm_atomic_state(priv_state);
 4476
 4477	return 0;
 4478}
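
/*
 * Minimal usage sketch (hedged; this mirrors how callers in this file use it
 * during atomic check):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 * On success, dm_state->context refers to the private DC state that the
 * current atomic commit should operate on.
 */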
 4479
 4480static struct dm_atomic_state *
 4481dm_atomic_get_new_state(struct drm_atomic_state *state)
 4482{
 4483	struct drm_device *dev = state->dev;
 4484	struct amdgpu_device *adev = drm_to_adev(dev);
 4485	struct amdgpu_display_manager *dm = &adev->dm;
 4486	struct drm_private_obj *obj;
 4487	struct drm_private_state *new_obj_state;
 4488	int i;
 4489
 4490	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
 4491		if (obj->funcs == dm->atomic_obj.funcs)
 4492			return to_dm_atomic_state(new_obj_state);
 4493	}
 4494
 4495	return NULL;
 4496}
 4497
 4498static struct drm_private_state *
 4499dm_atomic_duplicate_state(struct drm_private_obj *obj)
 4500{
 4501	struct dm_atomic_state *old_state, *new_state;
 4502
 4503	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 4504	if (!new_state)
 4505		return NULL;
 4506
 4507	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
 4508
 4509	old_state = to_dm_atomic_state(obj->state);
 4510
 4511	if (old_state && old_state->context)
 4512		new_state->context = dc_state_create_copy(old_state->context);
 4513
 4514	if (!new_state->context) {
 4515		kfree(new_state);
 4516		return NULL;
 4517	}
 4518
 4519	return &new_state->base;
 4520}
 4521
 4522static void dm_atomic_destroy_state(struct drm_private_obj *obj,
 4523				    struct drm_private_state *state)
 4524{
 4525	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 4526
 4527	if (dm_state && dm_state->context)
 4528		dc_state_release(dm_state->context);
 4529
 4530	kfree(dm_state);
 4531}
 4532
 4533static struct drm_private_state_funcs dm_atomic_state_funcs = {
 4534	.atomic_duplicate_state = dm_atomic_duplicate_state,
 4535	.atomic_destroy_state = dm_atomic_destroy_state,
 4536};
 4537
 4538static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 4539{
 4540	struct dm_atomic_state *state;
 4541	int r;
 4542
 4543	adev->mode_info.mode_config_initialized = true;
 4544
 4545	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
 4546	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
 4547
 4548	adev_to_drm(adev)->mode_config.max_width = 16384;
 4549	adev_to_drm(adev)->mode_config.max_height = 16384;
 4550
 4551	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 4552	if (adev->asic_type == CHIP_HAWAII)
 4553		/* disable prefer shadow for now due to hibernation issues */
 4554		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 4555	else
 4556		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 4557	/* indicates support for immediate flip */
 4558	adev_to_drm(adev)->mode_config.async_page_flip = true;
 4559
 4560	state = kzalloc(sizeof(*state), GFP_KERNEL);
 4561	if (!state)
 4562		return -ENOMEM;
 4563
 4564	state->context = dc_state_create_current_copy(adev->dm.dc);
 4565	if (!state->context) {
 4566		kfree(state);
 4567		return -ENOMEM;
 4568	}
 4569
 4570	drm_atomic_private_obj_init(adev_to_drm(adev),
 4571				    &adev->dm.atomic_obj,
 4572				    &state->base,
 4573				    &dm_atomic_state_funcs);
 4574
 4575	r = amdgpu_display_modeset_create_props(adev);
 4576	if (r) {
 4577		dc_state_release(state->context);
 4578		kfree(state);
 4579		return r;
 4580	}
 4581
 4582#ifdef AMD_PRIVATE_COLOR
 4583	if (amdgpu_dm_create_color_properties(adev)) {
 4584		dc_state_release(state->context);
 4585		kfree(state);
 4586		return -ENOMEM;
 4587	}
 4588#endif
 4589
 4590	r = amdgpu_dm_audio_init(adev);
 4591	if (r) {
 4592		dc_state_release(state->context);
 4593		kfree(state);
 4594		return r;
 4595	}
 4596
 4597	return 0;
 4598}
 4599
 4600#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
 4601#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
 4602#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
 4603#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
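
/*
 * With the defaults above, the sanity check in
 * amdgpu_dm_update_backlight_caps() only accepts firmware-provided ranges
 * whose spread (max - min) lies between AMDGPU_DM_MIN_SPREAD (121) and 255.
 * For example, min=10/max=250 (spread 240) is accepted, while min=200/max=250
 * (spread 50) falls back to the 12..255 defaults.
 */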
 4604
 4605static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 4606					    int bl_idx)
 4607{
 4608#if defined(CONFIG_ACPI)
 4609	struct amdgpu_dm_backlight_caps caps;
 4610
 4611	memset(&caps, 0, sizeof(caps));
 4612
 4613	if (dm->backlight_caps[bl_idx].caps_valid)
 4614		return;
 4615
 4616	amdgpu_acpi_get_backlight_caps(&caps);
 4617
  4618	/* Validate that the firmware-provided values are sane */
 4619	if (caps.caps_valid) {
 4620		int spread = caps.max_input_signal - caps.min_input_signal;
 4621
 4622		if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
 4623		    caps.min_input_signal < 0 ||
 4624		    spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
 4625		    spread < AMDGPU_DM_MIN_SPREAD) {
 4626			DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
 4627				      caps.min_input_signal, caps.max_input_signal);
 4628			caps.caps_valid = false;
 4629		}
 4630	}
 4631
 4632	if (caps.caps_valid) {
 4633		dm->backlight_caps[bl_idx].caps_valid = true;
 4634		if (caps.aux_support)
 4635			return;
 4636		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
 4637		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
 4638	} else {
 4639		dm->backlight_caps[bl_idx].min_input_signal =
 4640				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
 4641		dm->backlight_caps[bl_idx].max_input_signal =
 4642				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 4643	}
 4644#else
 4645	if (dm->backlight_caps[bl_idx].aux_support)
 4646		return;
 4647
 4648	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
 4649	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 4650#endif
 4651}
 4652
 4653static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 4654				unsigned int *min, unsigned int *max)
 4655{
 4656	if (!caps)
 4657		return 0;
 4658
 4659	if (caps->aux_support) {
 4660		// Firmware limits are in nits, DC API wants millinits.
 4661		*max = 1000 * caps->aux_max_input_signal;
 4662		*min = 1000 * caps->aux_min_input_signal;
 4663	} else {
 4664		// Firmware limits are 8-bit, PWM control is 16-bit.
 4665		*max = 0x101 * caps->max_input_signal;
 4666		*min = 0x101 * caps->min_input_signal;
 4667	}
 4668	return 1;
 4669}
 4670
 4671static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
 4672					uint32_t brightness)
 4673{
 4674	unsigned int min, max;
 4675
 4676	if (!get_brightness_range(caps, &min, &max))
 4677		return brightness;
 4678
 4679	// Rescale 0..255 to min..max
 4680	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
 4681				       AMDGPU_MAX_BL_LEVEL);
 4682}
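
/*
 * Worked example (assuming AMDGPU_MAX_BL_LEVEL is 255): with PWM caps of
 * min_input_signal=12 and max_input_signal=255, get_brightness_range()
 * returns min=3084 and max=65535, so a user brightness of 128 maps to
 * 3084 + (65535 - 3084) * 128 / 255, which rounds to 34432.
 */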
 4683
 4684static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
 4685				      uint32_t brightness)
 4686{
 4687	unsigned int min, max;
 4688
 4689	if (!get_brightness_range(caps, &min, &max))
 4690		return brightness;
 4691
 4692	if (brightness < min)
 4693		return 0;
 4694	// Rescale min..max to 0..255
 4695	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
 4696				 max - min);
 4697}
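
/*
 * This is the inverse mapping of convert_brightness_from_user(); continuing
 * the example above, a DC level of 34432 converts back to roughly 128.
 */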
 4698
 4699static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 4700					 int bl_idx,
 4701					 u32 user_brightness)
 4702{
 4703	struct amdgpu_dm_backlight_caps caps;
 4704	struct dc_link *link;
 4705	u32 brightness;
 4706	bool rc, reallow_idle = false;
 4707
 4708	amdgpu_dm_update_backlight_caps(dm, bl_idx);
 4709	caps = dm->backlight_caps[bl_idx];
 4710
 4711	dm->brightness[bl_idx] = user_brightness;
 4712	/* update scratch register */
 4713	if (bl_idx == 0)
 4714		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
 4715	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
 4716	link = (struct dc_link *)dm->backlight_link[bl_idx];
 4717
 4718	/* Change brightness based on AUX property */
 4719	mutex_lock(&dm->dc_lock);
 4720	if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
 4721		dc_allow_idle_optimizations(dm->dc, false);
 4722		reallow_idle = true;
 4723	}
 4724
 4725	if (caps.aux_support) {
 4726		rc = dc_link_set_backlight_level_nits(link, true, brightness,
 4727						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
 4728		if (!rc)
 4729			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
 4730	} else {
 4731		struct set_backlight_level_params backlight_level_params = { 0 };
 4732
 4733		backlight_level_params.backlight_pwm_u16_16 = brightness;
 4734		backlight_level_params.transition_time_in_ms = 0;
 4735
 4736		rc = dc_link_set_backlight_level(link, &backlight_level_params);
 4737		if (!rc)
 4738			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
 4739	}
 4740
 4741	if (dm->dc->caps.ips_support && reallow_idle)
 4742		dc_allow_idle_optimizations(dm->dc, true);
 4743
 4744	mutex_unlock(&dm->dc_lock);
 4745
 4746	if (rc)
 4747		dm->actual_brightness[bl_idx] = user_brightness;
 4748}
 4749
 4750static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 4751{
 4752	struct amdgpu_display_manager *dm = bl_get_data(bd);
 4753	int i;
 4754
 4755	for (i = 0; i < dm->num_of_edps; i++) {
 4756		if (bd == dm->backlight_dev[i])
 4757			break;
 4758	}
 4759	if (i >= AMDGPU_DM_MAX_NUM_EDP)
 4760		i = 0;
 4761	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
 4762
 4763	return 0;
 4764}
 4765
 4766static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
 4767					 int bl_idx)
 4768{
 4769	int ret;
 4770	struct amdgpu_dm_backlight_caps caps;
 4771	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
 4772
 4773	amdgpu_dm_update_backlight_caps(dm, bl_idx);
 4774	caps = dm->backlight_caps[bl_idx];
 4775
 4776	if (caps.aux_support) {
 4777		u32 avg, peak;
 4778		bool rc;
 4779
 4780		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
 4781		if (!rc)
 4782			return dm->brightness[bl_idx];
 4783		return convert_brightness_to_user(&caps, avg);
 4784	}
 4785
 4786	ret = dc_link_get_backlight_level(link);
 4787
 4788	if (ret == DC_ERROR_UNEXPECTED)
 4789		return dm->brightness[bl_idx];
 4790
 4791	return convert_brightness_to_user(&caps, ret);
 4792}
 4793
 4794static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 4795{
 4796	struct amdgpu_display_manager *dm = bl_get_data(bd);
 4797	int i;
 4798
 4799	for (i = 0; i < dm->num_of_edps; i++) {
 4800		if (bd == dm->backlight_dev[i])
 4801			break;
 4802	}
 4803	if (i >= AMDGPU_DM_MAX_NUM_EDP)
 4804		i = 0;
 4805	return amdgpu_dm_backlight_get_level(dm, i);
 4806}
 4807
 4808static const struct backlight_ops amdgpu_dm_backlight_ops = {
 4809	.options = BL_CORE_SUSPENDRESUME,
 4810	.get_brightness = amdgpu_dm_backlight_get_brightness,
 4811	.update_status	= amdgpu_dm_backlight_update_status,
 4812};
 4813
 4814static void
 4815amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 4816{
 4817	struct drm_device *drm = aconnector->base.dev;
 4818	struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
 4819	struct backlight_properties props = { 0 };
 4820	struct amdgpu_dm_backlight_caps caps = { 0 };
 4821	char bl_name[16];
 4822
 4823	if (aconnector->bl_idx == -1)
 4824		return;
 4825
 4826	if (!acpi_video_backlight_use_native()) {
 4827		drm_info(drm, "Skipping amdgpu DM backlight registration\n");
 4828		/* Try registering an ACPI video backlight device instead. */
 4829		acpi_video_register_backlight();
 4830		return;
 4831	}
 4832
 4833	amdgpu_acpi_get_backlight_caps(&caps);
 4834	if (caps.caps_valid) {
 4835		if (power_supply_is_system_supplied() > 0)
 4836			props.brightness = caps.ac_level;
 4837		else
 4838			props.brightness = caps.dc_level;
 4839	} else
 4840		props.brightness = AMDGPU_MAX_BL_LEVEL;
 4841
 4842	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
 4843	props.type = BACKLIGHT_RAW;
 4844
 4845	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
 4846		 drm->primary->index + aconnector->bl_idx);
 4847
 4848	dm->backlight_dev[aconnector->bl_idx] =
 4849		backlight_device_register(bl_name, aconnector->base.kdev, dm,
 4850					  &amdgpu_dm_backlight_ops, &props);
 4851
 4852	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
 4853		DRM_ERROR("DM: Backlight registration failed!\n");
 4854		dm->backlight_dev[aconnector->bl_idx] = NULL;
 4855	} else
 4856		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
 4857}
 4858
 4859static int initialize_plane(struct amdgpu_display_manager *dm,
 4860			    struct amdgpu_mode_info *mode_info, int plane_id,
 4861			    enum drm_plane_type plane_type,
 4862			    const struct dc_plane_cap *plane_cap)
 4863{
 4864	struct drm_plane *plane;
 4865	unsigned long possible_crtcs;
 4866	int ret = 0;
 4867
 4868	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
 4869	if (!plane) {
 4870		DRM_ERROR("KMS: Failed to allocate plane\n");
 4871		return -ENOMEM;
 4872	}
 4873	plane->type = plane_type;
 4874
 4875	/*
 4876	 * HACK: IGT tests expect that the primary plane for a CRTC
  4877	 * can only have one possible CRTC. Only expose support for
  4878	 * any CRTC to planes that are not going to be used as a CRTC's
  4879	 * primary plane - like overlay or underlay planes.
 4880	 */
 4881	possible_crtcs = 1 << plane_id;
 4882	if (plane_id >= dm->dc->caps.max_streams)
 4883		possible_crtcs = 0xff;
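	/*
	 * E.g. primary plane 0 may only be placed on CRTC 0 (mask 0x1), while
	 * planes indexed at or beyond max_streams (overlay/underlay) may be
	 * placed on any CRTC.
	 */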
 4884
 4885	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
 4886
 4887	if (ret) {
 4888		DRM_ERROR("KMS: Failed to initialize plane\n");
 4889		kfree(plane);
 4890		return ret;
 4891	}
 4892
 4893	if (mode_info)
 4894		mode_info->planes[plane_id] = plane;
 4895
 4896	return ret;
 4897}
 4898
 4899
 4900static void setup_backlight_device(struct amdgpu_display_manager *dm,
 4901				   struct amdgpu_dm_connector *aconnector)
 4902{
 4903	struct dc_link *link = aconnector->dc_link;
 4904	int bl_idx = dm->num_of_edps;
 4905
 4906	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
 4907	    link->type == dc_connection_none)
 4908		return;
 4909
 4910	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
  4911		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
 4912		return;
 4913	}
 4914
 4915	aconnector->bl_idx = bl_idx;
 4916
 4917	amdgpu_dm_update_backlight_caps(dm, bl_idx);
 4918	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
 4919	dm->backlight_link[bl_idx] = link;
 4920	dm->num_of_edps++;
 4921
 4922	update_connector_ext_caps(aconnector);
 4923}
 4924
 4925static void amdgpu_set_panel_orientation(struct drm_connector *connector);
 4926
 4927/*
 4928 * In this architecture, the association
 4929 * connector -> encoder -> crtc
  4930 * is not really required. The crtc and connector will hold the
  4931 * display_index as an abstraction to use with the DAL component.
 4932 *
 4933 * Returns 0 on success
 4934 */
 4935static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 4936{
 4937	struct amdgpu_display_manager *dm = &adev->dm;
 4938	s32 i;
 4939	struct amdgpu_dm_connector *aconnector = NULL;
 4940	struct amdgpu_encoder *aencoder = NULL;
 4941	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 4942	u32 link_cnt;
 4943	s32 primary_planes;
 4944	enum dc_connection_type new_connection_type = dc_connection_none;
 4945	const struct dc_plane_cap *plane;
 4946	bool psr_feature_enabled = false;
 4947	bool replay_feature_enabled = false;
 4948	int max_overlay = dm->dc->caps.max_slave_planes;
 4949
 4950	dm->display_indexes_num = dm->dc->caps.max_streams;
  4951	/* Update the actual number of CRTCs used */
 4952	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
 4953
 4954	amdgpu_dm_set_irq_funcs(adev);
 4955
 4956	link_cnt = dm->dc->caps.max_links;
 4957	if (amdgpu_dm_mode_config_init(dm->adev)) {
 4958		DRM_ERROR("DM: Failed to initialize mode config\n");
 4959		return -EINVAL;
 4960	}
 4961
 4962	/* There is one primary plane per CRTC */
 4963	primary_planes = dm->dc->caps.max_streams;
 4964	if (primary_planes > AMDGPU_MAX_PLANES) {
  4965		DRM_ERROR("DM: Number of primary planes exceeds AMDGPU_MAX_PLANES\n");
 4966		return -EINVAL;
 4967	}
 4968
 4969	/*
  4970	 * Initialize primary planes, implicit planes for legacy IOCTLs.
 4971	 * Order is reversed to match iteration order in atomic check.
 4972	 */
 4973	for (i = (primary_planes - 1); i >= 0; i--) {
 4974		plane = &dm->dc->caps.planes[i];
 4975
 4976		if (initialize_plane(dm, mode_info, i,
 4977				     DRM_PLANE_TYPE_PRIMARY, plane)) {
 4978			DRM_ERROR("KMS: Failed to initialize primary plane\n");
 4979			goto fail;
 4980		}
 4981	}
 4982
 4983	/*
 4984	 * Initialize overlay planes, index starting after primary planes.
 4985	 * These planes have a higher DRM index than the primary planes since
 4986	 * they should be considered as having a higher z-order.
 4987	 * Order is reversed to match iteration order in atomic check.
 4988	 *
 4989	 * Only support DCN for now, and only expose one so we don't encourage
 4990	 * userspace to use up all the pipes.
 4991	 */
 4992	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
 4993		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
 4994
 4995		/* Do not create overlay if MPO disabled */
 4996		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
 4997			break;
 4998
 4999		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
 5000			continue;
 5001
 5002		if (!plane->pixel_format_support.argb8888)
 5003			continue;
 5004
 5005		if (max_overlay-- == 0)
 5006			break;
 5007
 5008		if (initialize_plane(dm, NULL, primary_planes + i,
 5009				     DRM_PLANE_TYPE_OVERLAY, plane)) {
 5010			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
 5011			goto fail;
 5012		}
 5013	}
 5014
 5015	for (i = 0; i < dm->dc->caps.max_streams; i++)
 5016		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
 5017			DRM_ERROR("KMS: Failed to initialize crtc\n");
 5018			goto fail;
 5019		}
 5020
 5021	/* Use Outbox interrupt */
 5022	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5023	case IP_VERSION(3, 0, 0):
 5024	case IP_VERSION(3, 1, 2):
 5025	case IP_VERSION(3, 1, 3):
 5026	case IP_VERSION(3, 1, 4):
 5027	case IP_VERSION(3, 1, 5):
 5028	case IP_VERSION(3, 1, 6):
 5029	case IP_VERSION(3, 2, 0):
 5030	case IP_VERSION(3, 2, 1):
 5031	case IP_VERSION(2, 1, 0):
 5032	case IP_VERSION(3, 5, 0):
 5033	case IP_VERSION(3, 5, 1):
 5034	case IP_VERSION(4, 0, 1):
 5035		if (register_outbox_irq_handlers(dm->adev)) {
 5036			DRM_ERROR("DM: Failed to initialize IRQ\n");
 5037			goto fail;
 5038		}
 5039		break;
 5040	default:
 5041		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
 5042			      amdgpu_ip_version(adev, DCE_HWIP, 0));
 5043	}
 5044
 5045	/* Determine whether to enable PSR support by default. */
 5046	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
 5047		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5048		case IP_VERSION(3, 1, 2):
 5049		case IP_VERSION(3, 1, 3):
 5050		case IP_VERSION(3, 1, 4):
 5051		case IP_VERSION(3, 1, 5):
 5052		case IP_VERSION(3, 1, 6):
 5053		case IP_VERSION(3, 2, 0):
 5054		case IP_VERSION(3, 2, 1):
 5055		case IP_VERSION(3, 5, 0):
 5056		case IP_VERSION(3, 5, 1):
 5057		case IP_VERSION(4, 0, 1):
 5058			psr_feature_enabled = true;
 5059			break;
 5060		default:
 5061			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
 5062			break;
 5063		}
 5064	}
 5065
 5066	/* Determine whether to enable Replay support by default. */
 5067	if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
 5068		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5069		case IP_VERSION(3, 1, 4):
 5070		case IP_VERSION(3, 2, 0):
 5071		case IP_VERSION(3, 2, 1):
 5072		case IP_VERSION(3, 5, 0):
 5073		case IP_VERSION(3, 5, 1):
 5074			replay_feature_enabled = true;
 5075			break;
 5076
 5077		default:
 5078			replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
 5079			break;
 5080		}
 5081	}
 5082
 5083	if (link_cnt > MAX_LINKS) {
 5084		DRM_ERROR(
 5085			"KMS: Cannot support more than %d display indexes\n",
 5086				MAX_LINKS);
 5087		goto fail;
 5088	}
 5089
  5090	/* Loop over all connectors on the board */
 5091	for (i = 0; i < link_cnt; i++) {
 5092		struct dc_link *link = NULL;
 5093
 5094		link = dc_get_link_at_index(dm->dc, i);
 5095
 5096		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
 5097			struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
 5098
 5099			if (!wbcon) {
 5100				DRM_ERROR("KMS: Failed to allocate writeback connector\n");
 5101				continue;
 5102			}
 5103
 5104			if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
 5105				DRM_ERROR("KMS: Failed to initialize writeback connector\n");
 5106				kfree(wbcon);
 5107				continue;
 5108			}
 5109
 5110			link->psr_settings.psr_feature_enabled = false;
 5111			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
 5112
 5113			continue;
 5114		}
 5115
 5116		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
 5117		if (!aconnector)
 5118			goto fail;
 5119
 5120		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
 5121		if (!aencoder)
 5122			goto fail;
 5123
 5124		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
 5125			DRM_ERROR("KMS: Failed to initialize encoder\n");
 5126			goto fail;
 5127		}
 5128
 5129		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
 5130			DRM_ERROR("KMS: Failed to initialize connector\n");
 5131			goto fail;
 5132		}
 5133
 5134		if (dm->hpd_rx_offload_wq)
 5135			dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
 5136				aconnector;
 5137
 5138		if (!dc_link_detect_connection_type(link, &new_connection_type))
 5139			DRM_ERROR("KMS: Failed to detect connector\n");
 5140
 5141		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 5142			emulated_link_detect(link);
 5143			amdgpu_dm_update_connector_after_detect(aconnector);
 5144		} else {
 5145			bool ret = false;
 5146
 5147			mutex_lock(&dm->dc_lock);
 5148			dc_exit_ips_for_hw_access(dm->dc);
 5149			ret = dc_link_detect(link, DETECT_REASON_BOOT);
 5150			mutex_unlock(&dm->dc_lock);
 5151
 5152			if (ret) {
 5153				amdgpu_dm_update_connector_after_detect(aconnector);
 5154				setup_backlight_device(dm, aconnector);
 5155
 5156				/* Disable PSR if Replay can be enabled */
 5157				if (replay_feature_enabled)
 5158					if (amdgpu_dm_set_replay_caps(link, aconnector))
 5159						psr_feature_enabled = false;
 5160
 5161				if (psr_feature_enabled)
 5162					amdgpu_dm_set_psr_caps(link);
 5163			}
 5164		}
 5165		amdgpu_set_panel_orientation(&aconnector->base);
 5166	}
 5167
 5168	/* Software is initialized. Now we can register interrupt handlers. */
 5169	switch (adev->asic_type) {
 5170#if defined(CONFIG_DRM_AMD_DC_SI)
 5171	case CHIP_TAHITI:
 5172	case CHIP_PITCAIRN:
 5173	case CHIP_VERDE:
 5174	case CHIP_OLAND:
 5175		if (dce60_register_irq_handlers(dm->adev)) {
 5176			DRM_ERROR("DM: Failed to initialize IRQ\n");
 5177			goto fail;
 5178		}
 5179		break;
 5180#endif
 5181	case CHIP_BONAIRE:
 5182	case CHIP_HAWAII:
 5183	case CHIP_KAVERI:
 5184	case CHIP_KABINI:
 5185	case CHIP_MULLINS:
 5186	case CHIP_TONGA:
 5187	case CHIP_FIJI:
 5188	case CHIP_CARRIZO:
 5189	case CHIP_STONEY:
 5190	case CHIP_POLARIS11:
 5191	case CHIP_POLARIS10:
 5192	case CHIP_POLARIS12:
 5193	case CHIP_VEGAM:
 5194	case CHIP_VEGA10:
 5195	case CHIP_VEGA12:
 5196	case CHIP_VEGA20:
 5197		if (dce110_register_irq_handlers(dm->adev)) {
 5198			DRM_ERROR("DM: Failed to initialize IRQ\n");
 5199			goto fail;
 5200		}
 5201		break;
 5202	default:
 5203		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5204		case IP_VERSION(1, 0, 0):
 5205		case IP_VERSION(1, 0, 1):
 5206		case IP_VERSION(2, 0, 2):
 5207		case IP_VERSION(2, 0, 3):
 5208		case IP_VERSION(2, 0, 0):
 5209		case IP_VERSION(2, 1, 0):
 5210		case IP_VERSION(3, 0, 0):
 5211		case IP_VERSION(3, 0, 2):
 5212		case IP_VERSION(3, 0, 3):
 5213		case IP_VERSION(3, 0, 1):
 5214		case IP_VERSION(3, 1, 2):
 5215		case IP_VERSION(3, 1, 3):
 5216		case IP_VERSION(3, 1, 4):
 5217		case IP_VERSION(3, 1, 5):
 5218		case IP_VERSION(3, 1, 6):
 5219		case IP_VERSION(3, 2, 0):
 5220		case IP_VERSION(3, 2, 1):
 5221		case IP_VERSION(3, 5, 0):
 5222		case IP_VERSION(3, 5, 1):
 5223		case IP_VERSION(4, 0, 1):
 5224			if (dcn10_register_irq_handlers(dm->adev)) {
 5225				DRM_ERROR("DM: Failed to initialize IRQ\n");
 5226				goto fail;
 5227			}
 5228			break;
 5229		default:
 5230			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
 5231					amdgpu_ip_version(adev, DCE_HWIP, 0));
 5232			goto fail;
 5233		}
 5234		break;
 5235	}
 5236
 5237	return 0;
 5238fail:
 5239	kfree(aencoder);
 5240	kfree(aconnector);
 5241
 5242	return -EINVAL;
 5243}
 5244
 5245static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 5246{
 5247	drm_atomic_private_obj_fini(&dm->atomic_obj);
 5248}
 5249
 5250/******************************************************************************
 5251 * amdgpu_display_funcs functions
 5252 *****************************************************************************/
 5253
 5254/*
 5255 * dm_bandwidth_update - program display watermarks
 5256 *
 5257 * @adev: amdgpu_device pointer
 5258 *
 5259 * Calculate and program the display watermarks and line buffer allocation.
 5260 */
 5261static void dm_bandwidth_update(struct amdgpu_device *adev)
 5262{
 5263	/* TODO: implement later */
 5264}
 5265
 5266static const struct amdgpu_display_funcs dm_display_funcs = {
 5267	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
 5268	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
 5269	.backlight_set_level = NULL, /* never called for DC */
 5270	.backlight_get_level = NULL, /* never called for DC */
 5271	.hpd_sense = NULL,/* called unconditionally */
 5272	.hpd_set_polarity = NULL, /* called unconditionally */
 5273	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
 5274	.page_flip_get_scanoutpos =
 5275		dm_crtc_get_scanoutpos,/* called unconditionally */
 5276	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
 5277	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
 5278};
 5279
 5280#if defined(CONFIG_DEBUG_KERNEL_DC)
 5281
 5282static ssize_t s3_debug_store(struct device *device,
 5283			      struct device_attribute *attr,
 5284			      const char *buf,
 5285			      size_t count)
 5286{
 5287	int ret;
 5288	int s3_state;
 5289	struct drm_device *drm_dev = dev_get_drvdata(device);
 5290	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 5291	struct amdgpu_ip_block *ip_block;
 5292
 5293	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
 5294	if (!ip_block)
 5295		return -EINVAL;
 5296
 5297	ret = kstrtoint(buf, 0, &s3_state);
 5298
 5299	if (ret == 0) {
 5300		if (s3_state) {
 5301			dm_resume(ip_block);
 5302			drm_kms_helper_hotplug_event(adev_to_drm(adev));
 5303		} else
 5304			dm_suspend(ip_block);
 5305	}
 5306
 5307	return ret == 0 ? count : 0;
 5308}
 5309
 5310DEVICE_ATTR_WO(s3_debug);
 5311
 5312#endif
 5313
 5314static int dm_init_microcode(struct amdgpu_device *adev)
 5315{
 5316	char *fw_name_dmub;
 5317	int r;
 5318
 5319	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5320	case IP_VERSION(2, 1, 0):
 5321		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
 5322		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
 5323			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
 5324		break;
 5325	case IP_VERSION(3, 0, 0):
 5326		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
 5327			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
 5328		else
 5329			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
 5330		break;
 5331	case IP_VERSION(3, 0, 1):
 5332		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
 5333		break;
 5334	case IP_VERSION(3, 0, 2):
 5335		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
 5336		break;
 5337	case IP_VERSION(3, 0, 3):
 5338		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
 5339		break;
 5340	case IP_VERSION(3, 1, 2):
 5341	case IP_VERSION(3, 1, 3):
 5342		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
 5343		break;
 5344	case IP_VERSION(3, 1, 4):
 5345		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
 5346		break;
 5347	case IP_VERSION(3, 1, 5):
 5348		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
 5349		break;
 5350	case IP_VERSION(3, 1, 6):
 5351		fw_name_dmub = FIRMWARE_DCN316_DMUB;
 5352		break;
 5353	case IP_VERSION(3, 2, 0):
 5354		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
 5355		break;
 5356	case IP_VERSION(3, 2, 1):
 5357		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
 5358		break;
 5359	case IP_VERSION(3, 5, 0):
 5360		fw_name_dmub = FIRMWARE_DCN_35_DMUB;
 5361		break;
 5362	case IP_VERSION(3, 5, 1):
 5363		fw_name_dmub = FIRMWARE_DCN_351_DMUB;
 5364		break;
 5365	case IP_VERSION(4, 0, 1):
 5366		fw_name_dmub = FIRMWARE_DCN_401_DMUB;
 5367		break;
 5368	default:
 5369		/* ASIC doesn't support DMUB. */
 5370		return 0;
 5371	}
 5372	r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
 5373	return r;
 5374}
 5375
 5376static int dm_early_init(struct amdgpu_ip_block *ip_block)
 5377{
 5378	struct amdgpu_device *adev = ip_block->adev;
 5379	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 5380	struct atom_context *ctx = mode_info->atom_context;
 5381	int index = GetIndexIntoMasterTable(DATA, Object_Header);
 5382	u16 data_offset;
 5383
 5384	/* if there is no object header, skip DM */
 5385	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
 5386		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 5387		dev_info(adev->dev, "No object header, skipping DM\n");
 5388		return -ENOENT;
 5389	}
 5390
 5391	switch (adev->asic_type) {
 5392#if defined(CONFIG_DRM_AMD_DC_SI)
 5393	case CHIP_TAHITI:
 5394	case CHIP_PITCAIRN:
 5395	case CHIP_VERDE:
 5396		adev->mode_info.num_crtc = 6;
 5397		adev->mode_info.num_hpd = 6;
 5398		adev->mode_info.num_dig = 6;
 5399		break;
 5400	case CHIP_OLAND:
 5401		adev->mode_info.num_crtc = 2;
 5402		adev->mode_info.num_hpd = 2;
 5403		adev->mode_info.num_dig = 2;
 5404		break;
 5405#endif
 5406	case CHIP_BONAIRE:
 5407	case CHIP_HAWAII:
 5408		adev->mode_info.num_crtc = 6;
 5409		adev->mode_info.num_hpd = 6;
 5410		adev->mode_info.num_dig = 6;
 5411		break;
 5412	case CHIP_KAVERI:
 5413		adev->mode_info.num_crtc = 4;
 5414		adev->mode_info.num_hpd = 6;
 5415		adev->mode_info.num_dig = 7;
 5416		break;
 5417	case CHIP_KABINI:
 5418	case CHIP_MULLINS:
 5419		adev->mode_info.num_crtc = 2;
 5420		adev->mode_info.num_hpd = 6;
 5421		adev->mode_info.num_dig = 6;
 5422		break;
 5423	case CHIP_FIJI:
 5424	case CHIP_TONGA:
 5425		adev->mode_info.num_crtc = 6;
 5426		adev->mode_info.num_hpd = 6;
 5427		adev->mode_info.num_dig = 7;
 5428		break;
 5429	case CHIP_CARRIZO:
 5430		adev->mode_info.num_crtc = 3;
 5431		adev->mode_info.num_hpd = 6;
 5432		adev->mode_info.num_dig = 9;
 5433		break;
 5434	case CHIP_STONEY:
 5435		adev->mode_info.num_crtc = 2;
 5436		adev->mode_info.num_hpd = 6;
 5437		adev->mode_info.num_dig = 9;
 5438		break;
 5439	case CHIP_POLARIS11:
 5440	case CHIP_POLARIS12:
 5441		adev->mode_info.num_crtc = 5;
 5442		adev->mode_info.num_hpd = 5;
 5443		adev->mode_info.num_dig = 5;
 5444		break;
 5445	case CHIP_POLARIS10:
 5446	case CHIP_VEGAM:
 5447		adev->mode_info.num_crtc = 6;
 5448		adev->mode_info.num_hpd = 6;
 5449		adev->mode_info.num_dig = 6;
 5450		break;
 5451	case CHIP_VEGA10:
 5452	case CHIP_VEGA12:
 5453	case CHIP_VEGA20:
 5454		adev->mode_info.num_crtc = 6;
 5455		adev->mode_info.num_hpd = 6;
 5456		adev->mode_info.num_dig = 6;
 5457		break;
 5458	default:
 5459
 5460		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 5461		case IP_VERSION(2, 0, 2):
 5462		case IP_VERSION(3, 0, 0):
 5463			adev->mode_info.num_crtc = 6;
 5464			adev->mode_info.num_hpd = 6;
 5465			adev->mode_info.num_dig = 6;
 5466			break;
 5467		case IP_VERSION(2, 0, 0):
 5468		case IP_VERSION(3, 0, 2):
 5469			adev->mode_info.num_crtc = 5;
 5470			adev->mode_info.num_hpd = 5;
 5471			adev->mode_info.num_dig = 5;
 5472			break;
 5473		case IP_VERSION(2, 0, 3):
 5474		case IP_VERSION(3, 0, 3):
 5475			adev->mode_info.num_crtc = 2;
 5476			adev->mode_info.num_hpd = 2;
 5477			adev->mode_info.num_dig = 2;
 5478			break;
 5479		case IP_VERSION(1, 0, 0):
 5480		case IP_VERSION(1, 0, 1):
 5481		case IP_VERSION(3, 0, 1):
 5482		case IP_VERSION(2, 1, 0):
 5483		case IP_VERSION(3, 1, 2):
 5484		case IP_VERSION(3, 1, 3):
 5485		case IP_VERSION(3, 1, 4):
 5486		case IP_VERSION(3, 1, 5):
 5487		case IP_VERSION(3, 1, 6):
 5488		case IP_VERSION(3, 2, 0):
 5489		case IP_VERSION(3, 2, 1):
 5490		case IP_VERSION(3, 5, 0):
 5491		case IP_VERSION(3, 5, 1):
 5492		case IP_VERSION(4, 0, 1):
 5493			adev->mode_info.num_crtc = 4;
 5494			adev->mode_info.num_hpd = 4;
 5495			adev->mode_info.num_dig = 4;
 5496			break;
 5497		default:
 5498			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
 5499					amdgpu_ip_version(adev, DCE_HWIP, 0));
 5500			return -EINVAL;
 5501		}
 5502		break;
 5503	}
 5504
 5505	if (adev->mode_info.funcs == NULL)
 5506		adev->mode_info.funcs = &dm_display_funcs;
 5507
 5508	/*
 5509	 * Note: Do NOT change adev->audio_endpt_rreg and
 5510	 * adev->audio_endpt_wreg because they are initialised in
 5511	 * amdgpu_device_init()
 5512	 */
 5513#if defined(CONFIG_DEBUG_KERNEL_DC)
 5514	device_create_file(
 5515		adev_to_drm(adev)->dev,
 5516		&dev_attr_s3_debug);
 5517#endif
 5518	adev->dc_enabled = true;
 5519
 5520	return dm_init_microcode(adev);
 5521}
 5522
 5523static bool modereset_required(struct drm_crtc_state *crtc_state)
 5524{
 5525	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
 5526}
 5527
 5528static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
 5529{
 5530	drm_encoder_cleanup(encoder);
 5531	kfree(encoder);
 5532}
 5533
 5534static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
 5535	.destroy = amdgpu_dm_encoder_destroy,
 5536};
 5537
 5538static int
 5539fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 5540			    const enum surface_pixel_format format,
 5541			    enum dc_color_space *color_space)
 5542{
 5543	bool full_range;
 5544
 5545	*color_space = COLOR_SPACE_SRGB;
 5546
 5547	/* DRM color properties only affect non-RGB formats. */
 5548	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 5549		return 0;
 5550
 5551	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
 5552
 5553	switch (plane_state->color_encoding) {
 5554	case DRM_COLOR_YCBCR_BT601:
 5555		if (full_range)
 5556			*color_space = COLOR_SPACE_YCBCR601;
 5557		else
 5558			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
 5559		break;
 5560
 5561	case DRM_COLOR_YCBCR_BT709:
 5562		if (full_range)
 5563			*color_space = COLOR_SPACE_YCBCR709;
 5564		else
 5565			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
 5566		break;
 5567
 5568	case DRM_COLOR_YCBCR_BT2020:
 5569		if (full_range)
 5570			*color_space = COLOR_SPACE_2020_YCBCR;
 5571		else
 5572			return -EINVAL;
 5573		break;
 5574
 5575	default:
 5576		return -EINVAL;
 5577	}
 5578
 5579	return 0;
 5580}
 5581
 5582static int
 5583fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
 5584			    const struct drm_plane_state *plane_state,
 5585			    const u64 tiling_flags,
 5586			    struct dc_plane_info *plane_info,
 5587			    struct dc_plane_address *address,
 5588			    bool tmz_surface)
 5589{
 5590	const struct drm_framebuffer *fb = plane_state->fb;
 5591	const struct amdgpu_framebuffer *afb =
 5592		to_amdgpu_framebuffer(plane_state->fb);
 5593	int ret;
 5594
 5595	memset(plane_info, 0, sizeof(*plane_info));
 5596
 5597	switch (fb->format->format) {
 5598	case DRM_FORMAT_C8:
 5599		plane_info->format =
 5600			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
 5601		break;
 5602	case DRM_FORMAT_RGB565:
 5603		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
 5604		break;
 5605	case DRM_FORMAT_XRGB8888:
 5606	case DRM_FORMAT_ARGB8888:
 5607		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
 5608		break;
 5609	case DRM_FORMAT_XRGB2101010:
 5610	case DRM_FORMAT_ARGB2101010:
 5611		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
 5612		break;
 5613	case DRM_FORMAT_XBGR2101010:
 5614	case DRM_FORMAT_ABGR2101010:
 5615		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
 5616		break;
 5617	case DRM_FORMAT_XBGR8888:
 5618	case DRM_FORMAT_ABGR8888:
 5619		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
 5620		break;
 5621	case DRM_FORMAT_NV21:
 5622		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
 5623		break;
 5624	case DRM_FORMAT_NV12:
 5625		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
 5626		break;
 5627	case DRM_FORMAT_P010:
 5628		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
 5629		break;
 5630	case DRM_FORMAT_XRGB16161616F:
 5631	case DRM_FORMAT_ARGB16161616F:
 5632		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
 5633		break;
 5634	case DRM_FORMAT_XBGR16161616F:
 5635	case DRM_FORMAT_ABGR16161616F:
 5636		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
 5637		break;
 5638	case DRM_FORMAT_XRGB16161616:
 5639	case DRM_FORMAT_ARGB16161616:
 5640		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
 5641		break;
 5642	case DRM_FORMAT_XBGR16161616:
 5643	case DRM_FORMAT_ABGR16161616:
 5644		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
 5645		break;
 5646	default:
 5647		DRM_ERROR(
 5648			"Unsupported screen format %p4cc\n",
 5649			&fb->format->format);
 5650		return -EINVAL;
 5651	}
 5652
 5653	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
 5654	case DRM_MODE_ROTATE_0:
 5655		plane_info->rotation = ROTATION_ANGLE_0;
 5656		break;
 5657	case DRM_MODE_ROTATE_90:
 5658		plane_info->rotation = ROTATION_ANGLE_90;
 5659		break;
 5660	case DRM_MODE_ROTATE_180:
 5661		plane_info->rotation = ROTATION_ANGLE_180;
 5662		break;
 5663	case DRM_MODE_ROTATE_270:
 5664		plane_info->rotation = ROTATION_ANGLE_270;
 5665		break;
 5666	default:
 5667		plane_info->rotation = ROTATION_ANGLE_0;
 5668		break;
 5669	}
 5670
 5671
 5672	plane_info->visible = true;
 5673	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
 5674
 5675	plane_info->layer_index = plane_state->normalized_zpos;
 5676
 5677	ret = fill_plane_color_attributes(plane_state, plane_info->format,
 5678					  &plane_info->color_space);
 5679	if (ret)
 5680		return ret;
 5681
 5682	ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
 5683					   plane_info->rotation, tiling_flags,
 5684					   &plane_info->tiling_info,
 5685					   &plane_info->plane_size,
 5686					   &plane_info->dcc, address,
 5687					   tmz_surface);
 5688	if (ret)
 5689		return ret;
 5690
 5691	amdgpu_dm_plane_fill_blending_from_plane_state(
 5692		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
 5693		&plane_info->global_alpha, &plane_info->global_alpha_value);
 5694
 5695	return 0;
 5696}
 5697
 5698static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 5699				    struct dc_plane_state *dc_plane_state,
 5700				    struct drm_plane_state *plane_state,
 5701				    struct drm_crtc_state *crtc_state)
 5702{
 5703	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 5704	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
 5705	struct dc_scaling_info scaling_info;
 5706	struct dc_plane_info plane_info;
 5707	int ret;
 5708
 5709	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
 5710	if (ret)
 5711		return ret;
 5712
 5713	dc_plane_state->src_rect = scaling_info.src_rect;
 5714	dc_plane_state->dst_rect = scaling_info.dst_rect;
 5715	dc_plane_state->clip_rect = scaling_info.clip_rect;
 5716	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
 5717
 5718	ret = fill_dc_plane_info_and_addr(adev, plane_state,
 5719					  afb->tiling_flags,
 5720					  &plane_info,
 5721					  &dc_plane_state->address,
 5722					  afb->tmz_surface);
 5723	if (ret)
 5724		return ret;
 5725
 5726	dc_plane_state->format = plane_info.format;
 5727	dc_plane_state->color_space = plane_info.color_space;
 5729	dc_plane_state->plane_size = plane_info.plane_size;
 5730	dc_plane_state->rotation = plane_info.rotation;
 5731	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
 5732	dc_plane_state->stereo_format = plane_info.stereo_format;
 5733	dc_plane_state->tiling_info = plane_info.tiling_info;
 5734	dc_plane_state->visible = plane_info.visible;
 5735	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
 5736	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
 5737	dc_plane_state->global_alpha = plane_info.global_alpha;
 5738	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
 5739	dc_plane_state->dcc = plane_info.dcc;
 5740	dc_plane_state->layer_index = plane_info.layer_index;
 5741	dc_plane_state->flip_int_enabled = true;
 5742
 5743	/*
 5744	 * Always set input transfer function, since plane state is refreshed
 5745	 * every time.
 5746	 */
 5747	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
 5748						plane_state,
 5749						dc_plane_state);
 5750	if (ret)
 5751		return ret;
 5752
 5753	return 0;
 5754}
 5755
 5756static inline void fill_dc_dirty_rect(struct drm_plane *plane,
  5757				      struct rect *dirty_rect, s32 x,
 5758				      s32 y, s32 width, s32 height,
 5759				      int *i, bool ffu)
 5760{
 5761	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
 5762
 5763	dirty_rect->x = x;
 5764	dirty_rect->y = y;
 5765	dirty_rect->width = width;
 5766	dirty_rect->height = height;
 5767
 5768	if (ffu)
 5769		drm_dbg(plane->dev,
 5770			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
 5771			plane->base.id, width, height);
 5772	else
 5773		drm_dbg(plane->dev,
  5774			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
 5775			plane->base.id, x, y, width, height);
 5776
 5777	(*i)++;
 5778}
 5779
 5780/**
 5781 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 5782 *
 5783 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
 5784 *         remote fb
 5785 * @old_plane_state: Old state of @plane
 5786 * @new_plane_state: New state of @plane
 5787 * @crtc_state: New state of CRTC connected to the @plane
  5788 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 5789 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
 5790 *             If PSR SU is enabled and damage clips are available, only the regions of the screen
 5791 *             that have changed will be updated. If PSR SU is not enabled,
 5792 *             or if damage clips are not available, the entire screen will be updated.
 5793 * @dirty_regions_changed: dirty regions changed
 5794 *
 5795 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 5796 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 5797 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 5798 * amdgpu_dm's.
 5799 *
 5800 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 5801 * plane with regions that require flushing to the eDP remote buffer. In
 5802 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 5803 * implicitly provide damage clips without any client support via the plane
 5804 * bounds.
 5805 */
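/*
 * A minimal user-space sketch (editorial illustration, not part of the
 * driver) of how a damage-aware client could hand one dirty region to the
 * kernel through the generic atomic UAPI. The fd, the atomic request (req),
 * the plane id and the FB_DAMAGE_CLIPS property id lookup are assumed to be
 * set up elsewhere with libdrm:
 *
 *	struct drm_mode_rect damage = {
 *		.x1 = 100, .y1 = 100, .x2 = 356, .y2 = 292,
 *	};
 *	uint32_t blob_id = 0;
 *
 *	drmModeCreatePropertyBlob(fd, &damage, sizeof(damage), &blob_id);
 *	drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id, blob_id);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 */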
 5806static void fill_dc_dirty_rects(struct drm_plane *plane,
 5807				struct drm_plane_state *old_plane_state,
 5808				struct drm_plane_state *new_plane_state,
 5809				struct drm_crtc_state *crtc_state,
 5810				struct dc_flip_addrs *flip_addrs,
 5811				bool is_psr_su,
 5812				bool *dirty_regions_changed)
 5813{
 5814	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 5815	struct rect *dirty_rects = flip_addrs->dirty_rects;
 5816	u32 num_clips;
 5817	struct drm_mode_rect *clips;
 5818	bool bb_changed;
 5819	bool fb_changed;
 5820	u32 i = 0;
 5821	*dirty_regions_changed = false;
 5822
 5823	/*
  5824	 * Cursor plane has its own dirty rect update interface. See
 5825	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
 5826	 */
 5827	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 5828		return;
 5829
 5830	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
 5831		goto ffu;
 5832
 5833	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
 5834	clips = drm_plane_get_damage_clips(new_plane_state);
 5835
 5836	if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
 5837						   is_psr_su)))
 5838		goto ffu;
 5839
 5840	if (!dm_crtc_state->mpo_requested) {
 5841		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
 5842			goto ffu;
 5843
 5844		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
 5845			fill_dc_dirty_rect(new_plane_state->plane,
 5846					   &dirty_rects[flip_addrs->dirty_rect_count],
 5847					   clips->x1, clips->y1,
 5848					   clips->x2 - clips->x1, clips->y2 - clips->y1,
 5849					   &flip_addrs->dirty_rect_count,
 5850					   false);
 5851		return;
 5852	}
 5853
 5854	/*
 5855	 * MPO is requested. Add entire plane bounding box to dirty rects if
 5856	 * flipped to or damaged.
 5857	 *
 5858	 * If plane is moved or resized, also add old bounding box to dirty
 5859	 * rects.
 5860	 */
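	/*
	 * Editorial illustration: if a 400x300 plane is moved from (0, 0) to
	 * (100, 50) without a new framebuffer, bb_changed is true and both
	 * the new (100, 50, 400x300) and the old (0, 0, 400x300) bounding
	 * boxes are added as dirty rectangles below.
	 */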
 5861	fb_changed = old_plane_state->fb->base.id !=
 5862		     new_plane_state->fb->base.id;
 5863	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
 5864		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
 5865		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
 5866		      old_plane_state->crtc_h != new_plane_state->crtc_h);
 5867
 5868	drm_dbg(plane->dev,
 5869		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
 5870		new_plane_state->plane->base.id,
 5871		bb_changed, fb_changed, num_clips);
 5872
 5873	*dirty_regions_changed = bb_changed;
 5874
 5875	if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
 5876		goto ffu;
 5877
 5878	if (bb_changed) {
 5879		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
 5880				   new_plane_state->crtc_x,
 5881				   new_plane_state->crtc_y,
 5882				   new_plane_state->crtc_w,
 5883				   new_plane_state->crtc_h, &i, false);
 5884
 5885		/* Add old plane bounding-box if plane is moved or resized */
 5886		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
 5887				   old_plane_state->crtc_x,
 5888				   old_plane_state->crtc_y,
 5889				   old_plane_state->crtc_w,
 5890				   old_plane_state->crtc_h, &i, false);
 5891	}
 5892
 5893	if (num_clips) {
 5894		for (; i < num_clips; clips++)
 5895			fill_dc_dirty_rect(new_plane_state->plane,
 5896					   &dirty_rects[i], clips->x1,
 5897					   clips->y1, clips->x2 - clips->x1,
 5898					   clips->y2 - clips->y1, &i, false);
 5899	} else if (fb_changed && !bb_changed) {
 5900		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
 5901				   new_plane_state->crtc_x,
 5902				   new_plane_state->crtc_y,
 5903				   new_plane_state->crtc_w,
 5904				   new_plane_state->crtc_h, &i, false);
 5905	}
 5906
 5907	flip_addrs->dirty_rect_count = i;
 5908	return;
 5909
 5910ffu:
 5911	fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
 5912			   dm_crtc_state->base.mode.crtc_hdisplay,
 5913			   dm_crtc_state->base.mode.crtc_vdisplay,
 5914			   &flip_addrs->dirty_rect_count, true);
 5915}
 5916
 5917static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 5918					   const struct dm_connector_state *dm_state,
 5919					   struct dc_stream_state *stream)
 5920{
 5921	enum amdgpu_rmx_type rmx_type;
 5922
  5923	struct rect src = { 0 }; /* viewport in composition space */
 5924	struct rect dst = { 0 }; /* stream addressable area */
 5925
 5926	/* no mode. nothing to be done */
 5927	if (!mode)
 5928		return;
 5929
 5930	/* Full screen scaling by default */
 5931	src.width = mode->hdisplay;
 5932	src.height = mode->vdisplay;
 5933	dst.width = stream->timing.h_addressable;
 5934	dst.height = stream->timing.v_addressable;
 5935
 5936	if (dm_state) {
 5937		rmx_type = dm_state->scaling;
 5938		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
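			/*
			 * Editorial illustration: a 1280x1024 source on a
			 * 1920x1080 timing gives 1280 * 1080 < 1024 * 1920,
			 * so dst.width becomes 1280 * 1080 / 1024 = 1350 and
			 * the 1350x1080 image is later centered at
			 * x = (1920 - 1350) / 2.
			 */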
 5939			if (src.width * dst.height <
 5940					src.height * dst.width) {
 5941				/* height needs less upscaling/more downscaling */
 5942				dst.width = src.width *
 5943						dst.height / src.height;
 5944			} else {
 5945				/* width needs less upscaling/more downscaling */
 5946				dst.height = src.height *
 5947						dst.width / src.width;
 5948			}
 5949		} else if (rmx_type == RMX_CENTER) {
 5950			dst = src;
 5951		}
 5952
 5953		dst.x = (stream->timing.h_addressable - dst.width) / 2;
 5954		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 5955
 5956		if (dm_state->underscan_enable) {
 5957			dst.x += dm_state->underscan_hborder / 2;
 5958			dst.y += dm_state->underscan_vborder / 2;
 5959			dst.width -= dm_state->underscan_hborder;
 5960			dst.height -= dm_state->underscan_vborder;
 5961		}
 5962	}
 5963
 5964	stream->src = src;
 5965	stream->dst = dst;
 5966
 5967	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
 5968		      dst.x, dst.y, dst.width, dst.height);
 5969
 5970}
 5971
 5972static enum dc_color_depth
 5973convert_color_depth_from_display_info(const struct drm_connector *connector,
 5974				      bool is_y420, int requested_bpc)
 5975{
 5976	u8 bpc;
 5977
 5978	if (is_y420) {
 5979		bpc = 8;
 5980
 5981		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
 5982		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
 5983			bpc = 16;
 5984		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
 5985			bpc = 12;
 5986		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
 5987			bpc = 10;
 5988	} else {
 5989		bpc = (uint8_t)connector->display_info.bpc;
 5990		/* Assume 8 bpc by default if no bpc is specified. */
 5991		bpc = bpc ? bpc : 8;
 5992	}
 5993
 5994	if (requested_bpc > 0) {
 5995		/*
 5996		 * Cap display bpc based on the user requested value.
 5997		 *
  5998		 * The value for state->max_bpc may not be correctly updated
 5999		 * depending on when the connector gets added to the state
 6000		 * or if this was called outside of atomic check, so it
 6001		 * can't be used directly.
 6002		 */
 6003		bpc = min_t(u8, bpc, requested_bpc);
 6004
 6005		/* Round down to the nearest even number. */
 6006		bpc = bpc - (bpc & 1);
 6007	}
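	/*
	 * Editorial illustration: a 12 bpc capable display limited by a
	 * requested max bpc of 10 ends up at min(12, 10) = 10, while an odd
	 * request such as 11 is rounded down to 10.
	 */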
 6008
 6009	switch (bpc) {
 6010	case 0:
  6011		 * Temporary workaround: DRM doesn't parse color depth for
  6012		 * EDID revisions before 1.4
  6013		 * TODO: Fix EDID parsing
 6014		 * TODO: Fix edid parsing
 6015		 */
 6016		return COLOR_DEPTH_888;
 6017	case 6:
 6018		return COLOR_DEPTH_666;
 6019	case 8:
 6020		return COLOR_DEPTH_888;
 6021	case 10:
 6022		return COLOR_DEPTH_101010;
 6023	case 12:
 6024		return COLOR_DEPTH_121212;
 6025	case 14:
 6026		return COLOR_DEPTH_141414;
 6027	case 16:
 6028		return COLOR_DEPTH_161616;
 6029	default:
 6030		return COLOR_DEPTH_UNDEFINED;
 6031	}
 6032}
 6033
 6034static enum dc_aspect_ratio
 6035get_aspect_ratio(const struct drm_display_mode *mode_in)
 6036{
 6037	/* 1-1 mapping, since both enums follow the HDMI spec. */
 6038	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
 6039}
 6040
 6041static enum dc_color_space
 6042get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
 6043		       const struct drm_connector_state *connector_state)
 6044{
 6045	enum dc_color_space color_space = COLOR_SPACE_SRGB;
 6046
 6047	switch (connector_state->colorspace) {
 6048	case DRM_MODE_COLORIMETRY_BT601_YCC:
 6049		if (dc_crtc_timing->flags.Y_ONLY)
 6050			color_space = COLOR_SPACE_YCBCR601_LIMITED;
 6051		else
 6052			color_space = COLOR_SPACE_YCBCR601;
 6053		break;
 6054	case DRM_MODE_COLORIMETRY_BT709_YCC:
 6055		if (dc_crtc_timing->flags.Y_ONLY)
 6056			color_space = COLOR_SPACE_YCBCR709_LIMITED;
 6057		else
 6058			color_space = COLOR_SPACE_YCBCR709;
 6059		break;
 6060	case DRM_MODE_COLORIMETRY_OPRGB:
 6061		color_space = COLOR_SPACE_ADOBERGB;
 6062		break;
 6063	case DRM_MODE_COLORIMETRY_BT2020_RGB:
 6064	case DRM_MODE_COLORIMETRY_BT2020_YCC:
 6065		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
 6066			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
 6067		else
 6068			color_space = COLOR_SPACE_2020_YCBCR;
 6069		break;
 6070	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
 6071	default:
 6072		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
 6073			color_space = COLOR_SPACE_SRGB;
 6074		/*
  6075		 * 27030 kHz is the separation point between HDTV and SDTV
  6076		 * according to the HDMI spec, so we use YCbCr709 and YCbCr601
  6077		 * respectively.
 6078		 */
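		/*
		 * Editorial illustration: 480p/576p SDTV modes run at roughly
		 * 27 MHz (at or below 270300 in 100 Hz units) and fall through
		 * to YCbCr601, while 720p and faster HDTV modes (74.25 MHz and
		 * up) exceed the threshold and get YCbCr709.
		 */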
 6079		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
 6080			if (dc_crtc_timing->flags.Y_ONLY)
 6081				color_space =
 6082					COLOR_SPACE_YCBCR709_LIMITED;
 6083			else
 6084				color_space = COLOR_SPACE_YCBCR709;
 6085		} else {
 6086			if (dc_crtc_timing->flags.Y_ONLY)
 6087				color_space =
 6088					COLOR_SPACE_YCBCR601_LIMITED;
 6089			else
 6090				color_space = COLOR_SPACE_YCBCR601;
 6091		}
 6092		break;
 6093	}
 6094
 6095	return color_space;
 6096}
 6097
 6098static enum display_content_type
 6099get_output_content_type(const struct drm_connector_state *connector_state)
 6100{
 6101	switch (connector_state->content_type) {
 6102	default:
 6103	case DRM_MODE_CONTENT_TYPE_NO_DATA:
 6104		return DISPLAY_CONTENT_TYPE_NO_DATA;
 6105	case DRM_MODE_CONTENT_TYPE_GRAPHICS:
 6106		return DISPLAY_CONTENT_TYPE_GRAPHICS;
 6107	case DRM_MODE_CONTENT_TYPE_PHOTO:
 6108		return DISPLAY_CONTENT_TYPE_PHOTO;
 6109	case DRM_MODE_CONTENT_TYPE_CINEMA:
 6110		return DISPLAY_CONTENT_TYPE_CINEMA;
 6111	case DRM_MODE_CONTENT_TYPE_GAME:
 6112		return DISPLAY_CONTENT_TYPE_GAME;
 6113	}
 6114}
 6115
 6116static bool adjust_colour_depth_from_display_info(
 6117	struct dc_crtc_timing *timing_out,
 6118	const struct drm_display_info *info)
 6119{
 6120	enum dc_color_depth depth = timing_out->display_color_depth;
 6121	int normalized_clk;
 6122
 6123	do {
 6124		normalized_clk = timing_out->pix_clk_100hz / 10;
 6125		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
 6126		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 6127			normalized_clk /= 2;
  6128		/* Adjust the pixel clock per the HDMI spec based on colour depth */
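		/*
		 * Editorial illustration: a 3840x2160@60 RGB mode at
		 * 594,000 kHz would need 594000 * 30 / 24 = 742,500 kHz at
		 * 10 bpc, which exceeds a 600,000 kHz HDMI 2.0 TMDS limit,
		 * so the loop falls back to 8 bpc.
		 */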
 6129		switch (depth) {
 6130		case COLOR_DEPTH_888:
 6131			break;
 6132		case COLOR_DEPTH_101010:
 6133			normalized_clk = (normalized_clk * 30) / 24;
 6134			break;
 6135		case COLOR_DEPTH_121212:
 6136			normalized_clk = (normalized_clk * 36) / 24;
 6137			break;
 6138		case COLOR_DEPTH_161616:
 6139			normalized_clk = (normalized_clk * 48) / 24;
 6140			break;
 6141		default:
 6142			/* The above depths are the only ones valid for HDMI. */
 6143			return false;
 6144		}
 6145		if (normalized_clk <= info->max_tmds_clock) {
 6146			timing_out->display_color_depth = depth;
 6147			return true;
 6148		}
 6149	} while (--depth > COLOR_DEPTH_666);
 6150	return false;
 6151}
 6152
 6153static void fill_stream_properties_from_drm_display_mode(
 6154	struct dc_stream_state *stream,
 6155	const struct drm_display_mode *mode_in,
 6156	const struct drm_connector *connector,
 6157	const struct drm_connector_state *connector_state,
 6158	const struct dc_stream_state *old_stream,
 6159	int requested_bpc)
 6160{
 6161	struct dc_crtc_timing *timing_out = &stream->timing;
 6162	const struct drm_display_info *info = &connector->display_info;
 6163	struct amdgpu_dm_connector *aconnector = NULL;
 6164	struct hdmi_vendor_infoframe hv_frame;
 6165	struct hdmi_avi_infoframe avi_frame;
 6166
 6167	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
 6168		aconnector = to_amdgpu_dm_connector(connector);
 6169
 6170	memset(&hv_frame, 0, sizeof(hv_frame));
 6171	memset(&avi_frame, 0, sizeof(avi_frame));
 6172
 6173	timing_out->h_border_left = 0;
 6174	timing_out->h_border_right = 0;
 6175	timing_out->v_border_top = 0;
 6176	timing_out->v_border_bottom = 0;
 6177	/* TODO: un-hardcode */
 6178	if (drm_mode_is_420_only(info, mode_in)
 6179			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 6180		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 6181	else if (drm_mode_is_420_also(info, mode_in)
 6182			&& aconnector
 6183			&& aconnector->force_yuv420_output)
 6184		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 6185	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
 6186			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 6187		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
 6188	else
 6189		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
 6190
 6191	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
 6192	timing_out->display_color_depth = convert_color_depth_from_display_info(
 6193		connector,
 6194		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
 6195		requested_bpc);
 6196	timing_out->scan_type = SCANNING_TYPE_NODATA;
 6197	timing_out->hdmi_vic = 0;
 6198
 6199	if (old_stream) {
 6200		timing_out->vic = old_stream->timing.vic;
 6201		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
 6202		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
 6203	} else {
 6204		timing_out->vic = drm_match_cea_mode(mode_in);
 6205		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
 6206			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
 6207		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
 6208			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
 6209	}
 6210
 6211	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
 6212		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
 6213		timing_out->vic = avi_frame.video_code;
 6214		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
 6215		timing_out->hdmi_vic = hv_frame.vic;
 6216	}
 6217
 6218	if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
 6219		timing_out->h_addressable = mode_in->hdisplay;
 6220		timing_out->h_total = mode_in->htotal;
 6221		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
 6222		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
 6223		timing_out->v_total = mode_in->vtotal;
 6224		timing_out->v_addressable = mode_in->vdisplay;
 6225		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
 6226		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
 6227		timing_out->pix_clk_100hz = mode_in->clock * 10;
 6228	} else {
 6229		timing_out->h_addressable = mode_in->crtc_hdisplay;
 6230		timing_out->h_total = mode_in->crtc_htotal;
 6231		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
 6232		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
 6233		timing_out->v_total = mode_in->crtc_vtotal;
 6234		timing_out->v_addressable = mode_in->crtc_vdisplay;
 6235		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
 6236		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
 6237		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
 6238	}
 6239
 6240	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 6241
 6242	stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
 6243	stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
 6244	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
 6245		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
 6246		    drm_mode_is_420_also(info, mode_in) &&
 6247		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
 6248			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 6249			adjust_colour_depth_from_display_info(timing_out, info);
 6250		}
 6251	}
 6252
 6253	stream->output_color_space = get_output_color_space(timing_out, connector_state);
 6254	stream->content_type = get_output_content_type(connector_state);
 6255}
 6256
 6257static void fill_audio_info(struct audio_info *audio_info,
 6258			    const struct drm_connector *drm_connector,
 6259			    const struct dc_sink *dc_sink)
 6260{
 6261	int i = 0;
 6262	int cea_revision = 0;
 6263	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
 6264
 6265	audio_info->manufacture_id = edid_caps->manufacturer_id;
 6266	audio_info->product_id = edid_caps->product_id;
 6267
 6268	cea_revision = drm_connector->display_info.cea_rev;
 6269
 6270	strscpy(audio_info->display_name,
 6271		edid_caps->display_name,
 6272		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
 6273
 6274	if (cea_revision >= 3) {
 6275		audio_info->mode_count = edid_caps->audio_mode_count;
 6276
 6277		for (i = 0; i < audio_info->mode_count; ++i) {
 6278			audio_info->modes[i].format_code =
 6279					(enum audio_format_code)
 6280					(edid_caps->audio_modes[i].format_code);
 6281			audio_info->modes[i].channel_count =
 6282					edid_caps->audio_modes[i].channel_count;
 6283			audio_info->modes[i].sample_rates.all =
 6284					edid_caps->audio_modes[i].sample_rate;
 6285			audio_info->modes[i].sample_size =
 6286					edid_caps->audio_modes[i].sample_size;
 6287		}
 6288	}
 6289
 6290	audio_info->flags.all = edid_caps->speaker_flags;
 6291
 6292	/* TODO: We only check for the progressive mode, check for interlace mode too */
 6293	if (drm_connector->latency_present[0]) {
 6294		audio_info->video_latency = drm_connector->video_latency[0];
 6295		audio_info->audio_latency = drm_connector->audio_latency[0];
 6296	}
 6297
 6298	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
 6299
 6300}
 6301
 6302static void
 6303copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
 6304				      struct drm_display_mode *dst_mode)
 6305{
 6306	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
 6307	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
 6308	dst_mode->crtc_clock = src_mode->crtc_clock;
 6309	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
 6310	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
 6311	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
 6312	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
 6313	dst_mode->crtc_htotal = src_mode->crtc_htotal;
 6314	dst_mode->crtc_hskew = src_mode->crtc_hskew;
 6315	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
 6316	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
 6317	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
 6318	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
 6319	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
 6320}
 6321
 6322static void
 6323decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
 6324					const struct drm_display_mode *native_mode,
 6325					bool scale_enabled)
 6326{
 6327	if (scale_enabled) {
 6328		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
 6329	} else if (native_mode->clock == drm_mode->clock &&
 6330			native_mode->htotal == drm_mode->htotal &&
 6331			native_mode->vtotal == drm_mode->vtotal) {
 6332		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
 6333	} else {
  6334		/* No scaling and no amdgpu-inserted mode, nothing to patch */
 6335	}
 6336}
 6337
 6338static struct dc_sink *
 6339create_fake_sink(struct dc_link *link)
 6340{
 6341	struct dc_sink_init_data sink_init_data = { 0 };
 6342	struct dc_sink *sink = NULL;
 6343
 6344	sink_init_data.link = link;
 6345	sink_init_data.sink_signal = link->connector_signal;
 6346
 6347	sink = dc_sink_create(&sink_init_data);
 6348	if (!sink) {
 6349		DRM_ERROR("Failed to create sink!\n");
 6350		return NULL;
 6351	}
 6352	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
 6353
 6354	return sink;
 6355}
 6356
 6357static void set_multisync_trigger_params(
 6358		struct dc_stream_state *stream)
 6359{
 6360	struct dc_stream_state *master = NULL;
 6361
 6362	if (stream->triggered_crtc_reset.enabled) {
 6363		master = stream->triggered_crtc_reset.event_source;
 6364		stream->triggered_crtc_reset.event =
 6365			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
 6366			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
 6367		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
 6368	}
 6369}
 6370
 6371static void set_master_stream(struct dc_stream_state *stream_set[],
 6372			      int stream_count)
 6373{
 6374	int j, highest_rfr = 0, master_stream = 0;
 6375
 6376	for (j = 0;  j < stream_count; j++) {
 6377		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
 6378			int refresh_rate = 0;
 6379
 6380			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
 6381				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
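			/*
			 * Editorial illustration: a 148.5 MHz stream with a
			 * 2200 x 1125 total gives
			 * (1485000 * 100) / (2200 * 1125) = 60.
			 */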
 6382			if (refresh_rate > highest_rfr) {
 6383				highest_rfr = refresh_rate;
 6384				master_stream = j;
 6385			}
 6386		}
 6387	}
 6388	for (j = 0;  j < stream_count; j++) {
 6389		if (stream_set[j])
 6390			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
 6391	}
 6392}
 6393
 6394static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 6395{
 6396	int i = 0;
 6397	struct dc_stream_state *stream;
 6398
 6399	if (context->stream_count < 2)
 6400		return;
 6401	for (i = 0; i < context->stream_count ; i++) {
 6402		if (!context->streams[i])
 6403			continue;
 6404		/*
 6405		 * TODO: add a function to read AMD VSDB bits and set
 6406		 * crtc_sync_master.multi_sync_enabled flag
 6407		 * For now it's set to false
 6408		 */
 6409	}
 6410
 6411	set_master_stream(context->streams, context->stream_count);
 6412
 6413	for (i = 0; i < context->stream_count ; i++) {
 6414		stream = context->streams[i];
 6415
 6416		if (!stream)
 6417			continue;
 6418
 6419		set_multisync_trigger_params(stream);
 6420	}
 6421}
 6422
 6423/**
 6424 * DOC: FreeSync Video
 6425 *
 6426 * When a userspace application wants to play a video, the content follows a
 6427 * standard format definition that usually specifies the FPS for that format.
  6428 * The list below illustrates some video formats and the FPS expected
  6429 * for each:
 6430 *
 6431 * - TV/NTSC (23.976 FPS)
 6432 * - Cinema (24 FPS)
 6433 * - TV/PAL (25 FPS)
 6434 * - TV/NTSC (29.97 FPS)
 6435 * - TV/NTSC (30 FPS)
 6436 * - Cinema HFR (48 FPS)
 6437 * - TV/PAL (50 FPS)
 6438 * - Commonly used (60 FPS)
 6439 * - Multiples of 24 (48,72,96 FPS)
 6440 *
  6441 * The list of standard video formats is not huge and can be added to the
  6442 * connector's modeset list beforehand. With that, userspace can leverage
  6443 * FreeSync to extend the front porch in order to attain the target refresh
 6444 * rate. Such a switch will happen seamlessly, without screen blanking or
 6445 * reprogramming of the output in any other way. If the userspace requests a
 6446 * modesetting change compatible with FreeSync modes that only differ in the
 6447 * refresh rate, DC will skip the full update and avoid blink during the
 6448 * transition. For example, the video player can change the modesetting from
 6449 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 6450 * causing any display blink. This same concept can be applied to a mode
 6451 * setting change.
 6452 */
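/*
 * As a worked example (editorial illustration): a 1920x1080@60 timing with
 * h_total = 2200, v_total = 1125 and a 148.5 MHz pixel clock can present
 * 30 FPS content by keeping the pixel clock and horizontal timing unchanged
 * and stretching the vertical front porch until
 * v_total = 148500000 / (2200 * 30) = 2250, halving the refresh rate without
 * a full modeset.
 */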
 6453static struct drm_display_mode *
 6454get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 6455		bool use_probed_modes)
 6456{
 6457	struct drm_display_mode *m, *m_pref = NULL;
 6458	u16 current_refresh, highest_refresh;
 6459	struct list_head *list_head = use_probed_modes ?
 6460		&aconnector->base.probed_modes :
 6461		&aconnector->base.modes;
 6462
 6463	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 6464		return NULL;
 6465
 6466	if (aconnector->freesync_vid_base.clock != 0)
 6467		return &aconnector->freesync_vid_base;
 6468
 6469	/* Find the preferred mode */
 6470	list_for_each_entry(m, list_head, head) {
 6471		if (m->type & DRM_MODE_TYPE_PREFERRED) {
 6472			m_pref = m;
 6473			break;
 6474		}
 6475	}
 6476
 6477	if (!m_pref) {
  6478		/* Probably an EDID with no preferred mode. Fall back to the first entry */
 6479		m_pref = list_first_entry_or_null(
 6480				&aconnector->base.modes, struct drm_display_mode, head);
 6481		if (!m_pref) {
 6482			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
 6483			return NULL;
 6484		}
 6485	}
 6486
 6487	highest_refresh = drm_mode_vrefresh(m_pref);
 6488
 6489	/*
  6490	 * Find the mode with the highest refresh rate at the same resolution.
  6491	 * For some monitors, the preferred mode is not the one with the
  6492	 * highest supported refresh rate.
 6493	 */
 6494	list_for_each_entry(m, list_head, head) {
 6495		current_refresh  = drm_mode_vrefresh(m);
 6496
 6497		if (m->hdisplay == m_pref->hdisplay &&
 6498		    m->vdisplay == m_pref->vdisplay &&
 6499		    highest_refresh < current_refresh) {
 6500			highest_refresh = current_refresh;
 6501			m_pref = m;
 6502		}
 6503	}
 6504
 6505	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
 6506	return m_pref;
 6507}
 6508
 6509static bool is_freesync_video_mode(const struct drm_display_mode *mode,
 6510		struct amdgpu_dm_connector *aconnector)
 6511{
 6512	struct drm_display_mode *high_mode;
 6513	int timing_diff;
 6514
 6515	high_mode = get_highest_refresh_rate_mode(aconnector, false);
 6516	if (!high_mode || !mode)
 6517		return false;
 6518
 6519	timing_diff = high_mode->vtotal - mode->vtotal;
 6520
 6521	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
 6522	    high_mode->hdisplay != mode->hdisplay ||
 6523	    high_mode->vdisplay != mode->vdisplay ||
 6524	    high_mode->hsync_start != mode->hsync_start ||
 6525	    high_mode->hsync_end != mode->hsync_end ||
 6526	    high_mode->htotal != mode->htotal ||
 6527	    high_mode->hskew != mode->hskew ||
 6528	    high_mode->vscan != mode->vscan ||
 6529	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
 6530	    high_mode->vsync_end - mode->vsync_end != timing_diff)
 6531		return false;
 6532	else
 6533		return true;
 6534}
 6535
 6536#if defined(CONFIG_DRM_AMD_DC_FP)
 6537static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
 6538			    struct dc_sink *sink, struct dc_stream_state *stream,
 6539			    struct dsc_dec_dpcd_caps *dsc_caps)
 6540{
 6541	stream->timing.flags.DSC = 0;
 6542	dsc_caps->is_dsc_supported = false;
 6543
 6544	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
 6545	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
 6546		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
 6547			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
 6548			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
 6549				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
 6550				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
 6551				dsc_caps);
 6552	}
 6553}
 6554
 6555static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
 6556				    struct dc_sink *sink, struct dc_stream_state *stream,
 6557				    struct dsc_dec_dpcd_caps *dsc_caps,
 6558				    uint32_t max_dsc_target_bpp_limit_override)
 6559{
 6560	const struct dc_link_settings *verified_link_cap = NULL;
 6561	u32 link_bw_in_kbps;
 6562	u32 edp_min_bpp_x16, edp_max_bpp_x16;
 6563	struct dc *dc = sink->ctx->dc;
 6564	struct dc_dsc_bw_range bw_range = {0};
 6565	struct dc_dsc_config dsc_cfg = {0};
 6566	struct dc_dsc_config_options dsc_options = {0};
 6567
 6568	dc_dsc_get_default_config_option(dc, &dsc_options);
 6569	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
 6570
 6571	verified_link_cap = dc_link_get_link_cap(stream->link);
 6572	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
 6573	edp_min_bpp_x16 = 8 * 16;
 6574	edp_max_bpp_x16 = 8 * 16;
 6575
 6576	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
 6577		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
 6578
 6579	if (edp_max_bpp_x16 < edp_min_bpp_x16)
 6580		edp_min_bpp_x16 = edp_max_bpp_x16;
 6581
 6582	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
 6583				dc->debug.dsc_min_slice_height_override,
 6584				edp_min_bpp_x16, edp_max_bpp_x16,
 6585				dsc_caps,
 6586				&stream->timing,
 6587				dc_link_get_highest_encoding_format(aconnector->dc_link),
 6588				&bw_range)) {
 6589
 6590		if (bw_range.max_kbps < link_bw_in_kbps) {
 6591			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
 6592					dsc_caps,
 6593					&dsc_options,
 6594					0,
 6595					&stream->timing,
 6596					dc_link_get_highest_encoding_format(aconnector->dc_link),
 6597					&dsc_cfg)) {
 6598				stream->timing.dsc_cfg = dsc_cfg;
 6599				stream->timing.flags.DSC = 1;
 6600				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
 6601			}
 6602			return;
 6603		}
 6604	}
 6605
 6606	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
 6607				dsc_caps,
 6608				&dsc_options,
 6609				link_bw_in_kbps,
 6610				&stream->timing,
 6611				dc_link_get_highest_encoding_format(aconnector->dc_link),
 6612				&dsc_cfg)) {
 6613		stream->timing.dsc_cfg = dsc_cfg;
 6614		stream->timing.flags.DSC = 1;
 6615	}
 6616}
 6617
 6618static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 6619					struct dc_sink *sink, struct dc_stream_state *stream,
 6620					struct dsc_dec_dpcd_caps *dsc_caps)
 6621{
 6622	struct drm_connector *drm_connector = &aconnector->base;
 6623	u32 link_bandwidth_kbps;
 6624	struct dc *dc = sink->ctx->dc;
 6625	u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
 6626	u32 dsc_max_supported_bw_in_kbps;
 6627	u32 max_dsc_target_bpp_limit_override =
 6628		drm_connector->display_info.max_dsc_bpp;
 6629	struct dc_dsc_config_options dsc_options = {0};
 6630
 6631	dc_dsc_get_default_config_option(dc, &dsc_options);
 6632	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
 6633
 6634	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 6635							dc_link_get_link_cap(aconnector->dc_link));
 6636
 6637	/* Set DSC policy according to dsc_clock_en */
 6638	dc_dsc_policy_set_enable_dsc_when_not_needed(
 6639		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
 6640
 6641	if (sink->sink_signal == SIGNAL_TYPE_EDP &&
 6642	    !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
 6643	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
 6644
 6645		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
 6646
 6647	} else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 6648		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
 6649			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 6650						dsc_caps,
 6651						&dsc_options,
 6652						link_bandwidth_kbps,
 6653						&stream->timing,
 6654						dc_link_get_highest_encoding_format(aconnector->dc_link),
 6655						&stream->timing.dsc_cfg)) {
 6656				stream->timing.flags.DSC = 1;
 6657				DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
 6658							__func__, drm_connector->name);
 6659			}
 6660		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
 6661			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
 6662					dc_link_get_highest_encoding_format(aconnector->dc_link));
 6663			max_supported_bw_in_kbps = link_bandwidth_kbps;
 6664			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
 6665
 6666			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
 6667					max_supported_bw_in_kbps > 0 &&
 6668					dsc_max_supported_bw_in_kbps > 0)
 6669				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 6670						dsc_caps,
 6671						&dsc_options,
 6672						dsc_max_supported_bw_in_kbps,
 6673						&stream->timing,
 6674						dc_link_get_highest_encoding_format(aconnector->dc_link),
 6675						&stream->timing.dsc_cfg)) {
 6676					stream->timing.flags.DSC = 1;
 6677					DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
 6678									 __func__, drm_connector->name);
 6679				}
 6680		}
 6681	}
 6682
 6683	/* Overwrite the stream flag if DSC is enabled through debugfs */
 6684	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
 6685		stream->timing.flags.DSC = 1;
 6686
 6687	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
 6688		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
 6689
 6690	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
 6691		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
 6692
 6693	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
 6694		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
 6695}
 6696#endif
 6697
 6698static struct dc_stream_state *
 6699create_stream_for_sink(struct drm_connector *connector,
 6700		       const struct drm_display_mode *drm_mode,
 6701		       const struct dm_connector_state *dm_state,
 6702		       const struct dc_stream_state *old_stream,
 6703		       int requested_bpc)
 6704{
 6705	struct amdgpu_dm_connector *aconnector = NULL;
 6706	struct drm_display_mode *preferred_mode = NULL;
 6707	const struct drm_connector_state *con_state = &dm_state->base;
 6708	struct dc_stream_state *stream = NULL;
 6709	struct drm_display_mode mode;
 6710	struct drm_display_mode saved_mode;
 6711	struct drm_display_mode *freesync_mode = NULL;
 6712	bool native_mode_found = false;
 6713	bool recalculate_timing = false;
 6714	bool scale = dm_state->scaling != RMX_OFF;
 6715	int mode_refresh;
 6716	int preferred_refresh = 0;
 6717	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
 6718#if defined(CONFIG_DRM_AMD_DC_FP)
 6719	struct dsc_dec_dpcd_caps dsc_caps;
 6720#endif
 6721	struct dc_link *link = NULL;
 6722	struct dc_sink *sink = NULL;
 6723
 6724	drm_mode_init(&mode, drm_mode);
 6725	memset(&saved_mode, 0, sizeof(saved_mode));
 6726
 6727	if (connector == NULL) {
 6728		DRM_ERROR("connector is NULL!\n");
 6729		return stream;
 6730	}
 6731
 6732	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
 6733		aconnector = NULL;
 6734		aconnector = to_amdgpu_dm_connector(connector);
 6735		link = aconnector->dc_link;
 6736	} else {
 6737		struct drm_writeback_connector *wbcon = NULL;
 6738		struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
 6739
 6740		wbcon = drm_connector_to_writeback(connector);
 6741		dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
 6742		link = dm_wbcon->link;
 6743	}
 6744
 6745	if (!aconnector || !aconnector->dc_sink) {
 6746		sink = create_fake_sink(link);
 6747		if (!sink)
 6748			return stream;
 6749
 6750	} else {
 6751		sink = aconnector->dc_sink;
 6752		dc_sink_retain(sink);
 6753	}
 6754
 6755	stream = dc_create_stream_for_sink(sink);
 6756
 6757	if (stream == NULL) {
 6758		DRM_ERROR("Failed to create stream for sink!\n");
 6759		goto finish;
 6760	}
 6761
 6762	/* We leave this NULL for writeback connectors */
 6763	stream->dm_stream_context = aconnector;
 6764
 6765	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
 6766		connector->display_info.hdmi.scdc.scrambling.low_rates;
 6767
 6768	list_for_each_entry(preferred_mode, &connector->modes, head) {
 6769		/* Search for preferred mode */
 6770		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
 6771			native_mode_found = true;
 6772			break;
 6773		}
 6774	}
 6775	if (!native_mode_found)
 6776		preferred_mode = list_first_entry_or_null(
 6777				&connector->modes,
 6778				struct drm_display_mode,
 6779				head);
 6780
 6781	mode_refresh = drm_mode_vrefresh(&mode);
 6782
 6783	if (preferred_mode == NULL) {
 6784		/*
  6785		 * This may not be an error: the use case is when there are no
  6786		 * usermode calls to reset and set the mode upon hotplug. In that
  6787		 * case we call set mode ourselves to restore the previous mode,
  6788		 * and the mode list may not be populated in time.
 6789		 */
 6790		DRM_DEBUG_DRIVER("No preferred mode found\n");
 6791	} else if (aconnector) {
 6792		recalculate_timing = amdgpu_freesync_vid_mode &&
 6793				 is_freesync_video_mode(&mode, aconnector);
 6794		if (recalculate_timing) {
 6795			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
 6796			drm_mode_copy(&saved_mode, &mode);
 6797			saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
 6798			drm_mode_copy(&mode, freesync_mode);
 6799			mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
 6800		} else {
 6801			decide_crtc_timing_for_drm_display_mode(
 6802					&mode, preferred_mode, scale);
 6803
 6804			preferred_refresh = drm_mode_vrefresh(preferred_mode);
 6805		}
 6806	}
 6807
 6808	if (recalculate_timing)
 6809		drm_mode_set_crtcinfo(&saved_mode, 0);
 6810
 6811	/*
  6812	 * If scaling is enabled and the refresh rate didn't change,
  6813	 * copy the VIC and polarities from the old timings.
 6814	 */
 6815	if (!scale || mode_refresh != preferred_refresh)
 6816		fill_stream_properties_from_drm_display_mode(
 6817			stream, &mode, connector, con_state, NULL,
 6818			requested_bpc);
 6819	else
 6820		fill_stream_properties_from_drm_display_mode(
 6821			stream, &mode, connector, con_state, old_stream,
 6822			requested_bpc);
 6823
 6824	/* The rest isn't needed for writeback connectors */
 6825	if (!aconnector)
 6826		goto finish;
 6827
 6828	if (aconnector->timing_changed) {
 6829		drm_dbg(aconnector->base.dev,
 6830			"overriding timing for automated test, bpc %d, changing to %d\n",
 6831			stream->timing.display_color_depth,
 6832			aconnector->timing_requested->display_color_depth);
 6833		stream->timing = *aconnector->timing_requested;
 6834	}
 6835
 6836#if defined(CONFIG_DRM_AMD_DC_FP)
 6837	/* SST DSC determination policy */
 6838	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
 6839	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
 6840		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
 6841#endif
 6842
 6843	update_stream_scaling_settings(&mode, dm_state, stream);
 6844
 6845	fill_audio_info(
 6846		&stream->audio_info,
 6847		connector,
 6848		sink);
 6849
 6850	update_stream_signal(stream, sink);
 6851
 6852	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 6853		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
 6854
 6855	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
 6856	    stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
 6857	    stream->signal == SIGNAL_TYPE_EDP) {
 6858		const struct dc_edid_caps *edid_caps;
 6859		unsigned int disable_colorimetry = 0;
 6860
 6861		if (aconnector->dc_sink) {
 6862			edid_caps = &aconnector->dc_sink->edid_caps;
 6863			disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
 6864		}
 6865
  6866		/*
  6867		 * Decide whether the stream supports VSC SDP colorimetry
  6868		 * before building the VSC info packet.
  6869		 */
 6870		stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
 6871						      stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
 6872						      !disable_colorimetry;
 6873
 6874		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
 6875			tf = TRANSFER_FUNC_GAMMA_22;
 6876		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
 6877		aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 6878
 6879	}
 6880finish:
 6881	dc_sink_release(sink);
 6882
 6883	return stream;
 6884}
 6885
 6886static enum drm_connector_status
 6887amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
 6888{
 6889	bool connected;
 6890	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 6891
 6892	/*
 6893	 * Notes:
  6894	 * 1. This interface is NOT called in the context of the HPD irq.
  6895	 * 2. This interface *is* called in the context of a user-mode ioctl,
  6896	 * which makes it a bad place for *any* MST-related activity.
 6897	 */
 6898
 6899	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
 6900	    !aconnector->fake_enable)
 6901		connected = (aconnector->dc_sink != NULL);
 6902	else
 6903		connected = (aconnector->base.force == DRM_FORCE_ON ||
 6904				aconnector->base.force == DRM_FORCE_ON_DIGITAL);
 6905
 6906	update_subconnector_property(aconnector);
 6907
 6908	return (connected ? connector_status_connected :
 6909			connector_status_disconnected);
 6910}
 6911
 6912int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
 6913					    struct drm_connector_state *connector_state,
 6914					    struct drm_property *property,
 6915					    uint64_t val)
 6916{
 6917	struct drm_device *dev = connector->dev;
 6918	struct amdgpu_device *adev = drm_to_adev(dev);
 6919	struct dm_connector_state *dm_old_state =
 6920		to_dm_connector_state(connector->state);
 6921	struct dm_connector_state *dm_new_state =
 6922		to_dm_connector_state(connector_state);
 6923
 6924	int ret = -EINVAL;
 6925
 6926	if (property == dev->mode_config.scaling_mode_property) {
 6927		enum amdgpu_rmx_type rmx_type;
 6928
 6929		switch (val) {
 6930		case DRM_MODE_SCALE_CENTER:
 6931			rmx_type = RMX_CENTER;
 6932			break;
 6933		case DRM_MODE_SCALE_ASPECT:
 6934			rmx_type = RMX_ASPECT;
 6935			break;
 6936		case DRM_MODE_SCALE_FULLSCREEN:
 6937			rmx_type = RMX_FULL;
 6938			break;
 6939		case DRM_MODE_SCALE_NONE:
 6940		default:
 6941			rmx_type = RMX_OFF;
 6942			break;
 6943		}
 6944
 6945		if (dm_old_state->scaling == rmx_type)
 6946			return 0;
 6947
 6948		dm_new_state->scaling = rmx_type;
 6949		ret = 0;
 6950	} else if (property == adev->mode_info.underscan_hborder_property) {
 6951		dm_new_state->underscan_hborder = val;
 6952		ret = 0;
 6953	} else if (property == adev->mode_info.underscan_vborder_property) {
 6954		dm_new_state->underscan_vborder = val;
 6955		ret = 0;
 6956	} else if (property == adev->mode_info.underscan_property) {
 6957		dm_new_state->underscan_enable = val;
 6958		ret = 0;
 6959	}
 6960
 6961	return ret;
 6962}
 6963
 6964int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
 6965					    const struct drm_connector_state *state,
 6966					    struct drm_property *property,
 6967					    uint64_t *val)
 6968{
 6969	struct drm_device *dev = connector->dev;
 6970	struct amdgpu_device *adev = drm_to_adev(dev);
 6971	struct dm_connector_state *dm_state =
 6972		to_dm_connector_state(state);
 6973	int ret = -EINVAL;
 6974
 6975	if (property == dev->mode_config.scaling_mode_property) {
 6976		switch (dm_state->scaling) {
 6977		case RMX_CENTER:
 6978			*val = DRM_MODE_SCALE_CENTER;
 6979			break;
 6980		case RMX_ASPECT:
 6981			*val = DRM_MODE_SCALE_ASPECT;
 6982			break;
 6983		case RMX_FULL:
 6984			*val = DRM_MODE_SCALE_FULLSCREEN;
 6985			break;
 6986		case RMX_OFF:
 6987		default:
 6988			*val = DRM_MODE_SCALE_NONE;
 6989			break;
 6990		}
 6991		ret = 0;
 6992	} else if (property == adev->mode_info.underscan_hborder_property) {
 6993		*val = dm_state->underscan_hborder;
 6994		ret = 0;
 6995	} else if (property == adev->mode_info.underscan_vborder_property) {
 6996		*val = dm_state->underscan_vborder;
 6997		ret = 0;
 6998	} else if (property == adev->mode_info.underscan_property) {
 6999		*val = dm_state->underscan_enable;
 7000		ret = 0;
 7001	}
 7002
 7003	return ret;
 7004}
 7005
 7006/**
 7007 * DOC: panel power savings
 7008 *
 7009 * The display manager allows you to set your desired **panel power savings**
  7010 * level (from 0 to 4, with 0 representing off), e.g. using the following::
 7011 *
 7012 *   # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
 7013 *
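 * The current level can be read back from the same attribute::
 *
 *   # cat /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
 *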
  7014 * Modifying this value can have implications for color accuracy, so tread
 7015 * carefully.
 7016 */
 7017
 7018static ssize_t panel_power_savings_show(struct device *device,
 7019					struct device_attribute *attr,
 7020					char *buf)
 7021{
 7022	struct drm_connector *connector = dev_get_drvdata(device);
 7023	struct drm_device *dev = connector->dev;
 7024	u8 val;
 7025
 7026	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
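	/* ABM_LEVEL_IMMEDIATE_DISABLE is reported to userspace as 0 (off). */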
 7027	val = to_dm_connector_state(connector->state)->abm_level ==
 7028		ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
 7029		to_dm_connector_state(connector->state)->abm_level;
 7030	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 7031
 7032	return sysfs_emit(buf, "%u\n", val);
 7033}
 7034
 7035static ssize_t panel_power_savings_store(struct device *device,
 7036					 struct device_attribute *attr,
 7037					 const char *buf, size_t count)
 7038{
 7039	struct drm_connector *connector = dev_get_drvdata(device);
 7040	struct drm_device *dev = connector->dev;
 7041	long val;
 7042	int ret;
 7043
 7044	ret = kstrtol(buf, 0, &val);
 7045
 7046	if (ret)
 7047		return ret;
 7048
 7049	if (val < 0 || val > 4)
 7050		return -EINVAL;
 7051
 7052	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
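	/* 0 maps to ABM_LEVEL_IMMEDIATE_DISABLE; levels 1-4 are applied directly. */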
 7053	to_dm_connector_state(connector->state)->abm_level = val ?:
 7054		ABM_LEVEL_IMMEDIATE_DISABLE;
 7055	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 7056
 7057	drm_kms_helper_hotplug_event(dev);
 7058
 7059	return count;
 7060}
 7061
 7062static DEVICE_ATTR_RW(panel_power_savings);
 7063
 7064static struct attribute *amdgpu_attrs[] = {
 7065	&dev_attr_panel_power_savings.attr,
 7066	NULL
 7067};
 7068
 7069static const struct attribute_group amdgpu_group = {
 7070	.name = "amdgpu",
 7071	.attrs = amdgpu_attrs
 7072};
 7073
 7074static bool
 7075amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector)
 7076{
 7077	if (amdgpu_dm_abm_level >= 0)
 7078		return false;
 7079
 7080	if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
 7081		return false;
 7082
 7083	/* check for OLED panels */
 7084	if (amdgpu_dm_connector->bl_idx >= 0) {
 7085		struct drm_device *drm = amdgpu_dm_connector->base.dev;
 7086		struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
 7087		struct amdgpu_dm_backlight_caps *caps;
 7088
 7089		caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx];
 7090		if (caps->aux_support)
 7091			return false;
 7092	}
 7093
 7094	return true;
 7095}
 7096
 7097static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
 7098{
 7099	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 7100
 7101	if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
 7102		sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
 7103
 7104	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
 7105}
 7106
 7107static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 7108{
 7109	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 7110	struct amdgpu_device *adev = drm_to_adev(connector->dev);
 7111	struct amdgpu_display_manager *dm = &adev->dm;
 7112
 7113	/*
 7114	 * Call only if mst_mgr was initialized before since it's not done
 7115	 * for all connector types.
 7116	 */
 7117	if (aconnector->mst_mgr.dev)
 7118		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 7119
 7120	if (aconnector->bl_idx != -1) {
 7121		backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
 7122		dm->backlight_dev[aconnector->bl_idx] = NULL;
 7123	}
 7124
 7125	if (aconnector->dc_em_sink)
 7126		dc_sink_release(aconnector->dc_em_sink);
 7127	aconnector->dc_em_sink = NULL;
 7128	if (aconnector->dc_sink)
 7129		dc_sink_release(aconnector->dc_sink);
 7130	aconnector->dc_sink = NULL;
 7131
 7132	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
 7133	drm_connector_unregister(connector);
 7134	drm_connector_cleanup(connector);
 7135	if (aconnector->i2c) {
 7136		i2c_del_adapter(&aconnector->i2c->base);
 7137		kfree(aconnector->i2c);
 7138	}
 7139	kfree(aconnector->dm_dp_aux.aux.name);
 7140
 7141	kfree(connector);
 7142}
 7143
 7144void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 7145{
 7146	struct dm_connector_state *state =
 7147		to_dm_connector_state(connector->state);
 7148
 7149	if (connector->state)
 7150		__drm_atomic_helper_connector_destroy_state(connector->state);
 7151
 7152	kfree(state);
 7153
 7154	state = kzalloc(sizeof(*state), GFP_KERNEL);
 7155
 7156	if (state) {
 7157		state->scaling = RMX_OFF;
 7158		state->underscan_enable = false;
 7159		state->underscan_hborder = 0;
 7160		state->underscan_vborder = 0;
 7161		state->base.max_requested_bpc = 8;
 7162		state->vcpi_slots = 0;
 7163		state->pbn = 0;
 7164
 7165		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
 7166			if (amdgpu_dm_abm_level <= 0)
 7167				state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
 7168			else
 7169				state->abm_level = amdgpu_dm_abm_level;
 7170		}
 7171
 7172		__drm_atomic_helper_connector_reset(connector, &state->base);
 7173	}
 7174}
 7175
 7176struct drm_connector_state *
 7177amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 7178{
 7179	struct dm_connector_state *state =
 7180		to_dm_connector_state(connector->state);
 7181
 7182	struct dm_connector_state *new_state =
 7183			kmemdup(state, sizeof(*state), GFP_KERNEL);
 7184
 7185	if (!new_state)
 7186		return NULL;
 7187
 7188	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
 7189
 7190	new_state->freesync_capable = state->freesync_capable;
 7191	new_state->abm_level = state->abm_level;
 7192	new_state->scaling = state->scaling;
 7193	new_state->underscan_enable = state->underscan_enable;
 7194	new_state->underscan_hborder = state->underscan_hborder;
 7195	new_state->underscan_vborder = state->underscan_vborder;
 7196	new_state->vcpi_slots = state->vcpi_slots;
 7197	new_state->pbn = state->pbn;
 7198	return &new_state->base;
 7199}
 7200
 7201static int
 7202amdgpu_dm_connector_late_register(struct drm_connector *connector)
 7203{
 7204	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7205		to_amdgpu_dm_connector(connector);
 7206	int r;
 7207
 7208	if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) {
 7209		r = sysfs_create_group(&connector->kdev->kobj,
 7210				       &amdgpu_group);
 7211		if (r)
 7212			return r;
 7213	}
 7214
 7215	amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
 7216
 7217	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 7218	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
 7219		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
 7220		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
 7221		if (r)
 7222			return r;
 7223	}
 7224
 7225#if defined(CONFIG_DEBUG_FS)
 7226	connector_debugfs_init(amdgpu_dm_connector);
 7227#endif
 7228
 7229	return 0;
 7230}
 7231
 7232static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
 7233{
 7234	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 7235	struct dc_link *dc_link = aconnector->dc_link;
 7236	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
 7237	const struct drm_edid *drm_edid;
 7238	struct i2c_adapter *ddc;
 7239
 7240	if (dc_link && dc_link->aux_mode)
 7241		ddc = &aconnector->dm_dp_aux.aux.ddc;
 7242	else
 7243		ddc = &aconnector->i2c->base;
 7244
 7245	drm_edid = drm_edid_read_ddc(connector, ddc);
 7246	drm_edid_connector_update(connector, drm_edid);
 7247	if (!drm_edid) {
 7248		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
 7249		return;
 7250	}
 7251
 7252	aconnector->drm_edid = drm_edid;
 7253	/* Update emulated (virtual) sink's EDID */
 7254	if (dc_em_sink && dc_link) {
 7255		// FIXME: Get rid of drm_edid_raw()
 7256		const struct edid *edid = drm_edid_raw(drm_edid);
 7257
 7258		memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
 7259		memmove(dc_em_sink->dc_edid.raw_edid, edid,
 7260			(edid->extensions + 1) * EDID_LENGTH);
 7261		dm_helpers_parse_edid_caps(
 7262			dc_link,
 7263			&dc_em_sink->dc_edid,
 7264			&dc_em_sink->edid_caps);
 7265	}
 7266}
 7267
 7268static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 7269	.reset = amdgpu_dm_connector_funcs_reset,
 7270	.detect = amdgpu_dm_connector_detect,
 7271	.fill_modes = drm_helper_probe_single_connector_modes,
 7272	.destroy = amdgpu_dm_connector_destroy,
 7273	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
 7274	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 7275	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
 7276	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
 7277	.late_register = amdgpu_dm_connector_late_register,
 7278	.early_unregister = amdgpu_dm_connector_unregister,
 7279	.force = amdgpu_dm_connector_funcs_force
 7280};
 7281
 7282static int get_modes(struct drm_connector *connector)
 7283{
 7284	return amdgpu_dm_connector_get_modes(connector);
 7285}
 7286
 7287static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 7288{
 7289	struct drm_connector *connector = &aconnector->base;
 7290	struct dc_link *dc_link = aconnector->dc_link;
 7291	struct dc_sink_init_data init_params = {
 7292			.link = aconnector->dc_link,
 7293			.sink_signal = SIGNAL_TYPE_VIRTUAL
 7294	};
 7295	const struct drm_edid *drm_edid;
 7296	const struct edid *edid;
 7297	struct i2c_adapter *ddc;
 7298
 7299	if (dc_link && dc_link->aux_mode)
 7300		ddc = &aconnector->dm_dp_aux.aux.ddc;
 7301	else
 7302		ddc = &aconnector->i2c->base;
 7303
 7304	drm_edid = drm_edid_read_ddc(connector, ddc);
 7305	drm_edid_connector_update(connector, drm_edid);
 7306	if (!drm_edid) {
 7307		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
 7308		return;
 7309	}
 7310
 7311	if (connector->display_info.is_hdmi)
 7312		init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
 7313
 7314	aconnector->drm_edid = drm_edid;
 7315
 7316	edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
 7317	aconnector->dc_em_sink = dc_link_add_remote_sink(
 7318		aconnector->dc_link,
 7319		(uint8_t *)edid,
 7320		(edid->extensions + 1) * EDID_LENGTH,
 7321		&init_params);
 7322
 7323	if (aconnector->base.force == DRM_FORCE_ON) {
 7324		aconnector->dc_sink = aconnector->dc_link->local_sink ?
 7325		aconnector->dc_link->local_sink :
 7326		aconnector->dc_em_sink;
 7327		if (aconnector->dc_sink)
 7328			dc_sink_retain(aconnector->dc_sink);
 7329	}
 7330}
 7331
 7332static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
 7333{
 7334	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
 7335
 7336	/*
  7337	 * In case of a headless boot with force-on for a DP managed connector,
  7338	 * these settings have to be != 0 to get an initial modeset.
 7339	 */
 7340	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 7341		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
 7342		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
 7343	}
 7344
 7345	create_eml_sink(aconnector);
 7346}
 7347
 7348static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 7349						struct dc_stream_state *stream)
 7350{
 7351	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
 7352	struct dc_plane_state *dc_plane_state = NULL;
 7353	struct dc_state *dc_state = NULL;
 7354
 7355	if (!stream)
 7356		goto cleanup;
 7357
 7358	dc_plane_state = dc_create_plane_state(dc);
 7359	if (!dc_plane_state)
 7360		goto cleanup;
 7361
 7362	dc_state = dc_state_create(dc, NULL);
 7363	if (!dc_state)
 7364		goto cleanup;
 7365
 7366	/* populate stream to plane */
 7367	dc_plane_state->src_rect.height  = stream->src.height;
 7368	dc_plane_state->src_rect.width   = stream->src.width;
 7369	dc_plane_state->dst_rect.height  = stream->src.height;
 7370	dc_plane_state->dst_rect.width   = stream->src.width;
 7371	dc_plane_state->clip_rect.height = stream->src.height;
 7372	dc_plane_state->clip_rect.width  = stream->src.width;
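	/* Surface pitch is aligned up to a multiple of 256. */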
 7373	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
 7374	dc_plane_state->plane_size.surface_size.height = stream->src.height;
 7375	dc_plane_state->plane_size.surface_size.width  = stream->src.width;
 7376	dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
 7377	dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
 7378	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
 7379	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
 7380	dc_plane_state->rotation = ROTATION_ANGLE_0;
 7381	dc_plane_state->is_tiling_rotated = false;
 7382	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
 7383
 7384	dc_result = dc_validate_stream(dc, stream);
 7385	if (dc_result == DC_OK)
 7386		dc_result = dc_validate_plane(dc, dc_plane_state);
 7387
 7388	if (dc_result == DC_OK)
 7389		dc_result = dc_state_add_stream(dc, dc_state, stream);
 7390
 7391	if (dc_result == DC_OK && !dc_state_add_plane(
 7392						dc,
 7393						stream,
 7394						dc_plane_state,
 7395						dc_state))
 7396		dc_result = DC_FAIL_ATTACH_SURFACES;
 7397
 7398	if (dc_result == DC_OK)
 7399		dc_result = dc_validate_global_state(dc, dc_state, true);
 7400
 7401cleanup:
 7402	if (dc_state)
 7403		dc_state_release(dc_state);
 7404
 7405	if (dc_plane_state)
 7406		dc_plane_state_release(dc_plane_state);
 7407
 7408	return dc_result;
 7409}
 7410
 7411struct dc_stream_state *
 7412create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 7413				const struct drm_display_mode *drm_mode,
 7414				const struct dm_connector_state *dm_state,
 7415				const struct dc_stream_state *old_stream)
 7416{
 7417	struct drm_connector *connector = &aconnector->base;
 7418	struct amdgpu_device *adev = drm_to_adev(connector->dev);
 7419	struct dc_stream_state *stream;
 7420	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
 7421	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
 7422	enum dc_status dc_result = DC_OK;
 7423	uint8_t bpc_limit = 6;
 7424
 7425	if (!dm_state)
 7426		return NULL;
 7427
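	/*
	 * HDMI sinks, native or behind a DP-to-HDMI converter, are not retried
	 * below 8 bpc; other signals may fall back as low as 6 bpc.
	 */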
 7428	if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
 7429	    aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
 7430		bpc_limit = 8;
 7431
 7432	do {
 7433		stream = create_stream_for_sink(connector, drm_mode,
 7434						dm_state, old_stream,
 7435						requested_bpc);
 7436		if (stream == NULL) {
 7437			DRM_ERROR("Failed to create stream for sink!\n");
 7438			break;
 7439		}
 7440
 7441		if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 7442			return stream;
 7443
 7444		dc_result = dc_validate_stream(adev->dm.dc, stream);
 7445		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 7446			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
 7447
 7448		if (dc_result == DC_OK)
 7449			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
 7450
 7451		if (dc_result != DC_OK) {
 7452			DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
 7453				      drm_mode->hdisplay,
 7454				      drm_mode->vdisplay,
 7455				      drm_mode->clock,
 7456				      dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
 7457				      dc_color_depth_to_str(stream->timing.display_color_depth),
 7458				      dc_status_to_str(dc_result));
 7459
 7460			dc_stream_release(stream);
 7461			stream = NULL;
 7462			requested_bpc -= 2; /* lower bpc to retry validation */
 7463		}
 7464
 7465	} while (stream == NULL && requested_bpc >= bpc_limit);
 7466
 7467	if ((dc_result == DC_FAIL_ENC_VALIDATE ||
 7468	     dc_result == DC_EXCEED_DONGLE_CAP) &&
 7469	     !aconnector->force_yuv420_output) {
 7470		DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
 7471				     __func__, __LINE__);
 7472
 7473		aconnector->force_yuv420_output = true;
 7474		stream = create_validate_stream_for_sink(aconnector, drm_mode,
 7475						dm_state, old_stream);
 7476		aconnector->force_yuv420_output = false;
 7477	}
 7478
 7479	return stream;
 7480}
 7481
 7482enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 7483				   struct drm_display_mode *mode)
 7484{
 7485	int result = MODE_ERROR;
 7486	struct dc_sink *dc_sink;
 7487	/* TODO: Unhardcode stream count */
 7488	struct dc_stream_state *stream;
 7489	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 7490
 7491	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 7492			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
 7493		return result;
 7494
 7495	/*
  7496	 * Only run this the first time mode_valid is called, to initialize
  7497	 * EDID management.
 7498	 */
 7499	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
 7500		!aconnector->dc_em_sink)
 7501		handle_edid_mgmt(aconnector);
 7502
 7503	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
 7504
 7505	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
 7506				aconnector->base.force != DRM_FORCE_ON) {
 7507		DRM_ERROR("dc_sink is NULL!\n");
 7508		goto fail;
 7509	}
 7510
 7511	drm_mode_set_crtcinfo(mode, 0);
 7512
 7513	stream = create_validate_stream_for_sink(aconnector, mode,
 7514						 to_dm_connector_state(connector->state),
 7515						 NULL);
 7516	if (stream) {
 7517		dc_stream_release(stream);
 7518		result = MODE_OK;
 7519	}
 7520
 7521fail:
  7522	/* TODO: error handling */
 7523	return result;
 7524}
 7525
 7526static int fill_hdr_info_packet(const struct drm_connector_state *state,
 7527				struct dc_info_packet *out)
 7528{
 7529	struct hdmi_drm_infoframe frame;
 7530	unsigned char buf[30]; /* 26 + 4 */
 7531	ssize_t len;
 7532	int ret, i;
 7533
 7534	memset(out, 0, sizeof(*out));
 7535
 7536	if (!state->hdr_output_metadata)
 7537		return 0;
 7538
 7539	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
 7540	if (ret)
 7541		return ret;
 7542
 7543	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
 7544	if (len < 0)
 7545		return (int)len;
 7546
 7547	/* Static metadata is a fixed 26 bytes + 4 byte header. */
 7548	if (len != 30)
 7549		return -EINVAL;
 7550
 7551	/* Prepare the infopacket for DC. */
 7552	switch (state->connector->connector_type) {
 7553	case DRM_MODE_CONNECTOR_HDMIA:
 7554		out->hb0 = 0x87; /* type */
 7555		out->hb1 = 0x01; /* version */
 7556		out->hb2 = 0x1A; /* length */
 7557		out->sb[0] = buf[3]; /* checksum */
 7558		i = 1;
 7559		break;
 7560
 7561	case DRM_MODE_CONNECTOR_DisplayPort:
 7562	case DRM_MODE_CONNECTOR_eDP:
 7563		out->hb0 = 0x00; /* sdp id, zero */
 7564		out->hb1 = 0x87; /* type */
 7565		out->hb2 = 0x1D; /* payload len - 1 */
 7566		out->hb3 = (0x13 << 2); /* sdp version */
 7567		out->sb[0] = 0x01; /* version */
 7568		out->sb[1] = 0x1A; /* length */
 7569		i = 2;
 7570		break;
 7571
 7572	default:
 7573		return -EINVAL;
 7574	}
 7575
 7576	memcpy(&out->sb[i], &buf[4], 26);
 7577	out->valid = true;
 7578
 7579	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
 7580		       sizeof(out->sb), false);
 7581
 7582	return 0;
 7583}
 7584
 7585static int
 7586amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
 7587				 struct drm_atomic_state *state)
 7588{
 7589	struct drm_connector_state *new_con_state =
 7590		drm_atomic_get_new_connector_state(state, conn);
 7591	struct drm_connector_state *old_con_state =
 7592		drm_atomic_get_old_connector_state(state, conn);
 7593	struct drm_crtc *crtc = new_con_state->crtc;
 7594	struct drm_crtc_state *new_crtc_state;
 7595	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
 7596	int ret;
 7597
 7598	trace_amdgpu_dm_connector_atomic_check(new_con_state);
 7599
 7600	if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
 7601		ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
 7602		if (ret < 0)
 7603			return ret;
 7604	}
 7605
 7606	if (!crtc)
 7607		return 0;
 7608
 7609	if (new_con_state->colorspace != old_con_state->colorspace) {
 7610		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
 7611		if (IS_ERR(new_crtc_state))
 7612			return PTR_ERR(new_crtc_state);
 7613
 7614		new_crtc_state->mode_changed = true;
 7615	}
 7616
 7617	if (new_con_state->content_type != old_con_state->content_type) {
 7618		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
 7619		if (IS_ERR(new_crtc_state))
 7620			return PTR_ERR(new_crtc_state);
 7621
 7622		new_crtc_state->mode_changed = true;
 7623	}
 7624
 7625	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
 7626		struct dc_info_packet hdr_infopacket;
 7627
 7628		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
 7629		if (ret)
 7630			return ret;
 7631
 7632		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
 7633		if (IS_ERR(new_crtc_state))
 7634			return PTR_ERR(new_crtc_state);
 7635
 7636		/*
 7637		 * DC considers the stream backends changed if the
 7638		 * static metadata changes. Forcing the modeset also
 7639		 * gives a simple way for userspace to switch from
 7640		 * 8bpc to 10bpc when setting the metadata to enter
 7641		 * or exit HDR.
 7642		 *
 7643		 * Changing the static metadata after it's been
 7644		 * set is permissible, however. So only force a
 7645		 * modeset if we're entering or exiting HDR.
 7646		 */
 7647		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
 7648			!old_con_state->hdr_output_metadata ||
 7649			!new_con_state->hdr_output_metadata;
 7650	}
 7651
 7652	return 0;
 7653}
 7654
 7655static const struct drm_connector_helper_funcs
 7656amdgpu_dm_connector_helper_funcs = {
 7657	/*
  7658	 * If a second, larger display is hotplugged in fbcon mode, its higher
  7659	 * resolution modes are filtered out by drm_mode_validate_size() and end up
  7660	 * missing once the user starts lightdm. The get_modes callback therefore
  7661	 * has to rebuild the mode list, not just return the mode count.
 7662	 */
 7663	.get_modes = get_modes,
 7664	.mode_valid = amdgpu_dm_connector_mode_valid,
 7665	.atomic_check = amdgpu_dm_connector_atomic_check,
 7666};
 7667
 7668static void dm_encoder_helper_disable(struct drm_encoder *encoder)
 7669{
 7670
 7671}
 7672
 7673int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
 7674{
 7675	switch (display_color_depth) {
 7676	case COLOR_DEPTH_666:
 7677		return 6;
 7678	case COLOR_DEPTH_888:
 7679		return 8;
 7680	case COLOR_DEPTH_101010:
 7681		return 10;
 7682	case COLOR_DEPTH_121212:
 7683		return 12;
 7684	case COLOR_DEPTH_141414:
 7685		return 14;
 7686	case COLOR_DEPTH_161616:
 7687		return 16;
 7688	default:
 7689		break;
 7690	}
 7691	return 0;
 7692}
 7693
 7694static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 7695					  struct drm_crtc_state *crtc_state,
 7696					  struct drm_connector_state *conn_state)
 7697{
 7698	struct drm_atomic_state *state = crtc_state->state;
 7699	struct drm_connector *connector = conn_state->connector;
 7700	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 7701	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
 7702	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
 7703	struct drm_dp_mst_topology_mgr *mst_mgr;
 7704	struct drm_dp_mst_port *mst_port;
 7705	struct drm_dp_mst_topology_state *mst_state;
 7706	enum dc_color_depth color_depth;
 7707	int clock, bpp = 0;
 7708	bool is_y420 = false;
 7709
 7710	if (!aconnector->mst_output_port)
 7711		return 0;
 7712
 7713	mst_port = aconnector->mst_output_port;
 7714	mst_mgr = &aconnector->mst_root->mst_mgr;
 7715
 7716	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
 7717		return 0;
 7718
 7719	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
 7720	if (IS_ERR(mst_state))
 7721		return PTR_ERR(mst_state);
 7722
 7723	mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
 7724
 7725	if (!state->duplicated) {
 7726		int max_bpc = conn_state->max_requested_bpc;
 7727
 7728		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
 7729			  aconnector->force_yuv420_output;
 7730		color_depth = convert_color_depth_from_display_info(connector,
 7731								    is_y420,
 7732								    max_bpc);
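		/*
		 * bpp covers all three color components; the << 4 expresses bpp
		 * in 1/16 units for drm_dp_calc_pbn_mode().
		 */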
 7733		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
 7734		clock = adjusted_mode->clock;
 7735		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
 7736	}
 7737
 7738	dm_new_connector_state->vcpi_slots =
 7739		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
 7740					      dm_new_connector_state->pbn);
 7741	if (dm_new_connector_state->vcpi_slots < 0) {
 7742		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
 7743		return dm_new_connector_state->vcpi_slots;
 7744	}
 7745	return 0;
 7746}
 7747
 7748const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 7749	.disable = dm_encoder_helper_disable,
 7750	.atomic_check = dm_encoder_helper_atomic_check
 7751};
 7752
 7753static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 7754					    struct dc_state *dc_state,
 7755					    struct dsc_mst_fairness_vars *vars)
 7756{
 7757	struct dc_stream_state *stream = NULL;
 7758	struct drm_connector *connector;
 7759	struct drm_connector_state *new_con_state;
 7760	struct amdgpu_dm_connector *aconnector;
 7761	struct dm_connector_state *dm_conn_state;
 7762	int i, j, ret;
 7763	int vcpi, pbn_div, pbn = 0, slot_num = 0;
 7764
 7765	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 7766
 7767		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 7768			continue;
 7769
 7770		aconnector = to_amdgpu_dm_connector(connector);
 7771
 7772		if (!aconnector->mst_output_port)
 7773			continue;
 7774
 7775		if (!new_con_state || !new_con_state->crtc)
 7776			continue;
 7777
 7778		dm_conn_state = to_dm_connector_state(new_con_state);
 7779
 7780		for (j = 0; j < dc_state->stream_count; j++) {
 7781			stream = dc_state->streams[j];
 7782			if (!stream)
 7783				continue;
 7784
 7785			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
 7786				break;
 7787
 7788			stream = NULL;
 7789		}
 7790
 7791		if (!stream)
 7792			continue;
 7793
 7794		pbn_div = dm_mst_get_pbn_divider(stream->link);
  7795		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
 7796		for (j = 0; j < dc_state->stream_count; j++) {
 7797			if (vars[j].aconnector == aconnector) {
 7798				pbn = vars[j].pbn;
 7799				break;
 7800			}
 7801		}
 7802
 7803		if (j == dc_state->stream_count || pbn_div == 0)
 7804			continue;
 7805
 7806		slot_num = DIV_ROUND_UP(pbn, pbn_div);
 7807
 7808		if (stream->timing.flags.DSC != 1) {
 7809			dm_conn_state->pbn = pbn;
 7810			dm_conn_state->vcpi_slots = slot_num;
 7811
 7812			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
 7813							   dm_conn_state->pbn, false);
 7814			if (ret < 0)
 7815				return ret;
 7816
 7817			continue;
 7818		}
 7819
 7820		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
 7821		if (vcpi < 0)
 7822			return vcpi;
 7823
 7824		dm_conn_state->pbn = pbn;
 7825		dm_conn_state->vcpi_slots = vcpi;
 7826	}
 7827	return 0;
 7828}
 7829
 7830static int to_drm_connector_type(enum signal_type st)
 7831{
 7832	switch (st) {
 7833	case SIGNAL_TYPE_HDMI_TYPE_A:
 7834		return DRM_MODE_CONNECTOR_HDMIA;
 7835	case SIGNAL_TYPE_EDP:
 7836		return DRM_MODE_CONNECTOR_eDP;
 7837	case SIGNAL_TYPE_LVDS:
 7838		return DRM_MODE_CONNECTOR_LVDS;
 7839	case SIGNAL_TYPE_RGB:
 7840		return DRM_MODE_CONNECTOR_VGA;
 7841	case SIGNAL_TYPE_DISPLAY_PORT:
 7842	case SIGNAL_TYPE_DISPLAY_PORT_MST:
 7843		return DRM_MODE_CONNECTOR_DisplayPort;
 7844	case SIGNAL_TYPE_DVI_DUAL_LINK:
 7845	case SIGNAL_TYPE_DVI_SINGLE_LINK:
 7846		return DRM_MODE_CONNECTOR_DVID;
 7847	case SIGNAL_TYPE_VIRTUAL:
 7848		return DRM_MODE_CONNECTOR_VIRTUAL;
 7849
 7850	default:
 7851		return DRM_MODE_CONNECTOR_Unknown;
 7852	}
 7853}
 7854
 7855static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
 7856{
 7857	struct drm_encoder *encoder;
 7858
 7859	/* There is only one encoder per connector */
 7860	drm_connector_for_each_possible_encoder(connector, encoder)
 7861		return encoder;
 7862
 7863	return NULL;
 7864}
 7865
 7866static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
 7867{
 7868	struct drm_encoder *encoder;
 7869	struct amdgpu_encoder *amdgpu_encoder;
 7870
 7871	encoder = amdgpu_dm_connector_to_encoder(connector);
 7872
 7873	if (encoder == NULL)
 7874		return;
 7875
 7876	amdgpu_encoder = to_amdgpu_encoder(encoder);
 7877
 7878	amdgpu_encoder->native_mode.clock = 0;
 7879
 7880	if (!list_empty(&connector->probed_modes)) {
 7881		struct drm_display_mode *preferred_mode = NULL;
 7882
 7883		list_for_each_entry(preferred_mode,
 7884				    &connector->probed_modes,
 7885				    head) {
 7886			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
 7887				amdgpu_encoder->native_mode = *preferred_mode;
 7888
 7889			break;
 7890		}
 7891
 7892	}
 7893}
 7894
 7895static struct drm_display_mode *
 7896amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
 7897			     char *name,
 7898			     int hdisplay, int vdisplay)
 7899{
 7900	struct drm_device *dev = encoder->dev;
 7901	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 7902	struct drm_display_mode *mode = NULL;
 7903	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
 7904
 7905	mode = drm_mode_duplicate(dev, native_mode);
 7906
 7907	if (mode == NULL)
 7908		return NULL;
 7909
 7910	mode->hdisplay = hdisplay;
 7911	mode->vdisplay = vdisplay;
 7912	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
 7913	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
 7914
 7915	return mode;
 7916
 7917}
 7918
 7919static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
 7920						 struct drm_connector *connector)
 7921{
 7922	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 7923	struct drm_display_mode *mode = NULL;
 7924	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
 7925	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7926				to_amdgpu_dm_connector(connector);
 7927	int i;
 7928	int n;
 7929	struct mode_size {
 7930		char name[DRM_DISPLAY_MODE_LEN];
 7931		int w;
 7932		int h;
 7933	} common_modes[] = {
 7934		{  "640x480",  640,  480},
 7935		{  "800x600",  800,  600},
 7936		{ "1024x768", 1024,  768},
 7937		{ "1280x720", 1280,  720},
 7938		{ "1280x800", 1280,  800},
 7939		{"1280x1024", 1280, 1024},
 7940		{ "1440x900", 1440,  900},
 7941		{"1680x1050", 1680, 1050},
 7942		{"1600x1200", 1600, 1200},
 7943		{"1920x1080", 1920, 1080},
 7944		{"1920x1200", 1920, 1200}
 7945	};
 7946
 7947	n = ARRAY_SIZE(common_modes);
 7948
 7949	for (i = 0; i < n; i++) {
 7950		struct drm_display_mode *curmode = NULL;
 7951		bool mode_existed = false;
 7952
 7953		if (common_modes[i].w > native_mode->hdisplay ||
 7954		    common_modes[i].h > native_mode->vdisplay ||
 7955		   (common_modes[i].w == native_mode->hdisplay &&
 7956		    common_modes[i].h == native_mode->vdisplay))
 7957			continue;
 7958
 7959		list_for_each_entry(curmode, &connector->probed_modes, head) {
 7960			if (common_modes[i].w == curmode->hdisplay &&
 7961			    common_modes[i].h == curmode->vdisplay) {
 7962				mode_existed = true;
 7963				break;
 7964			}
 7965		}
 7966
 7967		if (mode_existed)
 7968			continue;
 7969
 7970		mode = amdgpu_dm_create_common_mode(encoder,
 7971				common_modes[i].name, common_modes[i].w,
 7972				common_modes[i].h);
 7973		if (!mode)
 7974			continue;
 7975
 7976		drm_mode_probed_add(connector, mode);
 7977		amdgpu_dm_connector->num_modes++;
 7978	}
 7979}
 7980
 7981static void amdgpu_set_panel_orientation(struct drm_connector *connector)
 7982{
 7983	struct drm_encoder *encoder;
 7984	struct amdgpu_encoder *amdgpu_encoder;
 7985	const struct drm_display_mode *native_mode;
 7986
 7987	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
 7988	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
 7989		return;
 7990
 7991	mutex_lock(&connector->dev->mode_config.mutex);
 7992	amdgpu_dm_connector_get_modes(connector);
 7993	mutex_unlock(&connector->dev->mode_config.mutex);
 7994
 7995	encoder = amdgpu_dm_connector_to_encoder(connector);
 7996	if (!encoder)
 7997		return;
 7998
 7999	amdgpu_encoder = to_amdgpu_encoder(encoder);
 8000
 8001	native_mode = &amdgpu_encoder->native_mode;
 8002	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
 8003		return;
 8004
 8005	drm_connector_set_panel_orientation_with_quirk(connector,
 8006						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
 8007						       native_mode->hdisplay,
 8008						       native_mode->vdisplay);
 8009}
 8010
 8011static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
 8012					      const struct drm_edid *drm_edid)
 8013{
 8014	struct amdgpu_dm_connector *amdgpu_dm_connector =
 8015			to_amdgpu_dm_connector(connector);
 8016
 8017	if (drm_edid) {
 8018		/* empty probed_modes */
 8019		INIT_LIST_HEAD(&connector->probed_modes);
 8020		amdgpu_dm_connector->num_modes =
 8021				drm_edid_connector_add_modes(connector);
 8022
  8023		/* Sort the probed modes before calling
  8024		 * amdgpu_dm_get_native_mode(), since an EDID can have
  8025		 * more than one preferred mode. Modes later in the
  8026		 * probed mode list may have a higher, preferred
  8027		 * resolution: for example, 3840x2160 in the base EDID
  8028		 * preferred timing and 4096x2160 in a later DID
  8029		 * extension block.
  8030		 */
 8031		drm_mode_sort(&connector->probed_modes);
 8032		amdgpu_dm_get_native_mode(connector);
 8033
 8034		/* Freesync capabilities are reset by calling
 8035		 * drm_edid_connector_add_modes() and need to be
 8036		 * restored here.
 8037		 */
 8038		amdgpu_dm_update_freesync_caps(connector, drm_edid);
 8039	} else {
 8040		amdgpu_dm_connector->num_modes = 0;
 8041	}
 8042}
 8043
 8044static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 8045			      struct drm_display_mode *mode)
 8046{
 8047	struct drm_display_mode *m;
 8048
 8049	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
 8050		if (drm_mode_equal(m, mode))
 8051			return true;
 8052	}
 8053
 8054	return false;
 8055}
 8056
 8057static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
 8058{
 8059	const struct drm_display_mode *m;
 8060	struct drm_display_mode *new_mode;
 8061	uint i;
 8062	u32 new_modes_count = 0;
 8063
 8064	/* Standard FPS values
 8065	 *
 8066	 * 23.976       - TV/NTSC
 8067	 * 24           - Cinema
 8068	 * 25           - TV/PAL
 8069	 * 29.97        - TV/NTSC
 8070	 * 30           - TV/NTSC
 8071	 * 48           - Cinema HFR
 8072	 * 50           - TV/PAL
 8073	 * 60           - Commonly used
 8074	 * 48,72,96,120 - Multiples of 24
 8075	 */
 8076	static const u32 common_rates[] = {
 8077		23976, 24000, 25000, 29970, 30000,
 8078		48000, 50000, 60000, 72000, 96000, 120000
 8079	};
 8080
 8081	/*
 8082	 * Find mode with highest refresh rate with the same resolution
  8083	 * as the preferred mode. Some monitors report a preferred mode whose
  8084	 * refresh rate is lower than the highest rate they support.
 8085	 */
 8086
 8087	m = get_highest_refresh_rate_mode(aconnector, true);
 8088	if (!m)
 8089		return 0;
 8090
 8091	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
 8092		u64 target_vtotal, target_vtotal_diff;
 8093		u64 num, den;
 8094
 8095		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
 8096			continue;
 8097
 8098		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
 8099		    common_rates[i] > aconnector->max_vfreq * 1000)
 8100			continue;
 8101
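		/*
		 * vtotal needed to hit the target rate:
		 * vtotal = clock (kHz) * 10^6 / (htotal * rate (mHz)).
		 */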
 8102		num = (unsigned long long)m->clock * 1000 * 1000;
 8103		den = common_rates[i] * (unsigned long long)m->htotal;
 8104		target_vtotal = div_u64(num, den);
 8105		target_vtotal_diff = target_vtotal - m->vtotal;
 8106
 8107		/* Check for illegal modes */
 8108		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
 8109		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
 8110		    m->vtotal + target_vtotal_diff < m->vsync_end)
 8111			continue;
 8112
 8113		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
 8114		if (!new_mode)
 8115			goto out;
 8116
 8117		new_mode->vtotal += (u16)target_vtotal_diff;
 8118		new_mode->vsync_start += (u16)target_vtotal_diff;
 8119		new_mode->vsync_end += (u16)target_vtotal_diff;
 8120		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
 8121		new_mode->type |= DRM_MODE_TYPE_DRIVER;
 8122
 8123		if (!is_duplicate_mode(aconnector, new_mode)) {
 8124			drm_mode_probed_add(&aconnector->base, new_mode);
 8125			new_modes_count += 1;
 8126		} else
 8127			drm_mode_destroy(aconnector->base.dev, new_mode);
 8128	}
 8129 out:
 8130	return new_modes_count;
 8131}
 8132
 8133static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
 8134						   const struct drm_edid *drm_edid)
 8135{
 8136	struct amdgpu_dm_connector *amdgpu_dm_connector =
 8137		to_amdgpu_dm_connector(connector);
 8138
 8139	if (!(amdgpu_freesync_vid_mode && drm_edid))
 8140		return;
 8141
 8142	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
 8143		amdgpu_dm_connector->num_modes +=
 8144			add_fs_modes(amdgpu_dm_connector);
 8145}
 8146
 8147static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 8148{
 8149	struct amdgpu_dm_connector *amdgpu_dm_connector =
 8150			to_amdgpu_dm_connector(connector);
 8151	struct drm_encoder *encoder;
 8152	const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
 8153	struct dc_link_settings *verified_link_cap =
 8154			&amdgpu_dm_connector->dc_link->verified_link_cap;
 8155	const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
 8156
 8157	encoder = amdgpu_dm_connector_to_encoder(connector);
 8158
 8159	if (!drm_edid) {
 8160		amdgpu_dm_connector->num_modes =
 8161				drm_add_modes_noedid(connector, 640, 480);
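		/* Links capable of 128b/132b encoding also get 1920x1080 fallback modes. */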
 8162		if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
 8163			amdgpu_dm_connector->num_modes +=
 8164				drm_add_modes_noedid(connector, 1920, 1080);
 8165	} else {
 8166		amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
 8167		if (encoder)
 8168			amdgpu_dm_connector_add_common_modes(encoder, connector);
 8169		amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
 8170	}
 8171	amdgpu_dm_fbc_init(connector);
 8172
 8173	return amdgpu_dm_connector->num_modes;
 8174}
 8175
 8176static const u32 supported_colorspaces =
 8177	BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
 8178	BIT(DRM_MODE_COLORIMETRY_OPRGB) |
 8179	BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
 8180	BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
 8181
 8182void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 8183				     struct amdgpu_dm_connector *aconnector,
 8184				     int connector_type,
 8185				     struct dc_link *link,
 8186				     int link_index)
 8187{
 8188	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
 8189
 8190	/*
 8191	 * Some of the properties below require access to state, like bpc.
 8192	 * Allocate some default initial connector state with our reset helper.
 8193	 */
 8194	if (aconnector->base.funcs->reset)
 8195		aconnector->base.funcs->reset(&aconnector->base);
 8196
 8197	aconnector->connector_id = link_index;
 8198	aconnector->bl_idx = -1;
 8199	aconnector->dc_link = link;
 8200	aconnector->base.interlace_allowed = false;
 8201	aconnector->base.doublescan_allowed = false;
 8202	aconnector->base.stereo_allowed = false;
 8203	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
 8204	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
 8205	aconnector->audio_inst = -1;
 8206	aconnector->pack_sdp_v1_3 = false;
 8207	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
 8208	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
 8209	mutex_init(&aconnector->hpd_lock);
 8210	mutex_init(&aconnector->handle_mst_msg_ready);
 8211
 8212	/*
  8213	 * Configure HPD hot plug support. The default value of connector->polled
  8214	 * is 0, which means HPD hot plug is not supported.
 8215	 */
 8216	switch (connector_type) {
 8217	case DRM_MODE_CONNECTOR_HDMIA:
 8218		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 8219		aconnector->base.ycbcr_420_allowed =
 8220			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
 8221		break;
 8222	case DRM_MODE_CONNECTOR_DisplayPort:
 8223		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 8224		link->link_enc = link_enc_cfg_get_link_enc(link);
 8225		ASSERT(link->link_enc);
 8226		if (link->link_enc)
 8227			aconnector->base.ycbcr_420_allowed =
 8228			link->link_enc->features.dp_ycbcr420_supported ? true : false;
 8229		break;
 8230	case DRM_MODE_CONNECTOR_DVID:
 8231		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 8232		break;
 8233	default:
 8234		break;
 8235	}
 8236
 8237	drm_object_attach_property(&aconnector->base.base,
 8238				dm->ddev->mode_config.scaling_mode_property,
 8239				DRM_MODE_SCALE_NONE);
 8240
 8241	drm_object_attach_property(&aconnector->base.base,
 8242				adev->mode_info.underscan_property,
 8243				UNDERSCAN_OFF);
 8244	drm_object_attach_property(&aconnector->base.base,
 8245				adev->mode_info.underscan_hborder_property,
 8246				0);
 8247	drm_object_attach_property(&aconnector->base.base,
 8248				adev->mode_info.underscan_vborder_property,
 8249				0);
 8250
 8251	if (!aconnector->mst_root)
 8252		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 8253
 8254	aconnector->base.state->max_bpc = 16;
 8255	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 8256
 8257	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
 8258		/* Content Type is currently only implemented for HDMI. */
 8259		drm_connector_attach_content_type_property(&aconnector->base);
 8260	}
 8261
 8262	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
 8263		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
 8264			drm_connector_attach_colorspace_property(&aconnector->base);
 8265	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
 8266		   connector_type == DRM_MODE_CONNECTOR_eDP) {
 8267		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
 8268			drm_connector_attach_colorspace_property(&aconnector->base);
 8269	}
 8270
 8271	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
 8272	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 8273	    connector_type == DRM_MODE_CONNECTOR_eDP) {
 8274		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
 8275
 8276		if (!aconnector->mst_root)
 8277			drm_connector_attach_vrr_capable_property(&aconnector->base);
 8278
 8279		if (adev->dm.hdcp_workqueue)
 8280			drm_connector_attach_content_protection_property(&aconnector->base, true);
 8281	}
 8282}
 8283
 8284static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
 8285			      struct i2c_msg *msgs, int num)
 8286{
 8287	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
 8288	struct ddc_service *ddc_service = i2c->ddc_service;
 8289	struct i2c_command cmd;
 8290	int i;
 8291	int result = -EIO;
 8292
 8293	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
 8294		return result;
 8295
 8296	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
 8297
 8298	if (!cmd.payloads)
 8299		return result;
 8300
 8301	cmd.number_of_payloads = num;
 8302	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
 8303	cmd.speed = 100;
 8304
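	/* Translate each Linux i2c_msg into a DC i2c_payload; the I2C_M_RD
	 * flag distinguishes reads from writes.
	 */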
 8305	for (i = 0; i < num; i++) {
 8306		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
 8307		cmd.payloads[i].address = msgs[i].addr;
 8308		cmd.payloads[i].length = msgs[i].len;
 8309		cmd.payloads[i].data = msgs[i].buf;
 8310	}
 8311
 8312	if (dc_submit_i2c(
 8313			ddc_service->ctx->dc,
 8314			ddc_service->link->link_index,
 8315			&cmd))
 8316		result = num;
 8317
 8318	kfree(cmd.payloads);
 8319	return result;
 8320}
 8321
 8322static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
 8323{
 8324	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 8325}
 8326
 8327static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
 8328	.master_xfer = amdgpu_dm_i2c_xfer,
 8329	.functionality = amdgpu_dm_i2c_func,
 8330};
 8331
 8332static struct amdgpu_i2c_adapter *
 8333create_i2c(struct ddc_service *ddc_service,
 8334	   int link_index,
 8335	   int *res)
 8336{
 8337	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
 8338	struct amdgpu_i2c_adapter *i2c;
 8339
 8340	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
 8341	if (!i2c)
 8342		return NULL;
 8343	i2c->base.owner = THIS_MODULE;
 8344	i2c->base.dev.parent = &adev->pdev->dev;
 8345	i2c->base.algo = &amdgpu_dm_i2c_algo;
 8346	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
 8347	i2c_set_adapdata(&i2c->base, i2c);
 8348	i2c->ddc_service = ddc_service;
 8349
 8350	return i2c;
 8351}
 8352
 8353
 8354/*
 8355 * Note: this function assumes that dc_link_detect() was called for the
 8356 * dc_link which will be represented by this aconnector.
 8357 */
 8358static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 8359				    struct amdgpu_dm_connector *aconnector,
 8360				    u32 link_index,
 8361				    struct amdgpu_encoder *aencoder)
 8362{
 8363	int res = 0;
 8364	int connector_type;
 8365	struct dc *dc = dm->dc;
 8366	struct dc_link *link = dc_get_link_at_index(dc, link_index);
 8367	struct amdgpu_i2c_adapter *i2c;
 8368
 8369	/* Not needed for writeback connector */
 8370	link->priv = aconnector;
 8371
 8372
 8373	i2c = create_i2c(link->ddc, link->link_index, &res);
 8374	if (!i2c) {
 8375		DRM_ERROR("Failed to create i2c adapter data\n");
 8376		return -ENOMEM;
 8377	}
 8378
 8379	aconnector->i2c = i2c;
 8380	res = i2c_add_adapter(&i2c->base);
 8381
 8382	if (res) {
 8383		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
 8384		goto out_free;
 8385	}
 8386
 8387	connector_type = to_drm_connector_type(link->connector_signal);
 8388
 8389	res = drm_connector_init_with_ddc(
 8390			dm->ddev,
 8391			&aconnector->base,
 8392			&amdgpu_dm_connector_funcs,
 8393			connector_type,
 8394			&i2c->base);
 8395
 8396	if (res) {
 8397		DRM_ERROR("connector_init failed\n");
 8398		aconnector->connector_id = -1;
 8399		goto out_free;
 8400	}
 8401
 8402	drm_connector_helper_add(
 8403			&aconnector->base,
 8404			&amdgpu_dm_connector_helper_funcs);
 8405
 8406	amdgpu_dm_connector_init_helper(
 8407		dm,
 8408		aconnector,
 8409		connector_type,
 8410		link,
 8411		link_index);
 8412
 8413	drm_connector_attach_encoder(
 8414		&aconnector->base, &aencoder->base);
 8415
 8416	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
 8417		|| connector_type == DRM_MODE_CONNECTOR_eDP)
 8418		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
 8419
 8420out_free:
 8421	if (res) {
 8422		kfree(i2c);
 8423		aconnector->i2c = NULL;
 8424	}
 8425	return res;
 8426}
 8427
 8428int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
 8429{
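	/* The returned value is a bitmask with one bit per available CRTC,
	 * i.e. (1 << num_crtc) - 1 for 1-6 CRTCs, with 0x3f as the default.
	 */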
 8430	switch (adev->mode_info.num_crtc) {
 8431	case 1:
 8432		return 0x1;
 8433	case 2:
 8434		return 0x3;
 8435	case 3:
 8436		return 0x7;
 8437	case 4:
 8438		return 0xf;
 8439	case 5:
 8440		return 0x1f;
 8441	case 6:
 8442	default:
 8443		return 0x3f;
 8444	}
 8445}
 8446
 8447static int amdgpu_dm_encoder_init(struct drm_device *dev,
 8448				  struct amdgpu_encoder *aencoder,
 8449				  uint32_t link_index)
 8450{
 8451	struct amdgpu_device *adev = drm_to_adev(dev);
 8452
 8453	int res = drm_encoder_init(dev,
 8454				   &aencoder->base,
 8455				   &amdgpu_dm_encoder_funcs,
 8456				   DRM_MODE_ENCODER_TMDS,
 8457				   NULL);
 8458
 8459	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
 8460
 8461	if (!res)
 8462		aencoder->encoder_id = link_index;
 8463	else
 8464		aencoder->encoder_id = -1;
 8465
 8466	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
 8467
 8468	return res;
 8469}
 8470
 8471static void manage_dm_interrupts(struct amdgpu_device *adev,
 8472				 struct amdgpu_crtc *acrtc,
 8473				 struct dm_crtc_state *acrtc_state)
 8474{
 8475	struct drm_vblank_crtc_config config = {0};
 8476	struct dc_crtc_timing *timing;
 8477	int offdelay;
 8478
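	/* When a CRTC state is provided, vblank is (re)enabled with either an
	 * off-delay of roughly two frames or immediate-disable semantics;
	 * with no CRTC state, vblank is simply turned off.
	 */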
 8479	if (acrtc_state) {
 8480		if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
 8481		    IP_VERSION(3, 5, 0) ||
 8482		    acrtc_state->stream->link->psr_settings.psr_version <
 8483		    DC_PSR_VERSION_UNSUPPORTED ||
 8484		    !(adev->flags & AMD_IS_APU)) {
 8485			timing = &acrtc_state->stream->timing;
 8486
 8487			/* at least 2 frames */
 8488			offdelay = DIV64_U64_ROUND_UP((u64)20 *
 8489						      timing->v_total *
 8490						      timing->h_total,
 8491						      timing->pix_clk_100hz);
 8492
 8493			config.offdelay_ms = offdelay ?: 30;
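			/* Illustrative arithmetic (assumed 1080p60 CEA timing):
			 * h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000,
			 * so offdelay = 20 * 2200 * 1125 / 1485000 = 34 ms,
			 * i.e. roughly two 16.7 ms frames. A zero result falls
			 * back to 30 ms via the ?: operator above.
			 */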
 8494		} else {
 8495			config.disable_immediate = true;
 8496		}
 8497
 8498		drm_crtc_vblank_on_config(&acrtc->base,
 8499					  &config);
 8500	} else {
 8501		drm_crtc_vblank_off(&acrtc->base);
 8502	}
 8503}
 8504
 8505static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
 8506				      struct amdgpu_crtc *acrtc)
 8507{
 8508	int irq_type =
 8509		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
 8510
 8511	/**
  8512	 * This reads the current state for the IRQ and forcibly reapplies
 8513	 * the setting to hardware.
 8514	 */
 8515	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
 8516}
 8517
 8518static bool
 8519is_scaling_state_different(const struct dm_connector_state *dm_state,
 8520			   const struct dm_connector_state *old_dm_state)
 8521{
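	/* Scaling is considered changed when the scaling mode differs, when
	 * underscan was toggled while non-zero borders were in use, or when
	 * the border sizes themselves changed.
	 */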
 8522	if (dm_state->scaling != old_dm_state->scaling)
 8523		return true;
 8524	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
 8525		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
 8526			return true;
 8527	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
 8528		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
 8529			return true;
 8530	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
 8531		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
 8532		return true;
 8533	return false;
 8534}
 8535
 8536static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
 8537					    struct drm_crtc_state *old_crtc_state,
 8538					    struct drm_connector_state *new_conn_state,
 8539					    struct drm_connector_state *old_conn_state,
 8540					    const struct drm_connector *connector,
 8541					    struct hdcp_workqueue *hdcp_w)
 8542{
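	/* Returns true when the HDCP state machine needs to (re)program content
	 * protection for this connector, false when the current state can be kept.
	 */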
 8543	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 8544	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 8545
 8546	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
 8547		connector->index, connector->status, connector->dpms);
 8548	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
 8549		old_conn_state->content_protection, new_conn_state->content_protection);
 8550
 8551	if (old_crtc_state)
 8552		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
 8553		old_crtc_state->enable,
 8554		old_crtc_state->active,
 8555		old_crtc_state->mode_changed,
 8556		old_crtc_state->active_changed,
 8557		old_crtc_state->connectors_changed);
 8558
 8559	if (new_crtc_state)
 8560		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
 8561		new_crtc_state->enable,
 8562		new_crtc_state->active,
 8563		new_crtc_state->mode_changed,
 8564		new_crtc_state->active_changed,
 8565		new_crtc_state->connectors_changed);
 8566
 8567	/* hdcp content type change */
 8568	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
 8569	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 8570		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 8571		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
 8572		return true;
 8573	}
 8574
  8575	/* CP is being re-enabled, ignore this */
 8576	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
 8577	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
 8578		if (new_crtc_state && new_crtc_state->mode_changed) {
 8579			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 8580			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
 8581			return true;
 8582		}
 8583		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
 8584		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
 8585		return false;
 8586	}
 8587
  8588	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
 8589	 *
 8590	 * Handles:	UNDESIRED -> ENABLED
 8591	 */
 8592	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
 8593	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
 8594		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 8595
 8596	/* Stream removed and re-enabled
 8597	 *
 8598	 * Can sometimes overlap with the HPD case,
 8599	 * thus set update_hdcp to false to avoid
 8600	 * setting HDCP multiple times.
 8601	 *
 8602	 * Handles:	DESIRED -> DESIRED (Special case)
 8603	 */
 8604	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
 8605		new_conn_state->crtc && new_conn_state->crtc->enabled &&
 8606		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
 8607		dm_con_state->update_hdcp = false;
 8608		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
 8609			__func__);
 8610		return true;
 8611	}
 8612
 8613	/* Hot-plug, headless s3, dpms
 8614	 *
 8615	 * Only start HDCP if the display is connected/enabled.
 8616	 * update_hdcp flag will be set to false until the next
 8617	 * HPD comes in.
 8618	 *
 8619	 * Handles:	DESIRED -> DESIRED (Special case)
 8620	 */
 8621	if (dm_con_state->update_hdcp &&
 8622	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
 8623	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
 8624		dm_con_state->update_hdcp = false;
 8625		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
 8626			__func__);
 8627		return true;
 8628	}
 8629
 8630	if (old_conn_state->content_protection == new_conn_state->content_protection) {
 8631		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
 8632			if (new_crtc_state && new_crtc_state->mode_changed) {
 8633				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
 8634					__func__);
 8635				return true;
 8636			}
 8637			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
 8638				__func__);
 8639			return false;
 8640		}
 8641
 8642		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
 8643		return false;
 8644	}
 8645
 8646	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
 8647		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
 8648			__func__);
 8649		return true;
 8650	}
 8651
 8652	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
 8653	return false;
 8654}
 8655
 8656static void remove_stream(struct amdgpu_device *adev,
 8657			  struct amdgpu_crtc *acrtc,
 8658			  struct dc_stream_state *stream)
 8659{
 8660	/* this is the update mode case */
 8661
 8662	acrtc->otg_inst = -1;
 8663	acrtc->enabled = false;
 8664}
 8665
 8666static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
 8667{
 8668
 8669	assert_spin_locked(&acrtc->base.dev->event_lock);
 8670	WARN_ON(acrtc->event);
 8671
 8672	acrtc->event = acrtc->base.state->event;
 8673
 8674	/* Set the flip status */
 8675	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 8676
 8677	/* Mark this event as consumed */
 8678	acrtc->base.state->event = NULL;
 8679
 8680	drm_dbg_state(acrtc->base.dev,
 8681		      "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
 8682		      acrtc->crtc_id);
 8683}
 8684
 8685static void update_freesync_state_on_stream(
 8686	struct amdgpu_display_manager *dm,
 8687	struct dm_crtc_state *new_crtc_state,
 8688	struct dc_stream_state *new_stream,
 8689	struct dc_plane_state *surface,
 8690	u32 flip_timestamp_in_us)
 8691{
 8692	struct mod_vrr_params vrr_params;
 8693	struct dc_info_packet vrr_infopacket = {0};
 8694	struct amdgpu_device *adev = dm->adev;
 8695	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
 8696	unsigned long flags;
 8697	bool pack_sdp_v1_3 = false;
 8698	struct amdgpu_dm_connector *aconn;
 8699	enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
 8700
 8701	if (!new_stream)
 8702		return;
 8703
 8704	/*
 8705	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
 8706	 * For now it's sufficient to just guard against these conditions.
 8707	 */
 8708
 8709	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
 8710		return;
 8711
 8712	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 8713	vrr_params = acrtc->dm_irq_params.vrr_params;
 8714
 8715	if (surface) {
 8716		mod_freesync_handle_preflip(
 8717			dm->freesync_module,
 8718			surface,
 8719			new_stream,
 8720			flip_timestamp_in_us,
 8721			&vrr_params);
 8722
 8723		if (adev->family < AMDGPU_FAMILY_AI &&
 8724		    amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
 8725			mod_freesync_handle_v_update(dm->freesync_module,
 8726						     new_stream, &vrr_params);
 8727
 8728			/* Need to call this before the frame ends. */
 8729			dc_stream_adjust_vmin_vmax(dm->dc,
 8730						   new_crtc_state->stream,
 8731						   &vrr_params.adjust);
 8732		}
 8733	}
 8734
 8735	aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
 8736
 8737	if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
 8738		pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
 8739
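		/* Pick the FreeSync SDP layout matching the AMD VSDB version
		 * reported by the sink: version 1/2/3 maps to packet type
		 * FS_V1/FS_V2/FS_V3; otherwise the generic VRR packet is kept.
		 */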
 8740		if (aconn->vsdb_info.amd_vsdb_version == 1)
 8741			packet_type = PACKET_TYPE_FS_V1;
 8742		else if (aconn->vsdb_info.amd_vsdb_version == 2)
 8743			packet_type = PACKET_TYPE_FS_V2;
 8744		else if (aconn->vsdb_info.amd_vsdb_version == 3)
 8745			packet_type = PACKET_TYPE_FS_V3;
 8746
 8747		mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
 8748					&new_stream->adaptive_sync_infopacket);
 8749	}
 8750
 8751	mod_freesync_build_vrr_infopacket(
 8752		dm->freesync_module,
 8753		new_stream,
 8754		&vrr_params,
 8755		packet_type,
 8756		TRANSFER_FUNC_UNKNOWN,
 8757		&vrr_infopacket,
 8758		pack_sdp_v1_3);
 8759
 8760	new_crtc_state->freesync_vrr_info_changed |=
 8761		(memcmp(&new_crtc_state->vrr_infopacket,
 8762			&vrr_infopacket,
 8763			sizeof(vrr_infopacket)) != 0);
 8764
 8765	acrtc->dm_irq_params.vrr_params = vrr_params;
 8766	new_crtc_state->vrr_infopacket = vrr_infopacket;
 8767
 8768	new_stream->vrr_infopacket = vrr_infopacket;
 8769	new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
 8770
 8771	if (new_crtc_state->freesync_vrr_info_changed)
 8772		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
 8773			      new_crtc_state->base.crtc->base.id,
 8774			      (int)new_crtc_state->base.vrr_enabled,
 8775			      (int)vrr_params.state);
 8776
 8777	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 8778}
 8779
 8780static void update_stream_irq_parameters(
 8781	struct amdgpu_display_manager *dm,
 8782	struct dm_crtc_state *new_crtc_state)
 8783{
 8784	struct dc_stream_state *new_stream = new_crtc_state->stream;
 8785	struct mod_vrr_params vrr_params;
 8786	struct mod_freesync_config config = new_crtc_state->freesync_config;
 8787	struct amdgpu_device *adev = dm->adev;
 8788	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
 8789	unsigned long flags;
 8790
 8791	if (!new_stream)
 8792		return;
 8793
 8794	/*
 8795	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
 8796	 * For now it's sufficient to just guard against these conditions.
 8797	 */
 8798	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
 8799		return;
 8800
 8801	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 8802	vrr_params = acrtc->dm_irq_params.vrr_params;
 8803
 8804	if (new_crtc_state->vrr_supported &&
 8805	    config.min_refresh_in_uhz &&
 8806	    config.max_refresh_in_uhz) {
 8807		/*
 8808		 * if freesync compatible mode was set, config.state will be set
 8809		 * in atomic check
 8810		 */
 8811		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
 8812		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
 8813		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
 8814			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
 8815			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
 8816			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
 8817			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
 8818		} else {
 8819			config.state = new_crtc_state->base.vrr_enabled ?
 8820						     VRR_STATE_ACTIVE_VARIABLE :
 8821						     VRR_STATE_INACTIVE;
 8822		}
 8823	} else {
 8824		config.state = VRR_STATE_UNSUPPORTED;
 8825	}
 8826
 8827	mod_freesync_build_vrr_params(dm->freesync_module,
 8828				      new_stream,
 8829				      &config, &vrr_params);
 8830
 8831	new_crtc_state->freesync_config = config;
 8832	/* Copy state for access from DM IRQ handler */
 8833	acrtc->dm_irq_params.freesync_config = config;
 8834	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
 8835	acrtc->dm_irq_params.vrr_params = vrr_params;
 8836	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 8837}
 8838
 8839static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
 8840					    struct dm_crtc_state *new_state)
 8841{
 8842	bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
 8843	bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
 8844
 8845	if (!old_vrr_active && new_vrr_active) {
 8846		/* Transition VRR inactive -> active:
 8847		 * While VRR is active, we must not disable vblank irq, as a
  8848		 * re-enable after a disable would compute bogus vblank/pflip
  8849		 * timestamps if it happened inside the display front porch.
 8850		 *
 8851		 * We also need vupdate irq for the actual core vblank handling
 8852		 * at end of vblank.
 8853		 */
 8854		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
 8855		WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
 8856		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
 8857				 __func__, new_state->base.crtc->base.id);
 8858	} else if (old_vrr_active && !new_vrr_active) {
 8859		/* Transition VRR active -> inactive:
 8860		 * Allow vblank irq disable again for fixed refresh rate.
 8861		 */
 8862		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
 8863		drm_crtc_vblank_put(new_state->base.crtc);
 8864		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
 8865				 __func__, new_state->base.crtc->base.id);
 8866	}
 8867}
 8868
 8869static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
 8870{
 8871	struct drm_plane *plane;
 8872	struct drm_plane_state *old_plane_state;
 8873	int i;
 8874
 8875	/*
 8876	 * TODO: Make this per-stream so we don't issue redundant updates for
 8877	 * commits with multiple streams.
 8878	 */
 8879	for_each_old_plane_in_state(state, plane, old_plane_state, i)
 8880		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 8881			amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
 8882}
 8883
 8884static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
 8885{
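	/* Returns the TTM memory type backing the framebuffer's BO (e.g. VRAM
	 * or GTT), or 0 when the BO currently has no resource attached.
	 */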
 8886	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
 8887
 8888	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
 8889}
 8890
 8891static void amdgpu_dm_update_cursor(struct drm_plane *plane,
 8892				    struct drm_plane_state *old_plane_state,
 8893				    struct dc_stream_update *update)
 8894{
 8895	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 8896	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
 8897	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
 8898	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
 8899	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 8900	uint64_t address = afb ? afb->address : 0;
 8901	struct dc_cursor_position position = {0};
 8902	struct dc_cursor_attributes attributes;
 8903	int ret;
 8904
 8905	if (!plane->state->fb && !old_plane_state->fb)
 8906		return;
 8907
 8908	drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
 8909		       amdgpu_crtc->crtc_id, plane->state->crtc_w,
 8910		       plane->state->crtc_h);
 8911
 8912	ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
 8913	if (ret)
 8914		return;
 8915
 8916	if (!position.enable) {
 8917		/* turn off cursor */
 8918		if (crtc_state && crtc_state->stream) {
 8919			dc_stream_set_cursor_position(crtc_state->stream,
 8920						      &position);
 8921			update->cursor_position = &crtc_state->stream->cursor_position;
 8922		}
 8923		return;
 8924	}
 8925
 8926	amdgpu_crtc->cursor_width = plane->state->crtc_w;
 8927	amdgpu_crtc->cursor_height = plane->state->crtc_h;
 8928
 8929	memset(&attributes, 0, sizeof(attributes));
 8930	attributes.address.high_part = upper_32_bits(address);
 8931	attributes.address.low_part  = lower_32_bits(address);
 8932	attributes.width             = plane->state->crtc_w;
 8933	attributes.height            = plane->state->crtc_h;
 8934	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
 8935	attributes.rotation_angle    = 0;
 8936	attributes.attribute_flags.value = 0;
 8937
 8938	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
 8939	 * legacy gamma setup.
 8940	 */
 8941	if (crtc_state->cm_is_degamma_srgb &&
 8942	    adev->dm.dc->caps.color.dpp.gamma_corr)
 8943		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
 8944
 8945	if (afb)
 8946		attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
 8947
 8948	if (crtc_state->stream) {
 8949		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
 8950						     &attributes))
 8951			DRM_ERROR("DC failed to set cursor attributes\n");
 8952
 8953		update->cursor_attributes = &crtc_state->stream->cursor_attributes;
 8954
 8955		if (!dc_stream_set_cursor_position(crtc_state->stream,
 8956						   &position))
 8957			DRM_ERROR("DC failed to set cursor position\n");
 8958
 8959		update->cursor_position = &crtc_state->stream->cursor_position;
 8960	}
 8961}
 8962
 8963static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
 8964					  const struct dm_crtc_state *acrtc_state,
 8965					  const u64 current_ts)
 8966{
 8967	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
 8968	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
 8969	struct amdgpu_dm_connector *aconn =
 8970		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
 8971	bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
 8972
 8973	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
 8974		if (pr->config.replay_supported && !pr->replay_feature_enabled)
 8975			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
 8976		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 8977			     !psr->psr_feature_enabled)
 8978			if (!aconn->disallow_edp_enter_psr)
 8979				amdgpu_dm_link_setup_psr(acrtc_state->stream);
 8980	}
 8981
 8982	/* Decrement skip count when SR is enabled and we're doing fast updates. */
 8983	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
 8984	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
 8985		if (aconn->sr_skip_count > 0)
 8986			aconn->sr_skip_count--;
 8987
 8988		/* Allow SR when skip count is 0. */
 8989		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
 8990
 8991		/*
 8992		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
 8993		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
 8994		 * can be enabled immediately once OS demonstrates an
 8995		 * adequate number of fast atomic commits to notify KMD
 8996		 * of update events. See `vblank_control_worker()`.
 8997		 */
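		/* The 500000000 ns (500 ms) threshold below ensures the dirty
		 * rectangles have been stable for half a second before PSR or
		 * Panel Replay is allowed to engage again.
		 */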
 8998		if (!vrr_active &&
 8999		    acrtc_attach->dm_irq_params.allow_sr_entry &&
 9000#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 9001		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 9002#endif
 9003		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
 9004			if (pr->replay_feature_enabled && !pr->replay_allow_active)
 9005				amdgpu_dm_replay_enable(acrtc_state->stream, true);
 9006			if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
 9007			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
 9008				amdgpu_dm_psr_enable(acrtc_state->stream);
 9009		}
 9010	} else {
 9011		acrtc_attach->dm_irq_params.allow_sr_entry = false;
 9012	}
 9013}
 9014
 9015static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 9016				    struct drm_device *dev,
 9017				    struct amdgpu_display_manager *dm,
 9018				    struct drm_crtc *pcrtc,
 9019				    bool wait_for_vblank)
 9020{
 9021	u32 i;
 9022	u64 timestamp_ns = ktime_get_ns();
 9023	struct drm_plane *plane;
 9024	struct drm_plane_state *old_plane_state, *new_plane_state;
 9025	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
 9026	struct drm_crtc_state *new_pcrtc_state =
 9027			drm_atomic_get_new_crtc_state(state, pcrtc);
 9028	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
 9029	struct dm_crtc_state *dm_old_crtc_state =
 9030			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 9031	int planes_count = 0, vpos, hpos;
 9032	unsigned long flags;
 9033	u32 target_vblank, last_flip_vblank;
 9034	bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
 9035	bool cursor_update = false;
 9036	bool pflip_present = false;
 9037	bool dirty_rects_changed = false;
 9038	bool updated_planes_and_streams = false;
 9039	struct {
 9040		struct dc_surface_update surface_updates[MAX_SURFACES];
 9041		struct dc_plane_info plane_infos[MAX_SURFACES];
 9042		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 9043		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 9044		struct dc_stream_update stream_update;
 9045	} *bundle;
 9046
 9047	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 9048
 9049	if (!bundle) {
 9050		drm_err(dev, "Failed to allocate update bundle\n");
 9051		goto cleanup;
 9052	}
 9053
 9054	/*
 9055	 * Disable the cursor first if we're disabling all the planes.
 9056	 * It'll remain on the screen after the planes are re-enabled
 9057	 * if we don't.
 9058	 *
 9059	 * If the cursor is transitioning from native to overlay mode, the
 9060	 * native cursor needs to be disabled first.
 9061	 */
 9062	if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
 9063	    dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
 9064		struct dc_cursor_position cursor_position = {0};
 9065
 9066		if (!dc_stream_set_cursor_position(acrtc_state->stream,
 9067						   &cursor_position))
 9068			drm_err(dev, "DC failed to disable native cursor\n");
 9069
 9070		bundle->stream_update.cursor_position =
 9071				&acrtc_state->stream->cursor_position;
 9072	}
 9073
 9074	if (acrtc_state->active_planes == 0 &&
 9075	    dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
 9076		amdgpu_dm_commit_cursors(state);
 9077
 9078	/* update planes when needed */
 9079	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
 9080		struct drm_crtc *crtc = new_plane_state->crtc;
 9081		struct drm_crtc_state *new_crtc_state;
 9082		struct drm_framebuffer *fb = new_plane_state->fb;
 9083		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
 9084		bool plane_needs_flip;
 9085		struct dc_plane_state *dc_plane;
 9086		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 9087
 9088		/* Cursor plane is handled after stream updates */
 9089		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
 9090		    acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
 9091			if ((fb && crtc == pcrtc) ||
 9092			    (old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
 9093				cursor_update = true;
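				/* On ASICs reporting a non-zero DCE_HWIP version (DCN),
				 * the cursor is programmed as part of the stream update;
				 * legacy parts handle it via amdgpu_dm_commit_cursors().
				 */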
 9094				if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0)
 9095					amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update);
 9096			}
 9097
 9098			continue;
 9099		}
 9100
 9101		if (!fb || !crtc || pcrtc != crtc)
 9102			continue;
 9103
 9104		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 9105		if (!new_crtc_state->active)
 9106			continue;
 9107
 9108		dc_plane = dm_new_plane_state->dc_state;
 9109		if (!dc_plane)
 9110			continue;
 9111
 9112		bundle->surface_updates[planes_count].surface = dc_plane;
 9113		if (new_pcrtc_state->color_mgmt_changed) {
 9114			bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
 9115			bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
 9116			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
 9117			bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
 9118			bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
 9119			bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
 9120			bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
 9121		}
 9122
 9123		amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
 9124				     &bundle->scaling_infos[planes_count]);
 9125
 9126		bundle->surface_updates[planes_count].scaling_info =
 9127			&bundle->scaling_infos[planes_count];
 9128
 9129		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
 9130
 9131		pflip_present = pflip_present || plane_needs_flip;
 9132
 9133		if (!plane_needs_flip) {
 9134			planes_count += 1;
 9135			continue;
 9136		}
 9137
 9138		fill_dc_plane_info_and_addr(
 9139			dm->adev, new_plane_state,
 9140			afb->tiling_flags,
 9141			&bundle->plane_infos[planes_count],
 9142			&bundle->flip_addrs[planes_count].address,
 9143			afb->tmz_surface);
 9144
 9145		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
 9146				 new_plane_state->plane->index,
 9147				 bundle->plane_infos[planes_count].dcc.enable);
 9148
 9149		bundle->surface_updates[planes_count].plane_info =
 9150			&bundle->plane_infos[planes_count];
 9151
 9152		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
 9153		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
 9154			fill_dc_dirty_rects(plane, old_plane_state,
 9155					    new_plane_state, new_crtc_state,
 9156					    &bundle->flip_addrs[planes_count],
 9157					    acrtc_state->stream->link->psr_settings.psr_version ==
 9158					    DC_PSR_VERSION_SU_1,
 9159					    &dirty_rects_changed);
 9160
 9161			/*
  9162			 * If the dirty regions changed, PSR-SU needs to be disabled temporarily
  9163			 * and enabled again once the dirty regions are stable, to avoid video glitches.
  9164			 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses the
  9165			 * video while PSR-SU is disabled.
 9166			 */
 9167			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
 9168			    acrtc_attach->dm_irq_params.allow_sr_entry &&
 9169#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 9170			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 9171#endif
 9172			    dirty_rects_changed) {
 9173				mutex_lock(&dm->dc_lock);
 9174				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
 9175				timestamp_ns;
 9176				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
 9177					amdgpu_dm_psr_disable(acrtc_state->stream, true);
 9178				mutex_unlock(&dm->dc_lock);
 9179			}
 9180		}
 9181
 9182		/*
 9183		 * Only allow immediate flips for fast updates that don't
 9184		 * change memory domain, FB pitch, DCC state, rotation or
 9185		 * mirroring.
 9186		 *
 9187		 * dm_crtc_helper_atomic_check() only accepts async flips with
 9188		 * fast updates.
 9189		 */
 9190		if (crtc->state->async_flip &&
 9191		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
 9192		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
 9193			drm_warn_once(state->dev,
 9194				      "[PLANE:%d:%s] async flip with non-fast update\n",
 9195				      plane->base.id, plane->name);
 9196
 9197		bundle->flip_addrs[planes_count].flip_immediate =
 9198			crtc->state->async_flip &&
 9199			acrtc_state->update_type == UPDATE_TYPE_FAST &&
 9200			get_mem_type(old_plane_state->fb) == get_mem_type(fb);
 9201
 9202		timestamp_ns = ktime_get_ns();
 9203		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
 9204		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
 9205		bundle->surface_updates[planes_count].surface = dc_plane;
 9206
 9207		if (!bundle->surface_updates[planes_count].surface) {
 9208			DRM_ERROR("No surface for CRTC: id=%d\n",
 9209					acrtc_attach->crtc_id);
 9210			continue;
 9211		}
 9212
 9213		if (plane == pcrtc->primary)
 9214			update_freesync_state_on_stream(
 9215				dm,
 9216				acrtc_state,
 9217				acrtc_state->stream,
 9218				dc_plane,
 9219				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
 9220
 9221		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
 9222				 __func__,
 9223				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
 9224				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
 9225
 9226		planes_count += 1;
 9227
 9228	}
 9229
 9230	if (pflip_present) {
 9231		if (!vrr_active) {
 9232			/* Use old throttling in non-vrr fixed refresh rate mode
 9233			 * to keep flip scheduling based on target vblank counts
 9234			 * working in a backwards compatible way, e.g., for
 9235			 * clients using the GLX_OML_sync_control extension or
 9236			 * DRI3/Present extension with defined target_msc.
 9237			 */
 9238			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
 9239		} else {
 9240			/* For variable refresh rate mode only:
 9241			 * Get vblank of last completed flip to avoid > 1 vrr
 9242			 * flips per video frame by use of throttling, but allow
 9243			 * flip programming anywhere in the possibly large
 9244			 * variable vrr vblank interval for fine-grained flip
 9245			 * timing control and more opportunity to avoid stutter
 9246			 * on late submission of flips.
 9247			 */
 9248			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 9249			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
 9250			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 9251		}
 9252
 9253		target_vblank = last_flip_vblank + wait_for_vblank;
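		/* wait_for_vblank is 0 or 1, so the target is either the vblank of
		 * the last completed flip or the one immediately after it.
		 */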
 9254
 9255		/*
 9256		 * Wait until we're out of the vertical blank period before the one
 9257		 * targeted by the flip
 9258		 */
 9259		while ((acrtc_attach->enabled &&
 9260			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
 9261							    0, &vpos, &hpos, NULL,
 9262							    NULL, &pcrtc->hwmode)
 9263			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 9264			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 9265			(int)(target_vblank -
 9266			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
 9267			usleep_range(1000, 1100);
 9268		}
 9269
 9270		/**
 9271		 * Prepare the flip event for the pageflip interrupt to handle.
 9272		 *
 9273		 * This only works in the case where we've already turned on the
  9274		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
 9275		 * from 0 -> n planes we have to skip a hardware generated event
 9276		 * and rely on sending it from software.
 9277		 */
 9278		if (acrtc_attach->base.state->event &&
 9279		    acrtc_state->active_planes > 0) {
 9280			drm_crtc_vblank_get(pcrtc);
 9281
 9282			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 9283
 9284			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
 9285			prepare_flip_isr(acrtc_attach);
 9286
 9287			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 9288		}
 9289
 9290		if (acrtc_state->stream) {
 9291			if (acrtc_state->freesync_vrr_info_changed)
 9292				bundle->stream_update.vrr_infopacket =
 9293					&acrtc_state->stream->vrr_infopacket;
 9294		}
 9295	} else if (cursor_update && acrtc_state->active_planes > 0) {
 9296		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 9297		if (acrtc_attach->base.state->event) {
 9298			drm_crtc_vblank_get(pcrtc);
 9299			acrtc_attach->event = acrtc_attach->base.state->event;
 9300			acrtc_attach->base.state->event = NULL;
 9301		}
 9302		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 9303	}
 9304
 9305	/* Update the planes if changed or disable if we don't have any. */
 9306	if ((planes_count || acrtc_state->active_planes == 0) &&
 9307		acrtc_state->stream) {
 9308		/*
 9309		 * If PSR or idle optimizations are enabled then flush out
 9310		 * any pending work before hardware programming.
 9311		 */
 9312		if (dm->vblank_control_workqueue)
 9313			flush_workqueue(dm->vblank_control_workqueue);
 9314
 9315		bundle->stream_update.stream = acrtc_state->stream;
 9316		if (new_pcrtc_state->mode_changed) {
 9317			bundle->stream_update.src = acrtc_state->stream->src;
 9318			bundle->stream_update.dst = acrtc_state->stream->dst;
 9319		}
 9320
 9321		if (new_pcrtc_state->color_mgmt_changed) {
 9322			/*
 9323			 * TODO: This isn't fully correct since we've actually
 9324			 * already modified the stream in place.
 9325			 */
 9326			bundle->stream_update.gamut_remap =
 9327				&acrtc_state->stream->gamut_remap_matrix;
 9328			bundle->stream_update.output_csc_transform =
 9329				&acrtc_state->stream->csc_color_matrix;
 9330			bundle->stream_update.out_transfer_func =
 9331				&acrtc_state->stream->out_transfer_func;
 9332			bundle->stream_update.lut3d_func =
 9333				(struct dc_3dlut *) acrtc_state->stream->lut3d_func;
 9334			bundle->stream_update.func_shaper =
 9335				(struct dc_transfer_func *) acrtc_state->stream->func_shaper;
 9336		}
 9337
 9338		acrtc_state->stream->abm_level = acrtc_state->abm_level;
 9339		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
 9340			bundle->stream_update.abm_level = &acrtc_state->abm_level;
 9341
 9342		mutex_lock(&dm->dc_lock);
 9343		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
 9344			if (acrtc_state->stream->link->replay_settings.replay_allow_active)
 9345				amdgpu_dm_replay_disable(acrtc_state->stream);
 9346			if (acrtc_state->stream->link->psr_settings.psr_allow_active)
 9347				amdgpu_dm_psr_disable(acrtc_state->stream, true);
 9348		}
 9349		mutex_unlock(&dm->dc_lock);
 9350
 9351		/*
 9352		 * If FreeSync state on the stream has changed then we need to
 9353		 * re-adjust the min/max bounds now that DC doesn't handle this
 9354		 * as part of commit.
 9355		 */
 9356		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
 9357			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 9358			dc_stream_adjust_vmin_vmax(
 9359				dm->dc, acrtc_state->stream,
 9360				&acrtc_attach->dm_irq_params.vrr_params.adjust);
 9361			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 9362		}
 9363		mutex_lock(&dm->dc_lock);
 9364		update_planes_and_stream_adapter(dm->dc,
 9365					 acrtc_state->update_type,
 9366					 planes_count,
 9367					 acrtc_state->stream,
 9368					 &bundle->stream_update,
 9369					 bundle->surface_updates);
 9370		updated_planes_and_streams = true;
 9371
 9372		/**
 9373		 * Enable or disable the interrupts on the backend.
 9374		 *
 9375		 * Most pipes are put into power gating when unused.
 9376		 *
 9377		 * When power gating is enabled on a pipe we lose the
 9378		 * interrupt enablement state when power gating is disabled.
 9379		 *
 9380		 * So we need to update the IRQ control state in hardware
 9381		 * whenever the pipe turns on (since it could be previously
 9382		 * power gated) or off (since some pipes can't be power gated
 9383		 * on some ASICs).
 9384		 */
 9385		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
 9386			dm_update_pflip_irq_state(drm_to_adev(dev),
 9387						  acrtc_attach);
 9388
 9389		amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
 9390		mutex_unlock(&dm->dc_lock);
 9391	}
 9392
 9393	/*
 9394	 * Update cursor state *after* programming all the planes.
 9395	 * This avoids redundant programming in the case where we're going
 9396	 * to be disabling a single plane - those pipes are being disabled.
 9397	 */
 9398	if (acrtc_state->active_planes &&
 9399	    (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
 9400	    acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
 9401		amdgpu_dm_commit_cursors(state);
 9402
 9403cleanup:
 9404	kfree(bundle);
 9405}
 9406
 9407static void amdgpu_dm_commit_audio(struct drm_device *dev,
 9408				   struct drm_atomic_state *state)
 9409{
 9410	struct amdgpu_device *adev = drm_to_adev(dev);
 9411	struct amdgpu_dm_connector *aconnector;
 9412	struct drm_connector *connector;
 9413	struct drm_connector_state *old_con_state, *new_con_state;
 9414	struct drm_crtc_state *new_crtc_state;
 9415	struct dm_crtc_state *new_dm_crtc_state;
 9416	const struct dc_stream_status *status;
 9417	int i, inst;
 9418
 9419	/* Notify device removals. */
 9420	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9421		if (old_con_state->crtc != new_con_state->crtc) {
 9422			/* CRTC changes require notification. */
 9423			goto notify;
 9424		}
 9425
 9426		if (!new_con_state->crtc)
 9427			continue;
 9428
 9429		new_crtc_state = drm_atomic_get_new_crtc_state(
 9430			state, new_con_state->crtc);
 9431
 9432		if (!new_crtc_state)
 9433			continue;
 9434
 9435		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 9436			continue;
 9437
 9438notify:
 9439		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 9440			continue;
 9441
 9442		aconnector = to_amdgpu_dm_connector(connector);
 9443
 9444		mutex_lock(&adev->dm.audio_lock);
 9445		inst = aconnector->audio_inst;
 9446		aconnector->audio_inst = -1;
 9447		mutex_unlock(&adev->dm.audio_lock);
 9448
 9449		amdgpu_dm_audio_eld_notify(adev, inst);
 9450	}
 9451
 9452	/* Notify audio device additions. */
 9453	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 9454		if (!new_con_state->crtc)
 9455			continue;
 9456
 9457		new_crtc_state = drm_atomic_get_new_crtc_state(
 9458			state, new_con_state->crtc);
 9459
 9460		if (!new_crtc_state)
 9461			continue;
 9462
 9463		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 9464			continue;
 9465
 9466		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
 9467		if (!new_dm_crtc_state->stream)
 9468			continue;
 9469
 9470		status = dc_stream_get_status(new_dm_crtc_state->stream);
 9471		if (!status)
 9472			continue;
 9473
 9474		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 9475			continue;
 9476
 9477		aconnector = to_amdgpu_dm_connector(connector);
 9478
 9479		mutex_lock(&adev->dm.audio_lock);
 9480		inst = status->audio_inst;
 9481		aconnector->audio_inst = inst;
 9482		mutex_unlock(&adev->dm.audio_lock);
 9483
 9484		amdgpu_dm_audio_eld_notify(adev, inst);
 9485	}
 9486}
 9487
 9488/*
 9489 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 9490 * @crtc_state: the DRM CRTC state
 9491 * @stream_state: the DC stream state.
 9492 *
  9493 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 9494 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 9495 */
 9496static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
 9497						struct dc_stream_state *stream_state)
 9498{
 9499	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 9500}
 9501
 9502static void dm_clear_writeback(struct amdgpu_display_manager *dm,
 9503			      struct dm_crtc_state *crtc_state)
 9504{
 9505	dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
 9506}
 9507
 9508static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 9509					struct dc_state *dc_state)
 9510{
 9511	struct drm_device *dev = state->dev;
 9512	struct amdgpu_device *adev = drm_to_adev(dev);
 9513	struct amdgpu_display_manager *dm = &adev->dm;
 9514	struct drm_crtc *crtc;
 9515	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 9516	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 9517	struct drm_connector_state *old_con_state;
 9518	struct drm_connector *connector;
 9519	bool mode_set_reset_required = false;
 9520	u32 i;
 9521	struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
 9522	bool set_backlight_level = false;
 9523
 9524	/* Disable writeback */
 9525	for_each_old_connector_in_state(state, connector, old_con_state, i) {
 9526		struct dm_connector_state *dm_old_con_state;
 9527		struct amdgpu_crtc *acrtc;
 9528
 9529		if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
 9530			continue;
 9531
 9532		old_crtc_state = NULL;
 9533
 9534		dm_old_con_state = to_dm_connector_state(old_con_state);
 9535		if (!dm_old_con_state->base.crtc)
 9536			continue;
 9537
 9538		acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
 9539		if (acrtc)
 9540			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
 9541
 9542		if (!acrtc || !acrtc->wb_enabled)
 9543			continue;
 9544
 9545		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9546
 9547		dm_clear_writeback(dm, dm_old_crtc_state);
 9548		acrtc->wb_enabled = false;
 9549	}
 9550
 9551	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
 9552				      new_crtc_state, i) {
 9553		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 9554
 9555		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9556
 9557		if (old_crtc_state->active &&
 9558		    (!new_crtc_state->active ||
 9559		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 9560			manage_dm_interrupts(adev, acrtc, NULL);
 9561			dc_stream_release(dm_old_crtc_state->stream);
 9562		}
 9563	}
 9564
 9565	drm_atomic_helper_calc_timestamping_constants(state);
 9566
 9567	/* update changed items */
 9568	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 9569		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 9570
 9571		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9572		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9573
 9574		drm_dbg_state(state->dev,
 9575			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 9576			acrtc->crtc_id,
 9577			new_crtc_state->enable,
 9578			new_crtc_state->active,
 9579			new_crtc_state->planes_changed,
 9580			new_crtc_state->mode_changed,
 9581			new_crtc_state->active_changed,
 9582			new_crtc_state->connectors_changed);
 9583
 9584		/* Disable cursor if disabling crtc */
 9585		if (old_crtc_state->active && !new_crtc_state->active) {
 9586			struct dc_cursor_position position;
 9587
 9588			memset(&position, 0, sizeof(position));
 9589			mutex_lock(&dm->dc_lock);
 9590			dc_exit_ips_for_hw_access(dm->dc);
 9591			dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
 9592			mutex_unlock(&dm->dc_lock);
 9593		}
 9594
 9595		/* Copy all transient state flags into dc state */
 9596		if (dm_new_crtc_state->stream) {
 9597			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
 9598							    dm_new_crtc_state->stream);
 9599		}
 9600
  9601		/* Handle the headless hotplug case, updating new_state and
  9602		 * aconnector as needed.
 9603		 */
 9604
 9605		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
 9606
 9607			drm_dbg_atomic(dev,
 9608				       "Atomic commit: SET crtc id %d: [%p]\n",
 9609				       acrtc->crtc_id, acrtc);
 9610
 9611			if (!dm_new_crtc_state->stream) {
 9612				/*
  9613				 * This could happen because of issues with
  9614				 * userspace notification delivery.
  9615				 * In this case userspace tries to set a mode on
  9616				 * a display which is in fact disconnected, so
  9617				 * dc_sink is NULL on the aconnector.
  9618				 * We expect a mode reset to come soon.
  9619				 *
  9620				 * This can also happen when an unplug occurs
  9621				 * while the resume sequence is finishing.
  9622				 *
  9623				 * In this case, we want to pretend we still
  9624				 * have a sink to keep the pipe running so that
  9625				 * hw state is consistent with the sw state.
 9626				 */
 9627				drm_dbg_atomic(dev,
 9628					       "Failed to create new stream for crtc %d\n",
 9629						acrtc->base.base.id);
 9630				continue;
 9631			}
 9632
 9633			if (dm_old_crtc_state->stream)
 9634				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 9635
 9636			pm_runtime_get_noresume(dev->dev);
 9637
 9638			acrtc->enabled = true;
 9639			acrtc->hw_mode = new_crtc_state->mode;
 9640			crtc->hwmode = new_crtc_state->mode;
 9641			mode_set_reset_required = true;
 9642			set_backlight_level = true;
 9643		} else if (modereset_required(new_crtc_state)) {
 9644			drm_dbg_atomic(dev,
 9645				       "Atomic commit: RESET. crtc id %d:[%p]\n",
 9646				       acrtc->crtc_id, acrtc);
 9647			/* i.e. reset mode */
 9648			if (dm_old_crtc_state->stream)
 9649				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 9650
 9651			mode_set_reset_required = true;
 9652		}
 9653	} /* for_each_crtc_in_state() */
 9654
  9655	/* If there was a mode set or reset, disable eDP PSR and Replay */
 9656	if (mode_set_reset_required) {
 9657		if (dm->vblank_control_workqueue)
 9658			flush_workqueue(dm->vblank_control_workqueue);
 9659
 9660		amdgpu_dm_replay_disable_all(dm);
 9661		amdgpu_dm_psr_disable_all(dm);
 9662	}
 9663
 9664	dm_enable_per_frame_crtc_master_sync(dc_state);
 9665	mutex_lock(&dm->dc_lock);
 9666	dc_exit_ips_for_hw_access(dm->dc);
 9667	WARN_ON(!dc_commit_streams(dm->dc, &params));
 9668
  9669	/* Allow idle optimizations when the active vblank IRQ count is 0 (display off) */
 9670	if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
 9671		dc_allow_idle_optimizations(dm->dc, true);
 9672	mutex_unlock(&dm->dc_lock);
 9673
 9674	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 9675		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 9676
 9677		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9678
 9679		if (dm_new_crtc_state->stream != NULL) {
 9680			const struct dc_stream_status *status =
 9681					dc_stream_get_status(dm_new_crtc_state->stream);
 9682
 9683			if (!status)
 9684				status = dc_state_get_stream_status(dc_state,
 9685									 dm_new_crtc_state->stream);
 9686			if (!status)
 9687				drm_err(dev,
 9688					"got no status for stream %p on acrtc%p\n",
 9689					dm_new_crtc_state->stream, acrtc);
 9690			else
 9691				acrtc->otg_inst = status->primary_otg_inst;
 9692		}
 9693	}
 9694
  9695	/* During boot-up and resume the DC layer will reset the panel brightness
  9696	 * to fix a flicker issue.
  9697	 * This leaves dm->actual_brightness out of sync with the current panel
  9698	 * brightness level (dm->brightness holds the correct panel level).
  9699	 * So we set the backlight level with the dm->brightness value after a mode set.
 9700	 */
 9701	if (set_backlight_level) {
 9702		for (i = 0; i < dm->num_of_edps; i++) {
 9703			if (dm->backlight_dev[i])
 9704				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
 9705		}
 9706	}
 9707}
 9708
 9709static void dm_set_writeback(struct amdgpu_display_manager *dm,
 9710			      struct dm_crtc_state *crtc_state,
 9711			      struct drm_connector *connector,
 9712			      struct drm_connector_state *new_con_state)
 9713{
 9714	struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
 9715	struct amdgpu_device *adev = dm->adev;
 9716	struct amdgpu_crtc *acrtc;
 9717	struct dc_writeback_info *wb_info;
 9718	struct pipe_ctx *pipe = NULL;
 9719	struct amdgpu_framebuffer *afb;
 9720	int i = 0;
 9721
 9722	wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
 9723	if (!wb_info) {
 9724		DRM_ERROR("Failed to allocate wb_info\n");
 9725		return;
 9726	}
 9727
 9728	acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
 9729	if (!acrtc) {
 9730		DRM_ERROR("no amdgpu_crtc found\n");
 9731		kfree(wb_info);
 9732		return;
 9733	}
 9734
 9735	afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
 9736	if (!afb) {
 9737		DRM_ERROR("No amdgpu_framebuffer found\n");
 9738		kfree(wb_info);
 9739		return;
 9740	}
 9741
 9742	for (i = 0; i < MAX_PIPES; i++) {
 9743		if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
 9744			pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
 9745			break;
 9746		}
 9747	}
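	/* A matching pipe is expected here: the CRTC being written back should
	 * have an active stream in the current DC state.
	 */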
 9748
 9749	/* fill in wb_info */
 9750	wb_info->wb_enabled = true;
 9751
 9752	wb_info->dwb_pipe_inst = 0;
 9753	wb_info->dwb_params.dwbscl_black_color = 0;
 9754	wb_info->dwb_params.hdr_mult = 0x1F000;
 9755	wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
 9756	wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
 9757	wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
 9758	wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
 9759
 9760	/* width & height from crtc */
 9761	wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
 9762	wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
 9763	wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
 9764	wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
 9765
 9766	wb_info->dwb_params.cnv_params.crop_en = false;
 9767	wb_info->dwb_params.stereo_params.stereo_enabled = false;
 9768
 9769	wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff;	// 10 bits
 9770	wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
 9771	wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
 9772	wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
 9773
 9774	wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
 9775
 9776	wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
 9777
 9778	wb_info->dwb_params.scaler_taps.h_taps = 4;
 9779	wb_info->dwb_params.scaler_taps.v_taps = 4;
 9780	wb_info->dwb_params.scaler_taps.h_taps_c = 2;
 9781	wb_info->dwb_params.scaler_taps.v_taps_c = 2;
 9782	wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
 9783
 9784	wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
 9785	wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
 9786
 9787	for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
 9788		wb_info->mcif_buf_params.luma_address[i] = afb->address;
 9789		wb_info->mcif_buf_params.chroma_address[i] = 0;
 9790	}
 9791
 9792	wb_info->mcif_buf_params.p_vmid = 1;
 9793	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
 9794		wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
 9795		wb_info->mcif_warmup_params.region_size =
 9796			wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
 9797	}
 9798	wb_info->mcif_warmup_params.p_vmid = 1;
 9799	wb_info->writeback_source_plane = pipe->plane_state;
 9800
 9801	dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
 9802
 9803	acrtc->wb_pending = true;
 9804	acrtc->wb_conn = wb_conn;
 9805	drm_writeback_queue_job(wb_conn, new_con_state);
 9806}
 9807
 9808/**
 9809 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 9810 * @state: The atomic state to commit
 9811 *
 9812 * This will tell DC to commit the constructed DC state from atomic_check,
 9813 * programming the hardware. Any failure here implies a hardware failure, since
 9814 * atomic check should have filtered anything non-kosher.
 9815 */
 9816static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 9817{
 9818	struct drm_device *dev = state->dev;
 9819	struct amdgpu_device *adev = drm_to_adev(dev);
 9820	struct amdgpu_display_manager *dm = &adev->dm;
 9821	struct dm_atomic_state *dm_state;
 9822	struct dc_state *dc_state = NULL;
 9823	u32 i, j;
 9824	struct drm_crtc *crtc;
 9825	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 9826	unsigned long flags;
 9827	bool wait_for_vblank = true;
 9828	struct drm_connector *connector;
 9829	struct drm_connector_state *old_con_state, *new_con_state;
 9830	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 9831	int crtc_disable_count = 0;
 9832
 9833	trace_amdgpu_dm_atomic_commit_tail_begin(state);
 9834
 9835	drm_atomic_helper_update_legacy_modeset_state(dev, state);
 9836	drm_dp_mst_atomic_wait_for_dependencies(state);
 9837
 9838	dm_state = dm_atomic_get_new_state(state);
 9839	if (dm_state && dm_state->context) {
 9840		dc_state = dm_state->context;
 9841		amdgpu_dm_commit_streams(state, dc_state);
 9842	}
 9843
 9844	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9845		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 9846		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 9847		struct amdgpu_dm_connector *aconnector;
 9848
 9849		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 9850			continue;
 9851
 9852		aconnector = to_amdgpu_dm_connector(connector);
 9853
 9854		if (!adev->dm.hdcp_workqueue)
 9855			continue;
 9856
 9857		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
 9858
 9859		if (!connector)
 9860			continue;
 9861
 9862		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
 9863			connector->index, connector->status, connector->dpms);
 9864		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
 9865			old_con_state->content_protection, new_con_state->content_protection);
 9866
 9867		if (aconnector->dc_sink) {
 9868			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
 9869				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
 9870				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
 9871				aconnector->dc_sink->edid_caps.display_name);
 9872			}
 9873		}
 9874
 9875		new_crtc_state = NULL;
 9876		old_crtc_state = NULL;
 9877
 9878		if (acrtc) {
 9879			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 9880			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
 9881		}
 9882
 9883		if (old_crtc_state)
 9884			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
 9885			old_crtc_state->enable,
 9886			old_crtc_state->active,
 9887			old_crtc_state->mode_changed,
 9888			old_crtc_state->active_changed,
 9889			old_crtc_state->connectors_changed);
 9890
 9891		if (new_crtc_state)
 9892			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
 9893			new_crtc_state->enable,
 9894			new_crtc_state->active,
 9895			new_crtc_state->mode_changed,
 9896			new_crtc_state->active_changed,
 9897			new_crtc_state->connectors_changed);
 9898	}
 9899
 9900	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9901		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 9902		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 9903		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 9904
 9905		if (!adev->dm.hdcp_workqueue)
 9906			continue;
 9907
 9908		new_crtc_state = NULL;
 9909		old_crtc_state = NULL;
 9910
 9911		if (acrtc) {
 9912			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 9913			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
 9914		}
 9915
 9916		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9917
 9918		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
 9919		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
 9920			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
 9921			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 9922			dm_new_con_state->update_hdcp = true;
 9923			continue;
 9924		}
 9925
 9926		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
 9927											old_con_state, connector, adev->dm.hdcp_workqueue)) {
 9928			/* When a display is unplugged from an MST hub, the connector is
 9929			 * destroyed within dm_dp_mst_connector_destroy and its HDCP
 9930			 * properties (type, undesired, desired, enabled) are lost. So,
 9931			 * save the HDCP properties into hdcp_work within
 9932			 * amdgpu_dm_atomic_commit_tail. If the same display is plugged
 9933			 * back with the same display index, its HDCP properties will be
 9934			 * retrieved from hdcp_work within dm_dp_mst_get_modes.
 9935			 */
 9936
 9937			bool enable_encryption = false;
 9938
 9939			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
 9940				enable_encryption = true;
 9941
 9942			if (aconnector->dc_link && aconnector->dc_sink &&
 9943				aconnector->dc_link->type == dc_connection_mst_branch) {
 9944				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
 9945				struct hdcp_workqueue *hdcp_w =
 9946					&hdcp_work[aconnector->dc_link->link_index];
 9947
 9948				hdcp_w->hdcp_content_type[connector->index] =
 9949					new_con_state->hdcp_content_type;
 9950				hdcp_w->content_protection[connector->index] =
 9951					new_con_state->content_protection;
 9952			}
 9953
 9954			if (new_crtc_state && new_crtc_state->mode_changed &&
 9955				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
 9956				enable_encryption = true;
 9957
 9958			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
 9959
 9960			if (aconnector->dc_link)
 9961				hdcp_update_display(
 9962					adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
 9963					new_con_state->hdcp_content_type, enable_encryption);
 9964		}
 9965	}
 9966
 9967	/* Handle connector state changes */
 9968	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9969		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 9970		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
 9971		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 9972		struct dc_surface_update *dummy_updates;
 9973		struct dc_stream_update stream_update;
 9974		struct dc_info_packet hdr_packet;
 9975		struct dc_stream_status *status = NULL;
 9976		bool abm_changed, hdr_changed, scaling_changed;
 9977
 9978		memset(&stream_update, 0, sizeof(stream_update));
 9979
 9980		if (acrtc) {
 9981			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 9982			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
 9983		}
 9984
 9985		/* Skip any modesets/resets */
 9986		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
 9987			continue;
 9988
 9989		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9990		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9991
 9992		scaling_changed = is_scaling_state_different(dm_new_con_state,
 9993							     dm_old_con_state);
 9994
 9995		abm_changed = dm_new_crtc_state->abm_level !=
 9996			      dm_old_crtc_state->abm_level;
 9997
 9998		hdr_changed =
 9999			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10000
10001		if (!scaling_changed && !abm_changed && !hdr_changed)
10002			continue;
10003
10004		stream_update.stream = dm_new_crtc_state->stream;
10005		if (scaling_changed) {
10006			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10007					dm_new_con_state, dm_new_crtc_state->stream);
10008
10009			stream_update.src = dm_new_crtc_state->stream->src;
10010			stream_update.dst = dm_new_crtc_state->stream->dst;
10011		}
10012
10013		if (abm_changed) {
10014			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10015
10016			stream_update.abm_level = &dm_new_crtc_state->abm_level;
10017		}
10018
10019		if (hdr_changed) {
10020			fill_hdr_info_packet(new_con_state, &hdr_packet);
10021			stream_update.hdr_static_metadata = &hdr_packet;
10022		}
10023
10024		status = dc_stream_get_status(dm_new_crtc_state->stream);
10025
10026		if (WARN_ON(!status))
10027			continue;
10028
10029		WARN_ON(!status->plane_count);
10030
10031		/*
10032		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10033		 * Here we create an empty update on each plane.
10034		 * To fix this, DC should permit updating only stream properties.
10035		 */
10036		dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
10037		if (!dummy_updates) {
10038			DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
10039			continue;
10040		}
10041		for (j = 0; j < status->plane_count; j++)
10042			dummy_updates[j].surface = status->plane_states[0];
10043
10044		sort(dummy_updates, status->plane_count,
10045		     sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
10046
10047		mutex_lock(&dm->dc_lock);
10048		dc_exit_ips_for_hw_access(dm->dc);
10049		dc_update_planes_and_stream(dm->dc,
10050					    dummy_updates,
10051					    status->plane_count,
10052					    dm_new_crtc_state->stream,
10053					    &stream_update);
10054		mutex_unlock(&dm->dc_lock);
10055		kfree(dummy_updates);
10056	}
10057
10058	/**
10059	 * Enable interrupts for CRTCs that are newly enabled or went through
10060	 * a modeset. This is intentionally deferred until after the front end
10061	 * state has been modified, so that by the time the IRQ handlers run the
10062	 * OTG is on and they do not access stale or invalid state.
10063	 */
10064	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10065		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10066#ifdef CONFIG_DEBUG_FS
10067		enum amdgpu_dm_pipe_crc_source cur_crc_src;
10068#endif
10069		/* Count number of newly disabled CRTCs for dropping PM refs later. */
10070		if (old_crtc_state->active && !new_crtc_state->active)
10071			crtc_disable_count++;
10072
10073		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10074		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10075
10076		/* Update the freesync config on the crtc state and the params used for irq handling */
10077		update_stream_irq_parameters(dm, dm_new_crtc_state);
10078
10079#ifdef CONFIG_DEBUG_FS
10080		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10081		cur_crc_src = acrtc->dm_irq_params.crc_src;
10082		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10083#endif
10084
10085		if (new_crtc_state->active &&
10086		    (!old_crtc_state->active ||
10087		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10088			dc_stream_retain(dm_new_crtc_state->stream);
10089			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10090			manage_dm_interrupts(adev, acrtc, dm_new_crtc_state);
10091		}
10092		/* Handle vrr on->off / off->on transitions */
10093		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
10094
10095#ifdef CONFIG_DEBUG_FS
10096		if (new_crtc_state->active &&
10097		    (!old_crtc_state->active ||
10098		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10099			/**
10100			 * Frontend may have changed, so reapply the CRC capture
10101			 * settings for the stream.
10102			 */
10103			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10104#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10105				if (amdgpu_dm_crc_window_is_activated(crtc)) {
10106					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10107					acrtc->dm_irq_params.window_param.update_win = true;
10108
10109					/**
10110					 * It takes 2 frames for HW to stably generate CRC when
10111					 * resuming from suspend, so we set skip_frame_cnt to 2.
10112					 */
10113					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
10114					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10115				}
10116#endif
10117				if (amdgpu_dm_crtc_configure_crc_source(
10118					crtc, dm_new_crtc_state, cur_crc_src))
10119					drm_dbg_atomic(dev, "Failed to configure crc source");
10120			}
10121		}
10122#endif
10123	}
10124
10125	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10126		if (new_crtc_state->async_flip)
10127			wait_for_vblank = false;
10128
10129	/* Update planes when needed, per CRTC */
10130	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10131		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10132
10133		if (dm_new_crtc_state->stream)
10134			amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
10135	}
10136
10137	/* Enable writeback */
10138	for_each_new_connector_in_state(state, connector, new_con_state, i) {
10139		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10140		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10141
10142		if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
10143			continue;
10144
10145		if (!new_con_state->writeback_job)
10146			continue;
10147
10148		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10149
10150		if (!new_crtc_state)
10151			continue;
10152
10153		if (acrtc->wb_enabled)
10154			continue;
10155
10156		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10157
10158		dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
10159		acrtc->wb_enabled = true;
10160	}
10161
10162	/* Update audio instances for each connector. */
10163	amdgpu_dm_commit_audio(dev, state);
10164
10165	/* restore the backlight level */
10166	for (i = 0; i < dm->num_of_edps; i++) {
10167		if (dm->backlight_dev[i] &&
10168		    (dm->actual_brightness[i] != dm->brightness[i]))
10169			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10170	}
10171
10172	/*
10173	 * Send the vblank event for any CRTC event not handled in the flip and
10174	 * mark the event consumed for drm_atomic_helper_commit_hw_done().
10175	 */
10176	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10177	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10178
10179		if (new_crtc_state->event)
10180			drm_send_event_locked(dev, &new_crtc_state->event->base);
10181
10182		new_crtc_state->event = NULL;
10183	}
10184	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10185
10186	/* Signal HW programming completion */
10187	drm_atomic_helper_commit_hw_done(state);
10188
10189	if (wait_for_vblank)
10190		drm_atomic_helper_wait_for_flip_done(dev, state);
10191
10192	drm_atomic_helper_cleanup_planes(dev, state);
10193
10194	/* Don't free the memory if we are hitting this as part of suspend.
10195	 * This way we don't free any memory during suspend; see
10196	 * amdgpu_bo_free_kernel().  The memory will be freed in the first
10197	 * non-suspend modeset or when the driver is torn down.
10198	 */
10199	if (!adev->in_suspend) {
10200		/* return the stolen vga memory back to VRAM */
10201		if (!adev->mman.keep_stolen_vga_memory)
10202			amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10203		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10204	}
10205
10206	/*
10207	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10208	 * so we can put the GPU into runtime suspend if we're not driving any
10209	 * displays anymore
10210	 */
10211	for (i = 0; i < crtc_disable_count; i++)
10212		pm_runtime_put_autosuspend(dev->dev);
10213	pm_runtime_mark_last_busy(dev->dev);
10214
10215	trace_amdgpu_dm_atomic_commit_tail_finish(state);
10216}
10217
10218static int dm_force_atomic_commit(struct drm_connector *connector)
10219{
10220	int ret = 0;
10221	struct drm_device *ddev = connector->dev;
10222	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10223	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10224	struct drm_plane *plane = disconnected_acrtc->base.primary;
10225	struct drm_connector_state *conn_state;
10226	struct drm_crtc_state *crtc_state;
10227	struct drm_plane_state *plane_state;
10228
10229	if (!state)
10230		return -ENOMEM;
10231
10232	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10233
10234	/* Construct an atomic state to restore the previous display settings */
10235
10236	/*
10237	 * Attach connectors to drm_atomic_state
10238	 */
10239	conn_state = drm_atomic_get_connector_state(state, connector);
10240
10241	ret = PTR_ERR_OR_ZERO(conn_state);
10242	if (ret)
10243		goto out;
10244
10245	/* Attach crtc to drm_atomic_state */
10246	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10247
10248	ret = PTR_ERR_OR_ZERO(crtc_state);
10249	if (ret)
10250		goto out;
10251
10252	/* force a restore */
10253	crtc_state->mode_changed = true;
10254
10255	/* Attach plane to drm_atomic_state */
10256	plane_state = drm_atomic_get_plane_state(state, plane);
10257
10258	ret = PTR_ERR_OR_ZERO(plane_state);
10259	if (ret)
10260		goto out;
10261
10262	/* Call commit internally with the state we just constructed */
10263	ret = drm_atomic_commit(state);
10264
10265out:
10266	drm_atomic_state_put(state);
10267	if (ret)
10268		DRM_ERROR("Restoring old state failed with %i\n", ret);
10269
10270	return ret;
10271}
10272
10273/*
10274 * This function handles all cases where a set mode does not come after a
10275 * hotplug. This includes when a display is unplugged and then plugged back into
10276 * the same port, and when running without usermode desktop manager support.
10277 */
10278void dm_restore_drm_connector_state(struct drm_device *dev,
10279				    struct drm_connector *connector)
10280{
10281	struct amdgpu_dm_connector *aconnector;
10282	struct amdgpu_crtc *disconnected_acrtc;
10283	struct dm_crtc_state *acrtc_state;
10284
10285	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10286		return;
10287
10288	aconnector = to_amdgpu_dm_connector(connector);
10289
10290	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10291		return;
10292
10293	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10294	if (!disconnected_acrtc)
10295		return;
10296
10297	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10298	if (!acrtc_state->stream)
10299		return;
10300
10301	/*
10302	 * If the previous sink has not been released and is different from the
10303	 * current one, we deduce that we cannot rely on a usermode call to turn
10304	 * on the display, so we do it here.
10305	 */
10306	if (acrtc_state->stream->sink != aconnector->dc_sink)
10307		dm_force_atomic_commit(&aconnector->base);
10308}
10309
10310/*
10311 * Grab all modesetting locks to serialize against any blocking commits, and
10312 * wait for completion of all non-blocking commits.
10313 */
10314static int do_aquire_global_lock(struct drm_device *dev,
10315				 struct drm_atomic_state *state)
10316{
10317	struct drm_crtc *crtc;
10318	struct drm_crtc_commit *commit;
10319	long ret;
10320
10321	/*
10322	 * Adding all modeset locks to the acquire_ctx ensures that when the
10323	 * framework releases it, the extra locks we are taking here will
10324	 * also get released.
10325	 */
10326	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10327	if (ret)
10328		return ret;
10329
10330	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10331		spin_lock(&crtc->commit_lock);
10332		commit = list_first_entry_or_null(&crtc->commit_list,
10333				struct drm_crtc_commit, commit_entry);
10334		if (commit)
10335			drm_crtc_commit_get(commit);
10336		spin_unlock(&crtc->commit_lock);
10337
10338		if (!commit)
10339			continue;
10340
10341		/*
10342		 * Make sure all pending HW programming has completed and all
10343		 * page flips are done.
10344		 */
10345		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10346
10347		if (ret > 0)
10348			ret = wait_for_completion_interruptible_timeout(
10349					&commit->flip_done, 10*HZ);
10350
10351		if (ret == 0)
10352			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10353				  crtc->base.id, crtc->name);
10354
10355		drm_crtc_commit_put(commit);
10356	}
10357
10358	return ret < 0 ? ret : 0;
10359}
10360
10361static void get_freesync_config_for_crtc(
10362	struct dm_crtc_state *new_crtc_state,
10363	struct dm_connector_state *new_con_state)
10364{
10365	struct mod_freesync_config config = {0};
10366	struct amdgpu_dm_connector *aconnector;
10367	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10368	int vrefresh = drm_mode_vrefresh(mode);
10369	bool fs_vid_mode = false;
10370
10371	if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10372		return;
10373
10374	aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
10375
10376	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10377					vrefresh >= aconnector->min_vfreq &&
10378					vrefresh <= aconnector->max_vfreq;
10379
10380	if (new_crtc_state->vrr_supported) {
10381		new_crtc_state->stream->ignore_msa_timing_param = true;
10382		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10383
10384		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10385		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10386		config.vsif_supported = true;
10387		config.btr = true;
10388
10389		if (fs_vid_mode) {
10390			config.state = VRR_STATE_ACTIVE_FIXED;
10391			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10392			goto out;
10393		} else if (new_crtc_state->base.vrr_enabled) {
10394			config.state = VRR_STATE_ACTIVE_VARIABLE;
10395		} else {
10396			config.state = VRR_STATE_INACTIVE;
10397		}
10398	}
10399out:
10400	new_crtc_state->freesync_config = config;
10401}
10402
10403static void reset_freesync_config_for_crtc(
10404	struct dm_crtc_state *new_crtc_state)
10405{
10406	new_crtc_state->vrr_supported = false;
10407
10408	memset(&new_crtc_state->vrr_infopacket, 0,
10409	       sizeof(new_crtc_state->vrr_infopacket));
10410}
10411
10412static bool
10413is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10414				 struct drm_crtc_state *new_crtc_state)
10415{
10416	const struct drm_display_mode *old_mode, *new_mode;
10417
10418	if (!old_crtc_state || !new_crtc_state)
10419		return false;
10420
10421	old_mode = &old_crtc_state->mode;
10422	new_mode = &new_crtc_state->mode;
10423
10424	if (old_mode->clock       == new_mode->clock &&
10425	    old_mode->hdisplay    == new_mode->hdisplay &&
10426	    old_mode->vdisplay    == new_mode->vdisplay &&
10427	    old_mode->htotal      == new_mode->htotal &&
10428	    old_mode->vtotal      != new_mode->vtotal &&
10429	    old_mode->hsync_start == new_mode->hsync_start &&
10430	    old_mode->vsync_start != new_mode->vsync_start &&
10431	    old_mode->hsync_end   == new_mode->hsync_end &&
10432	    old_mode->vsync_end   != new_mode->vsync_end &&
10433	    old_mode->hskew       == new_mode->hskew &&
10434	    old_mode->vscan       == new_mode->vscan &&
10435	    (old_mode->vsync_end - old_mode->vsync_start) ==
10436	    (new_mode->vsync_end - new_mode->vsync_start))
10437		return true;
10438
10439	return false;
10440}
10441
10442static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
10443{
10444	u64 num, den, res;
10445	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10446
10447	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10448
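	/*
	 * Refresh rate = pixel clock / (htotal * vtotal). mode.clock is in kHz,
	 * so multiply by 1000 for Hz and by 1000000 to express the fixed refresh
	 * rate in micro-Hz.
	 */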
10449	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10450	den = (unsigned long long)new_crtc_state->mode.htotal *
10451	      (unsigned long long)new_crtc_state->mode.vtotal;
10452
10453	res = div_u64(num, den);
10454	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10455}
10456
10457static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10458			 struct drm_atomic_state *state,
10459			 struct drm_crtc *crtc,
10460			 struct drm_crtc_state *old_crtc_state,
10461			 struct drm_crtc_state *new_crtc_state,
10462			 bool enable,
10463			 bool *lock_and_validation_needed)
10464{
10465	struct dm_atomic_state *dm_state = NULL;
10466	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10467	struct dc_stream_state *new_stream;
10468	int ret = 0;
10469
10470	/*
10471	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
10472	 * dc_validation_set; update changed items.
10473	 */
10474	struct amdgpu_crtc *acrtc = NULL;
10475	struct drm_connector *connector = NULL;
10476	struct amdgpu_dm_connector *aconnector = NULL;
10477	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10478	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10479
10480	new_stream = NULL;
10481
10482	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10483	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10484	acrtc = to_amdgpu_crtc(crtc);
10485	connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10486	if (connector)
10487		aconnector = to_amdgpu_dm_connector(connector);
10488
10489	/* TODO This hack should go away */
10490	if (connector && enable) {
10491		/* Make sure fake sink is created in plug-in scenario */
10492		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10493									connector);
10494		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10495									connector);
10496
10497		if (IS_ERR(drm_new_conn_state)) {
10498			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10499			goto fail;
10500		}
10501
10502		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10503		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10504
10505		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10506			goto skip_modeset;
10507
10508		new_stream = create_validate_stream_for_sink(aconnector,
10509							     &new_crtc_state->mode,
10510							     dm_new_conn_state,
10511							     dm_old_crtc_state->stream);
10512
10513		/*
10514		 * We can have no stream on ACTION_SET if a display
10515		 * was disconnected during S3; in this case it is not an
10516		 * error. The OS will be updated after detection and
10517		 * will do the right thing on the next atomic commit.
10518		 */
10519
10520		if (!new_stream) {
10521			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10522					__func__, acrtc->base.base.id);
10523			ret = -ENOMEM;
10524			goto fail;
10525		}
10526
10527		/*
10528		 * TODO: Check VSDB bits to decide whether this should
10529		 * be enabled or not.
10530		 */
10531		new_stream->triggered_crtc_reset.enabled =
10532			dm->force_timing_sync;
10533
10534		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10535
10536		ret = fill_hdr_info_packet(drm_new_conn_state,
10537					   &new_stream->hdr_static_metadata);
10538		if (ret)
10539			goto fail;
10540
10541		/*
10542		 * If we already removed the old stream from the context
10543		 * (and set the new stream to NULL) then we can't reuse
10544		 * the old stream even if the stream and scaling are unchanged.
10545		 * Doing so would hit the BUG_ON below and cause a black screen.
10546		 *
10547		 * TODO: Refactor this function to allow this check to work
10548		 * in all conditions.
10549		 */
10550		if (amdgpu_freesync_vid_mode &&
10551		    dm_new_crtc_state->stream &&
10552		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10553			goto skip_modeset;
10554
10555		if (dm_new_crtc_state->stream &&
10556		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10557		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10558			new_crtc_state->mode_changed = false;
10559			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10560					 new_crtc_state->mode_changed);
10561		}
10562	}
10563
10564	/* mode_changed flag may get updated above, need to check again */
10565	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10566		goto skip_modeset;
10567
10568	drm_dbg_state(state->dev,
10569		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
10570		acrtc->crtc_id,
10571		new_crtc_state->enable,
10572		new_crtc_state->active,
10573		new_crtc_state->planes_changed,
10574		new_crtc_state->mode_changed,
10575		new_crtc_state->active_changed,
10576		new_crtc_state->connectors_changed);
10577
10578	/* Remove stream for any changed/disabled CRTC */
10579	if (!enable) {
10580
10581		if (!dm_old_crtc_state->stream)
10582			goto skip_modeset;
10583
10584		/* Unset freesync video if it was active before */
10585		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
10586			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
10587			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
10588		}
10589
10590		/* Now check if we should set freesync video mode */
10591		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10592		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10593		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
10594		    is_timing_unchanged_for_freesync(new_crtc_state,
10595						     old_crtc_state)) {
10596			new_crtc_state->mode_changed = false;
10597			DRM_DEBUG_DRIVER(
10598				"Mode change not required for front porch change, setting mode_changed to %d",
10599				new_crtc_state->mode_changed);
10600
10601			set_freesync_fixed_config(dm_new_crtc_state);
10602
10603			goto skip_modeset;
10604		} else if (amdgpu_freesync_vid_mode && aconnector &&
10605			   is_freesync_video_mode(&new_crtc_state->mode,
10606						  aconnector)) {
10607			struct drm_display_mode *high_mode;
10608
10609			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10610			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
10611				set_freesync_fixed_config(dm_new_crtc_state);
10612		}
10613
10614		ret = dm_atomic_get_state(state, &dm_state);
10615		if (ret)
10616			goto fail;
10617
10618		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10619				crtc->base.id);
10620
10621		/* i.e. reset mode */
10622		if (dc_state_remove_stream(
10623				dm->dc,
10624				dm_state->context,
10625				dm_old_crtc_state->stream) != DC_OK) {
10626			ret = -EINVAL;
10627			goto fail;
10628		}
10629
10630		dc_stream_release(dm_old_crtc_state->stream);
10631		dm_new_crtc_state->stream = NULL;
10632
10633		reset_freesync_config_for_crtc(dm_new_crtc_state);
10634
10635		*lock_and_validation_needed = true;
10636
10637	} else {/* Add stream for any updated/enabled CRTC */
10638		/*
10639		 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
10640		 * added MST connectors are not found in the existing crtc_state in chained mode.
10641		 * TODO: dig out the root cause of this.
10642		 */
10643		if (!connector)
10644			goto skip_modeset;
10645
10646		if (modereset_required(new_crtc_state))
10647			goto skip_modeset;
10648
10649		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
10650				     dm_old_crtc_state->stream)) {
10651
10652			WARN_ON(dm_new_crtc_state->stream);
10653
10654			ret = dm_atomic_get_state(state, &dm_state);
10655			if (ret)
10656				goto fail;
10657
10658			dm_new_crtc_state->stream = new_stream;
10659
10660			dc_stream_retain(new_stream);
10661
10662			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10663					 crtc->base.id);
10664
10665			if (dc_state_add_stream(
10666					dm->dc,
10667					dm_state->context,
10668					dm_new_crtc_state->stream) != DC_OK) {
10669				ret = -EINVAL;
10670				goto fail;
10671			}
10672
10673			*lock_and_validation_needed = true;
10674		}
10675	}
10676
10677skip_modeset:
10678	/* Release extra reference */
10679	if (new_stream)
10680		dc_stream_release(new_stream);
10681
10682	/*
10683	 * We want to do dc stream updates that do not require a
10684	 * full modeset below.
10685	 */
10686	if (!(enable && connector && new_crtc_state->active))
10687		return 0;
10688	/*
10689	 * Given the above conditions, the dc state cannot be NULL because:
10690	 * 1. We're in the process of enabling the CRTC (it has just been
10691	 *    added to the dc context, or is already on the context),
10692	 * 2. it has a valid connector attached, and
10693	 * 3. it is currently active and enabled.
10694	 * => The dc stream state currently exists.
10695	 */
10696	BUG_ON(dm_new_crtc_state->stream == NULL);
10697
10698	/* Scaling or underscan settings */
10699	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10700				drm_atomic_crtc_needs_modeset(new_crtc_state))
10701		update_stream_scaling_settings(
10702			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10703
10704	/* ABM settings */
10705	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10706
10707	/*
10708	 * Color management settings. We also update color properties
10709	 * when a modeset is needed, to ensure it gets reprogrammed.
10710	 */
10711	if (dm_new_crtc_state->base.color_mgmt_changed ||
10712	    dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
10713	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10714		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10715		if (ret)
10716			goto fail;
10717	}
10718
10719	/* Update Freesync settings. */
10720	get_freesync_config_for_crtc(dm_new_crtc_state,
10721				     dm_new_conn_state);
10722
10723	return ret;
10724
10725fail:
10726	if (new_stream)
10727		dc_stream_release(new_stream);
10728	return ret;
10729}
10730
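/*
 * Decide whether a plane must be removed from and re-added to the DC state
 * (a full reset) rather than updated in place.
 */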
10731static bool should_reset_plane(struct drm_atomic_state *state,
10732			       struct drm_plane *plane,
10733			       struct drm_plane_state *old_plane_state,
10734			       struct drm_plane_state *new_plane_state)
10735{
10736	struct drm_plane *other;
10737	struct drm_plane_state *old_other_state, *new_other_state;
10738	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10739	struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
10740	struct amdgpu_device *adev = drm_to_adev(plane->dev);
10741	int i;
10742
10743	/*
10744	 * TODO: Remove this hack for all asics once fast updates prove to
10745	 * work fine on DCN3.2+.
10746	 */
10747	if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
10748	    state->allow_modeset)
10749		return true;
10750
10751	/* Exit early if we know that we're adding or removing the plane. */
10752	if (old_plane_state->crtc != new_plane_state->crtc)
10753		return true;
10754
10755	/* old crtc == new_crtc == NULL, plane not in context. */
10756	if (!new_plane_state->crtc)
10757		return false;
10758
10759	new_crtc_state =
10760		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10761	old_crtc_state =
10762		drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
10763
10764	if (!new_crtc_state)
10765		return true;
10766
10767	/*
10768	 * A change in cursor mode means a new dc pipe needs to be acquired or
10769	 * released from the state
10770	 */
10771	old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
10772	new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
10773	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
10774	    old_dm_crtc_state != NULL &&
10775	    old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
10776		return true;
10777	}
10778
10779	/* CRTC Degamma changes currently require us to recreate planes. */
10780	if (new_crtc_state->color_mgmt_changed)
10781		return true;
10782
10783	/*
10784	 * On zpos change, planes need to be reordered by removing and re-adding
10785	 * them one by one to the dc state, in order of descending zpos.
10786	 *
10787	 * TODO: We can likely skip bandwidth validation if the only thing that
10788	 * changed about the plane was its z-ordering.
10789	 */
10790	if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos)
10791		return true;
10792
10793	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10794		return true;
10795
10796	/*
10797	 * If there are any new primary or overlay planes being added or
10798	 * removed then the z-order can potentially change. To ensure
10799	 * correct z-order and pipe acquisition the current DC architecture
10800	 * requires us to remove and recreate all existing planes.
10801	 *
10802	 * TODO: Come up with a more elegant solution for this.
10803	 */
10804	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10805		struct amdgpu_framebuffer *old_afb, *new_afb;
10806		struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
10807
10808		dm_new_other_state = to_dm_plane_state(new_other_state);
10809		dm_old_other_state = to_dm_plane_state(old_other_state);
10810
10811		if (other->type == DRM_PLANE_TYPE_CURSOR)
10812			continue;
10813
10814		if (old_other_state->crtc != new_plane_state->crtc &&
10815		    new_other_state->crtc != new_plane_state->crtc)
10816			continue;
10817
10818		if (old_other_state->crtc != new_other_state->crtc)
10819			return true;
10820
10821		/* Src/dst size and scaling updates. */
10822		if (old_other_state->src_w != new_other_state->src_w ||
10823		    old_other_state->src_h != new_other_state->src_h ||
10824		    old_other_state->crtc_w != new_other_state->crtc_w ||
10825		    old_other_state->crtc_h != new_other_state->crtc_h)
10826			return true;
10827
10828		/* Rotation / mirroring updates. */
10829		if (old_other_state->rotation != new_other_state->rotation)
10830			return true;
10831
10832		/* Blending updates. */
10833		if (old_other_state->pixel_blend_mode !=
10834		    new_other_state->pixel_blend_mode)
10835			return true;
10836
10837		/* Alpha updates. */
10838		if (old_other_state->alpha != new_other_state->alpha)
10839			return true;
10840
10841		/* Colorspace changes. */
10842		if (old_other_state->color_range != new_other_state->color_range ||
10843		    old_other_state->color_encoding != new_other_state->color_encoding)
10844			return true;
10845
10846		/* HDR/Transfer Function changes. */
10847		if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
10848		    dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
10849		    dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
10850		    dm_old_other_state->ctm != dm_new_other_state->ctm ||
10851		    dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
10852		    dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
10853		    dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
10854		    dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
10855		    dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
10856			return true;
10857
10858		/* Framebuffer checks fall at the end. */
10859		if (!old_other_state->fb || !new_other_state->fb)
10860			continue;
10861
10862		/* Pixel format changes can require bandwidth updates. */
10863		if (old_other_state->fb->format != new_other_state->fb->format)
10864			return true;
10865
10866		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10867		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10868
10869		/* Tiling and DCC changes also require bandwidth updates. */
10870		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10871		    old_afb->base.modifier != new_afb->base.modifier)
10872			return true;
10873	}
10874
10875	return false;
10876}
10877
10878static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10879			      struct drm_plane_state *new_plane_state,
10880			      struct drm_framebuffer *fb)
10881{
10882	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10883	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10884	unsigned int pitch;
10885	bool linear;
10886
10887	if (fb->width > new_acrtc->max_cursor_width ||
10888	    fb->height > new_acrtc->max_cursor_height) {
10889		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10890				 new_plane_state->fb->width,
10891				 new_plane_state->fb->height);
10892		return -EINVAL;
10893	}
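	/* src_w/src_h are 16.16 fixed point, so compare against the FB size shifted left by 16. */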
10894	if (new_plane_state->src_w != fb->width << 16 ||
10895	    new_plane_state->src_h != fb->height << 16) {
10896		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10897		return -EINVAL;
10898	}
10899
10900	/* Pitch in pixels */
10901	pitch = fb->pitches[0] / fb->format->cpp[0];
10902
10903	if (fb->width != pitch) {
10904		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10905				 fb->width, pitch);
10906		return -EINVAL;
10907	}
10908
10909	switch (pitch) {
10910	case 64:
10911	case 128:
10912	case 256:
10913		/* FB pitch is supported by cursor plane */
10914		break;
10915	default:
10916		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10917		return -EINVAL;
10918	}
10919
10920	/* Core DRM takes care of checking FB modifiers, so we only need to
10921	 * check tiling flags when the FB doesn't have a modifier.
10922	 */
10923	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10924		if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
10925			linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0;
10926		} else if (adev->family >= AMDGPU_FAMILY_AI) {
10927			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10928		} else {
10929			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10930				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10931				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10932		}
10933		if (!linear) {
10934			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10935			return -EINVAL;
10936		}
10937	}
10938
10939	return 0;
10940}
10941
10942/*
10943 * Helper function for checking the cursor in native mode
10944 */
10945static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
10946					struct drm_plane *plane,
10947					struct drm_plane_state *new_plane_state,
10948					bool enable)
10949{
10950
10951	struct amdgpu_crtc *new_acrtc;
10952	int ret;
10953
10954	if (!enable || !new_plane_crtc ||
10955	    drm_atomic_plane_disabling(plane->state, new_plane_state))
10956		return 0;
10957
10958	new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10959
10960	if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10961		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10962		return -EINVAL;
10963	}
10964
10965	if (new_plane_state->fb) {
10966		ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10967						new_plane_state->fb);
10968		if (ret)
10969			return ret;
10970	}
10971
10972	return 0;
10973}
10974
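/*
 * Report whether the cursor on the relevant CRTC is (or will be) handled in
 * native mode, in which case it is not added to the DC state as a separate plane.
 */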
10975static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
10976					   struct drm_crtc *old_plane_crtc,
10977					   struct drm_crtc *new_plane_crtc,
10978					   bool enable)
10979{
10980	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10981	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10982
10983	if (!enable) {
10984		if (old_plane_crtc == NULL)
10985			return true;
10986
10987		old_crtc_state = drm_atomic_get_old_crtc_state(
10988			state, old_plane_crtc);
10989		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10990
10991		return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
10992	} else {
10993		if (new_plane_crtc == NULL)
10994			return true;
10995
10996		new_crtc_state = drm_atomic_get_new_crtc_state(
10997			state, new_plane_crtc);
10998		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10999
11000		return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
11001	}
11002}
11003
11004static int dm_update_plane_state(struct dc *dc,
11005				 struct drm_atomic_state *state,
11006				 struct drm_plane *plane,
11007				 struct drm_plane_state *old_plane_state,
11008				 struct drm_plane_state *new_plane_state,
11009				 bool enable,
11010				 bool *lock_and_validation_needed,
11011				 bool *is_top_most_overlay)
11012{
11013
11014	struct dm_atomic_state *dm_state = NULL;
11015	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
11016	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11017	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
11018	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
11019	bool needs_reset, update_native_cursor;
11020	int ret = 0;
11021
11022
11023	new_plane_crtc = new_plane_state->crtc;
11024	old_plane_crtc = old_plane_state->crtc;
11025	dm_new_plane_state = to_dm_plane_state(new_plane_state);
11026	dm_old_plane_state = to_dm_plane_state(old_plane_state);
11027
11028	update_native_cursor = dm_should_update_native_cursor(state,
11029							      old_plane_crtc,
11030							      new_plane_crtc,
11031							      enable);
11032
11033	if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
11034		ret = dm_check_native_cursor_state(new_plane_crtc, plane,
11035						    new_plane_state, enable);
11036		if (ret)
11037			return ret;
11038
11039		return 0;
11040	}
11041
11042	needs_reset = should_reset_plane(state, plane, old_plane_state,
11043					 new_plane_state);
11044
11045	/* Remove any changed/removed planes */
11046	if (!enable) {
11047		if (!needs_reset)
11048			return 0;
11049
11050		if (!old_plane_crtc)
11051			return 0;
11052
11053		old_crtc_state = drm_atomic_get_old_crtc_state(
11054				state, old_plane_crtc);
11055		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11056
11057		if (!dm_old_crtc_state->stream)
11058			return 0;
11059
11060		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
11061				plane->base.id, old_plane_crtc->base.id);
11062
11063		ret = dm_atomic_get_state(state, &dm_state);
11064		if (ret)
11065			return ret;
11066
11067		if (!dc_state_remove_plane(
11068				dc,
11069				dm_old_crtc_state->stream,
11070				dm_old_plane_state->dc_state,
11071				dm_state->context)) {
11072
11073			return -EINVAL;
11074		}
11075
11076		if (dm_old_plane_state->dc_state)
11077			dc_plane_state_release(dm_old_plane_state->dc_state);
11078
11079		dm_new_plane_state->dc_state = NULL;
11080
11081		*lock_and_validation_needed = true;
11082
11083	} else { /* Add new planes */
11084		struct dc_plane_state *dc_new_plane_state;
11085
11086		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11087			return 0;
11088
11089		if (!new_plane_crtc)
11090			return 0;
11091
11092		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11093		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11094
11095		if (!dm_new_crtc_state->stream)
11096			return 0;
11097
11098		if (!needs_reset)
11099			return 0;
11100
11101		ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11102		if (ret)
11103			goto out;
11104
11105		WARN_ON(dm_new_plane_state->dc_state);
11106
11107		dc_new_plane_state = dc_create_plane_state(dc);
11108		if (!dc_new_plane_state) {
11109			ret = -ENOMEM;
11110			goto out;
11111		}
11112
11113		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11114				 plane->base.id, new_plane_crtc->base.id);
11115
11116		ret = fill_dc_plane_attributes(
11117			drm_to_adev(new_plane_crtc->dev),
11118			dc_new_plane_state,
11119			new_plane_state,
11120			new_crtc_state);
11121		if (ret) {
11122			dc_plane_state_release(dc_new_plane_state);
11123			goto out;
11124		}
11125
11126		ret = dm_atomic_get_state(state, &dm_state);
11127		if (ret) {
11128			dc_plane_state_release(dc_new_plane_state);
11129			goto out;
11130		}
11131
11132		/*
11133		 * Any atomic check errors that occur after this will
11134		 * not need a release. The plane state will be attached
11135		 * to the stream, and therefore part of the atomic
11136		 * state. It'll be released when the atomic state is
11137		 * cleaned.
11138		 */
11139		if (!dc_state_add_plane(
11140				dc,
11141				dm_new_crtc_state->stream,
11142				dc_new_plane_state,
11143				dm_state->context)) {
11144
11145			dc_plane_state_release(dc_new_plane_state);
11146			ret = -EINVAL;
11147			goto out;
11148		}
11149
11150		dm_new_plane_state->dc_state = dc_new_plane_state;
11151
11152		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11153
11154		/* Tell DC to do a full surface update every time there
11155		 * is a plane change. Inefficient, but works for now.
11156		 */
11157		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11158
11159		*lock_and_validation_needed = true;
11160	}
11161
11162out:
11163	/* If enabling cursor overlay failed, attempt fallback to native mode */
11164	if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
11165		ret = dm_check_native_cursor_state(new_plane_crtc, plane,
11166						    new_plane_state, enable);
11167		if (ret)
11168			return ret;
11169
11170		dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
11171	}
11172
11173	return ret;
11174}
11175
11176static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11177				       int *src_w, int *src_h)
11178{
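	/* A 90/270 degree rotation swaps the source width and height as seen by the CRTC. */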
11179	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11180	case DRM_MODE_ROTATE_90:
11181	case DRM_MODE_ROTATE_270:
11182		*src_w = plane_state->src_h >> 16;
11183		*src_h = plane_state->src_w >> 16;
11184		break;
11185	case DRM_MODE_ROTATE_0:
11186	case DRM_MODE_ROTATE_180:
11187	default:
11188		*src_w = plane_state->src_w >> 16;
11189		*src_h = plane_state->src_h >> 16;
11190		break;
11191	}
11192}
11193
11194static void
11195dm_get_plane_scale(struct drm_plane_state *plane_state,
11196		   int *out_plane_scale_w, int *out_plane_scale_h)
11197{
11198	int plane_src_w, plane_src_h;
11199
11200	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
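	/* Scale is in units of 1/1000: 1000 means 1:1; a zero-sized source yields 0. */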
11201	*out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
11202	*out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
11203}
11204
11205/*
11206 * The normalized_zpos value cannot be used by this iterator directly. It's only
11207 * calculated for enabled planes, potentially causing normalized_zpos collisions
11208 * between enabled/disabled planes in the atomic state. We need a unique value
11209 * so that the iterator will not generate the same object twice, or loop
11210 * indefinitely.
11211 */
11212static inline struct __drm_planes_state *__get_next_zpos(
11213	struct drm_atomic_state *state,
11214	struct __drm_planes_state *prev)
11215{
11216	unsigned int highest_zpos = 0, prev_zpos = 256;
11217	uint32_t highest_id = 0, prev_id = UINT_MAX;
11218	struct drm_plane_state *new_plane_state;
11219	struct drm_plane *plane;
11220	int i, highest_i = -1;
11221
11222	if (prev != NULL) {
11223		prev_zpos = prev->new_state->zpos;
11224		prev_id = prev->ptr->base.id;
11225	}
11226
11227	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
11228		/* Skip planes with higher zpos than the previously returned */
11229		if (new_plane_state->zpos > prev_zpos ||
11230		    (new_plane_state->zpos == prev_zpos &&
11231		     plane->base.id >= prev_id))
11232			continue;
11233
11234		/* Save the index of the plane with highest zpos */
11235		if (new_plane_state->zpos > highest_zpos ||
11236		    (new_plane_state->zpos == highest_zpos &&
11237		     plane->base.id > highest_id)) {
11238			highest_zpos = new_plane_state->zpos;
11239			highest_id = plane->base.id;
11240			highest_i = i;
11241		}
11242	}
11243
11244	if (highest_i < 0)
11245		return NULL;
11246
11247	return &state->planes[highest_i];
11248}
11249
11250/*
11251 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
11252 * by descending zpos, as read from the new plane state. This is the same
11253 * ordering as defined by drm_atomic_normalize_zpos().
11254 */
11255#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
11256	for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
11257	     __i != NULL; __i = __get_next_zpos((__state), __i))		\
11258		for_each_if(((plane) = __i->ptr,				\
11259			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
11260			     (old_plane_state) = __i->old_state,		\
11261			     (new_plane_state) = __i->new_state, 1))
11262
11263static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11264{
11265	struct drm_connector *connector;
11266	struct drm_connector_state *conn_state, *old_conn_state;
11267	struct amdgpu_dm_connector *aconnector = NULL;
11268	int i;
11269
11270	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11271		if (!conn_state->crtc)
11272			conn_state = old_conn_state;
11273
11274		if (conn_state->crtc != crtc)
11275			continue;
11276
11277		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
11278			continue;
11279
11280		aconnector = to_amdgpu_dm_connector(connector);
11281		if (!aconnector->mst_output_port || !aconnector->mst_root)
11282			aconnector = NULL;
11283		else
11284			break;
11285	}
11286
11287	if (!aconnector)
11288		return 0;
11289
11290	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
11291}
11292
11293/**
11294 * DOC: Cursor Modes - Native vs Overlay
11295 *
11296 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
11297 * plane. It does not require a dedicated hw plane to enable, but it is
11298 * subject to the same z-order and scaling as the hw plane. It also has format
11299 * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
11300 * hw plane.
11301 *
11302 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
11303 * own scaling and z-pos. It also has no blending restrictions. This makes
11304 * cursor behavior more akin to a DRM client's expectations. However, it does
11305 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
11306 * available.
11307 */
11308
11309/**
11310 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
11311 * @adev: amdgpu device
11312 * @state: DRM atomic state
11313 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
11314 * @cursor_mode: Returns the required cursor mode on dm_crtc_state
11315 *
11316 * Get whether the cursor should be enabled in native mode, or overlay mode, on
11317 * the dm_crtc_state.
11318 *
11319 * The cursor should be enabled in overlay mode if there exists an underlying
11320 * plane - on which the cursor may be blended - that is either YUV formatted, or
11321 * scaled differently from the cursor.
11322 *
11323 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
11324 * calling this function.
11325 *
11326 * Return: 0 on success, or an error code if getting the cursor plane state
11327 * failed.
11328 */
11329static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
11330				   struct drm_atomic_state *state,
11331				   struct dm_crtc_state *dm_crtc_state,
11332				   enum amdgpu_dm_cursor_mode *cursor_mode)
11333{
11334	struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
11335	struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
11336	struct drm_plane *plane;
11337	bool consider_mode_change = false;
11338	bool entire_crtc_covered = false;
11339	bool cursor_changed = false;
11340	int underlying_scale_w, underlying_scale_h;
11341	int cursor_scale_w, cursor_scale_h;
11342	int i;
11343
11344	/* Overlay cursor is not supported on HW before DCN.
11345	 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions
11346	 * of previous DCN generations, so enable native mode on DCN401 in addition to DCE.
11347	 */
11348	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
11349	    amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
11350		*cursor_mode = DM_CURSOR_NATIVE_MODE;
11351		return 0;
11352	}
11353
11354	/* Init cursor_mode to be the same as current */
11355	*cursor_mode = dm_crtc_state->cursor_mode;
11356
11357	/*
11358	 * Cursor mode can change if a plane's format changes, scale changes, is
11359	 * enabled/disabled, or z-order changes.
11360	 */
11361	for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
11362		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
11363
11364		/* Only care about planes on this CRTC */
11365		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
11366			continue;
11367
11368		if (plane->type == DRM_PLANE_TYPE_CURSOR)
11369			cursor_changed = true;
11370
11371		if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
11372		    drm_atomic_plane_disabling(old_plane_state, plane_state) ||
11373		    old_plane_state->fb->format != plane_state->fb->format) {
11374			consider_mode_change = true;
11375			break;
11376		}
11377
11378		dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
11379		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
11380		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
11381			consider_mode_change = true;
11382			break;
11383		}
11384	}
11385
11386	if (!consider_mode_change && !crtc_state->zpos_changed)
11387		return 0;
11388
11389	/*
11390	 * If there is no cursor change on this CRTC, and the cursor is not enabled
11391	 * on this CRTC, then there is no need to set the cursor mode. This avoids
11392	 * needlessly locking the cursor state.
11393	 */
11394	if (!cursor_changed &&
11395	    !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
11396		return 0;
11397	}
11398
11399	cursor_state = drm_atomic_get_plane_state(state,
11400						  crtc_state->crtc->cursor);
11401	if (IS_ERR(cursor_state))
11402		return PTR_ERR(cursor_state);
11403
11404	/* Cursor is disabled */
11405	if (!cursor_state->fb)
11406		return 0;
11407
11408	/* For all planes in descending z-order (all of which are below cursor
11409	 * as per zpos definitions), check their scaling and format
11410	 */
11411	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
11412
11413		/* Only care about non-cursor planes on this CRTC */
11414		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
11415		    plane->type == DRM_PLANE_TYPE_CURSOR)
11416			continue;
11417
11418		/* Underlying plane is YUV format - use overlay cursor */
11419		if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
11420			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
11421			return 0;
11422		}
11423
11424		dm_get_plane_scale(plane_state,
11425				   &underlying_scale_w, &underlying_scale_h);
11426		dm_get_plane_scale(cursor_state,
11427				   &cursor_scale_w, &cursor_scale_h);
11428
11429		/* Underlying plane has different scale - use overlay cursor */
11430		if (cursor_scale_w != underlying_scale_w ||
11431		    cursor_scale_h != underlying_scale_h) {
11432			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
11433			return 0;
11434		}
11435
11436		/* If this plane covers the whole CRTC, no need to check planes underneath */
11437		if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
11438		    plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
11439		    plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
11440			entire_crtc_covered = true;
11441			break;
11442		}
11443	}
11444
11445	/* If planes do not cover the entire CRTC, use overlay mode to enable
11446	 * cursor over holes
11447	 */
11448	if (entire_crtc_covered)
11449		*cursor_mode = DM_CURSOR_NATIVE_MODE;
11450	else
11451		*cursor_mode = DM_CURSOR_OVERLAY_MODE;
11452
11453	return 0;
11454}
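
/*
 * Worked example for the scale check above, assuming dm_get_plane_scale()
 * reports each axis as an integer ratio (crtc size * 1000 / fb source size);
 * the exact representation does not matter as long as both planes are
 * queried the same way:
 *
 *	underlying: 1920x1080 fb scanned out at 3840x2160 -> 2000, 2000
 *	cursor:       64x64   fb scanned out at   64x64   -> 1000, 1000
 *
 * The ratios differ, so the cursor cannot share the underlying pipe's scaler
 * and DM falls back to DM_CURSOR_OVERLAY_MODE.
 */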
11455
11456static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
11457					    struct drm_atomic_state *state,
11458					    struct drm_crtc_state *crtc_state)
11459{
11460	struct drm_plane *plane;
11461	struct drm_plane_state *new_plane_state, *old_plane_state;
11462
11463	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
11464		new_plane_state = drm_atomic_get_plane_state(state, plane);
11465		old_plane_state = drm_atomic_get_old_plane_state(state, plane);
11466
11467		if (old_plane_state->fb && new_plane_state->fb &&
11468		    get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
11469			return true;
11470	}
11471
11472	return false;
11473}
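
/*
 * The helper above feeds the async-flip check in amdgpu_dm_atomic_check():
 * flipping between framebuffers that live in different memory domains cannot
 * be done as a fast/async update. A minimal hypothetical case:
 *
 *	// fb_a pinned in VRAM, fb_b pinned in GTT (hypothetical buffers)
 *	// get_mem_type(fb_a) != get_mem_type(fb_b) -> async flip rejected
 */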
11474
11475/**
11476 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11477 *
11478 * @dev: The DRM device
11479 * @state: The atomic state to commit
11480 *
11481 * Validate that the given atomic state is programmable by DC into hardware.
11482 * This involves constructing a &struct dc_state reflecting the new hardware
11483 * state we wish to commit, then querying DC to see if it is programmable. It's
11484 * important not to modify the existing DC state. Otherwise, atomic_check
11485 * may unexpectedly commit hardware changes.
11486 *
11487 * When validating the DC state, it's important that the right locks are
11488 * acquired. For the full-update case, which removes/adds/updates streams on one
11489 * CRTC while flipping on another, acquiring the global lock guarantees
11490 * that any such full-update commit will wait for completion of any outstanding
11491 * flip using DRM's synchronization events.
11492 *
11493 * Note that DM adds the affected connectors for all CRTCs in state, even when
11494 * that might not seem necessary. This is because DC stream creation requires the
11495 * DC sink, which is tied to the DRM connector state. Cleaning this up should
11496 * be possible but non-trivial - a possible TODO item.
11497 *
11498 * Return: 0 on success, or a negative error code if validation failed.
11499 */
11500static int amdgpu_dm_atomic_check(struct drm_device *dev,
11501				  struct drm_atomic_state *state)
11502{
11503	struct amdgpu_device *adev = drm_to_adev(dev);
11504	struct dm_atomic_state *dm_state = NULL;
11505	struct dc *dc = adev->dm.dc;
11506	struct drm_connector *connector;
11507	struct drm_connector_state *old_con_state, *new_con_state;
11508	struct drm_crtc *crtc;
11509	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11510	struct drm_plane *plane;
11511	struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state;
11512	enum dc_status status;
11513	int ret, i;
11514	bool lock_and_validation_needed = false;
11515	bool is_top_most_overlay = true;
11516	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11517	struct drm_dp_mst_topology_mgr *mgr;
11518	struct drm_dp_mst_topology_state *mst_state;
11519	struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
11520
11521	trace_amdgpu_dm_atomic_check_begin(state);
11522
11523	ret = drm_atomic_helper_check_modeset(dev, state);
11524	if (ret) {
11525		drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n");
11526		goto fail;
11527	}
11528
11529	/* Check connector changes */
11530	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11531		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11532		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11533
11534		/* Skip connectors that are disabled or part of modeset already. */
11535		if (!new_con_state->crtc)
11536			continue;
11537
11538		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11539		if (IS_ERR(new_crtc_state)) {
11540			drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n");
11541			ret = PTR_ERR(new_crtc_state);
11542			goto fail;
11543		}
11544
11545		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
11546		    dm_old_con_state->scaling != dm_new_con_state->scaling)
11547			new_crtc_state->connectors_changed = true;
11548	}
11549
11550	if (dc_resource_is_dsc_encoding_supported(dc)) {
11551		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11552			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11553				ret = add_affected_mst_dsc_crtcs(state, crtc);
11554				if (ret) {
11555					drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n");
11556					goto fail;
11557				}
11558			}
11559		}
11560	}
11561	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11562		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11563
11564		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11565		    !new_crtc_state->color_mgmt_changed &&
11566		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11567			dm_old_crtc_state->dsc_force_changed == false)
11568			continue;
11569
11570		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11571		if (ret) {
11572			drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n");
11573			goto fail;
11574		}
11575
11576		if (!new_crtc_state->enable)
11577			continue;
11578
11579		ret = drm_atomic_add_affected_connectors(state, crtc);
11580		if (ret) {
11581			drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n");
11582			goto fail;
11583		}
11584
11585		ret = drm_atomic_add_affected_planes(state, crtc);
11586		if (ret) {
11587			drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n");
11588			goto fail;
11589		}
11590
11591		if (dm_old_crtc_state->dsc_force_changed)
11592			new_crtc_state->mode_changed = true;
11593	}
11594
11595	/*
11596	 * Add all primary and overlay planes on the CRTC to the state
11597	 * whenever a plane is enabled to maintain correct z-ordering
11598	 * and to enable fast surface updates.
11599	 */
11600	drm_for_each_crtc(crtc, dev) {
11601		bool modified = false;
11602
11603		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11604			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11605				continue;
11606
11607			if (new_plane_state->crtc == crtc ||
11608			    old_plane_state->crtc == crtc) {
11609				modified = true;
11610				break;
11611			}
11612		}
11613
11614		if (!modified)
11615			continue;
11616
11617		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11618			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11619				continue;
11620
11621			new_plane_state =
11622				drm_atomic_get_plane_state(state, plane);
11623
11624			if (IS_ERR(new_plane_state)) {
11625				ret = PTR_ERR(new_plane_state);
11626				drm_dbg_atomic(dev, "new_plane_state is BAD\n");
11627				goto fail;
11628			}
11629		}
11630	}
11631
11632	/*
11633	 * DC consults the zpos (layer_index in DC terminology) to determine the
11634	 * hw plane on which to enable the hw cursor (see
11635	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
11636	 * atomic state, so call drm helper to normalize zpos.
11637	 */
11638	ret = drm_atomic_normalize_zpos(dev, state);
11639	if (ret) {
11640		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
11641		goto fail;
11642	}
11643
11644	/*
11645	 * Determine whether cursors on each CRTC should be enabled in native or
11646	 * overlay mode.
11647	 */
11648	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11649		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11650
11651		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
11652					      &dm_new_crtc_state->cursor_mode);
11653		if (ret) {
11654			drm_dbg(dev, "Failed to determine cursor mode\n");
11655			goto fail;
11656		}
11657
11658		/*
11659		 * If overlay cursor is needed, DC cannot go through the
11660		 * native cursor update path. All enabled planes on the CRTC
11661		 * need to be added for DC to not disable a plane by mistake
11662		 */
11663		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
11664			ret = drm_atomic_add_affected_planes(state, crtc);
11665			if (ret)
11666				goto fail;
11667		}
11668	}
11669
11670	/* Remove existing planes if they are modified */
11671	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
11672
11673		ret = dm_update_plane_state(dc, state, plane,
11674					    old_plane_state,
11675					    new_plane_state,
11676					    false,
11677					    &lock_and_validation_needed,
11678					    &is_top_most_overlay);
11679		if (ret) {
11680			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
11681			goto fail;
11682		}
11683	}
11684
11685	/* Disable all crtcs which require disable */
11686	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11687		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11688					   old_crtc_state,
11689					   new_crtc_state,
11690					   false,
11691					   &lock_and_validation_needed);
11692		if (ret) {
11693			drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
11694			goto fail;
11695		}
11696	}
11697
11698	/* Enable all crtcs which require enable */
11699	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11700		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11701					   old_crtc_state,
11702					   new_crtc_state,
11703					   true,
11704					   &lock_and_validation_needed);
11705		if (ret) {
11706			drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
11707			goto fail;
11708		}
11709	}
11710
11711	/* Add new/modified planes */
11712	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
11713		ret = dm_update_plane_state(dc, state, plane,
11714					    old_plane_state,
11715					    new_plane_state,
11716					    true,
11717					    &lock_and_validation_needed,
11718					    &is_top_most_overlay);
11719		if (ret) {
11720			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
11721			goto fail;
11722		}
11723	}
11724
11725#if defined(CONFIG_DRM_AMD_DC_FP)
11726	if (dc_resource_is_dsc_encoding_supported(dc)) {
11727		ret = pre_validate_dsc(state, &dm_state, vars);
11728		if (ret != 0)
11729			goto fail;
11730	}
11731#endif
11732
11733	/* Run this here since we want to validate the streams we created */
11734	ret = drm_atomic_helper_check_planes(dev, state);
11735	if (ret) {
11736		drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
11737		goto fail;
11738	}
11739
11740	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11741		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11742		if (dm_new_crtc_state->mpo_requested)
11743			drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
11744	}
11745
11746	/* Check cursor restrictions */
11747	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11748		enum amdgpu_dm_cursor_mode required_cursor_mode;
11749		int is_rotated, is_scaled;
11750
11751		/* Overlay cursor not subject to native cursor restrictions */
11752		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11753		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
11754			continue;
11755
11756		/* Check for unsupported cursor rotation or scaling on DCN401 */
11757		if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
11758		    amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
11759			new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
11760
11761			is_rotated = new_cursor_state &&
11762				((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
11763			is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
11764				(new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));
11765
11766			if (is_rotated || is_scaled) {
11767				drm_dbg_driver(
11768					crtc->dev,
11769					"[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
11770					crtc->base.id, crtc->name);
11771				ret = -EINVAL;
11772				goto fail;
11773			}
11774		}
11775
11776		/* If HW can only do native cursor, check restrictions again */
11777		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
11778					      &required_cursor_mode);
11779		if (ret) {
11780			drm_dbg_driver(crtc->dev,
11781				       "[CRTC:%d:%s] Checking cursor mode failed\n",
11782				       crtc->base.id, crtc->name);
11783			goto fail;
11784		} else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
11785			drm_dbg_driver(crtc->dev,
11786				       "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
11787				       crtc->base.id, crtc->name);
11788			ret = -EINVAL;
11789			goto fail;
11790		}
11791	}
11792
11793	if (state->legacy_cursor_update) {
11794		/*
11795		 * This is a fast cursor update coming from the plane update
11796		 * helper, check if it can be done asynchronously for better
11797		 * performance.
11798		 */
11799		state->async_update =
11800			!drm_atomic_helper_async_check(dev, state);
11801
11802		/*
11803		 * Skip the remaining global validation if this is an async
11804		 * update. Cursor updates can be done without affecting
11805		 * state or bandwidth calcs and this avoids the performance
11806		 * penalty of locking the private state object and
11807		 * allocating a new dc_state.
11808		 */
11809		if (state->async_update)
11810			return 0;
11811	}
11812
11813	/* Check scaling and underscan changes */
11814	/* TODO: Scaling-change validation was removed due to the inability to commit
11815	 * a new stream into the context w/o causing a full reset. Need to
11816	 * decide how to handle this.
11817	 */
11818	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11819		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11820		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11821		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11822
11823		/* Skip any modesets/resets */
11824		if (!acrtc || drm_atomic_crtc_needs_modeset(
11825				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11826			continue;
11827
11828		/* Skip anything that is not a scale or underscan change */
11829		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11830			continue;
11831
11832		lock_and_validation_needed = true;
11833	}
11834
11835	/* set the slot info for each mst_state based on the link encoding format */
11836	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11837		struct amdgpu_dm_connector *aconnector;
11838		struct drm_connector *connector;
11839		struct drm_connector_list_iter iter;
11840		u8 link_coding_cap;
11841
11842		drm_connector_list_iter_begin(dev, &iter);
11843		drm_for_each_connector_iter(connector, &iter) {
11844			if (connector->index == mst_state->mgr->conn_base_id) {
11845				aconnector = to_amdgpu_dm_connector(connector);
11846				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11847				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11848
11849				break;
11850			}
11851		}
11852		drm_connector_list_iter_end(&iter);
11853	}
11854
11855	/*
11856	 * Streams and planes are reset when there are changes that affect
11857	 * bandwidth. Anything that affects bandwidth needs to go through
11858	 * DC global validation to ensure that the configuration can be applied
11859	 * to hardware.
11860	 *
11861	 * We currently have to stall out here in atomic_check for outstanding
11862	 * commits to finish in this case because our IRQ handlers reference
11863	 * DRM state directly - we can end up disabling interrupts too early
11864	 * if we don't.
11865	 *
11866	 * TODO: Remove this stall and drop DM state private objects.
11867	 */
11868	if (lock_and_validation_needed) {
11869		ret = dm_atomic_get_state(state, &dm_state);
11870		if (ret) {
11871			drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
11872			goto fail;
11873		}
11874
11875		ret = do_aquire_global_lock(dev, state);
11876		if (ret) {
11877			drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
11878			goto fail;
11879		}
11880
11881#if defined(CONFIG_DRM_AMD_DC_FP)
11882		if (dc_resource_is_dsc_encoding_supported(dc)) {
11883			ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
11884			if (ret) {
11885				drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
11886				ret = -EINVAL;
11887				goto fail;
11888			}
11889		}
11890#endif
11891
11892		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11893		if (ret) {
11894			drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
11895			goto fail;
11896		}
11897
11898		/*
11899		 * Perform validation of MST topology in the state:
11900		 * We need to perform MST atomic check before calling
11901		 * dc_validate_global_state(), or there is a chance
11902		 * to get stuck in an infinite loop and hang eventually.
11903		 */
11904		ret = drm_dp_mst_atomic_check(state);
11905		if (ret) {
11906			drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
11907			goto fail;
11908		}
11909		status = dc_validate_global_state(dc, dm_state->context, true);
11910		if (status != DC_OK) {
11911			drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
11912				       dc_status_to_str(status), status);
11913			ret = -EINVAL;
11914			goto fail;
11915		}
11916	} else {
11917		/*
11918		 * The commit is a fast update. Fast updates shouldn't change
11919		 * the DC context or affect global validation, and they can have their
11920		 * commit work done in parallel with other commits not touching
11921		 * the same resource. If we have a new DC context as part of
11922		 * the DM atomic state from validation we need to free it and
11923		 * retain the existing one instead.
11924		 *
11925		 * Furthermore, since the DM atomic state only contains the DC
11926		 * context and can safely be annulled, we can free the state
11927		 * and clear the associated private object now to free
11928		 * some memory and avoid a possible use-after-free later.
11929		 */
11930
11931		for (i = 0; i < state->num_private_objs; i++) {
11932			struct drm_private_obj *obj = state->private_objs[i].ptr;
11933
11934			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11935				int j = state->num_private_objs-1;
11936
11937				dm_atomic_destroy_state(obj,
11938						state->private_objs[i].state);
11939
11940				/* If i is not at the end of the array then the
11941				 * last element needs to be moved to where i was
11942				 * before the array can safely be truncated.
11943				 */
11944				if (i != j)
11945					state->private_objs[i] =
11946						state->private_objs[j];
11947
11948				state->private_objs[j].ptr = NULL;
11949				state->private_objs[j].state = NULL;
11950				state->private_objs[j].old_state = NULL;
11951				state->private_objs[j].new_state = NULL;
11952
11953				state->num_private_objs = j;
11954				break;
11955			}
11956		}
11957	}
11958
11959	/* Store the overall update type for use later in atomic check. */
11960	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11961		struct dm_crtc_state *dm_new_crtc_state =
11962			to_dm_crtc_state(new_crtc_state);
11963
11964		/*
11965		 * Only allow async flips for fast updates that don't change
11966		 * the FB pitch, the DCC state, rotation, mem_type, etc.
11967		 */
11968		if (new_crtc_state->async_flip &&
11969		    (lock_and_validation_needed ||
11970		     amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
11971			drm_dbg_atomic(crtc->dev,
11972				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
11973				       crtc->base.id, crtc->name);
11974			ret = -EINVAL;
11975			goto fail;
11976		}
11977
11978		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11979			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
11980	}
11981
11982	/* Must be success */
11983	WARN_ON(ret);
11984
11985	trace_amdgpu_dm_atomic_check_finish(state, ret);
11986
11987	return ret;
11988
11989fail:
11990	if (ret == -EDEADLK)
11991		drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n");
11992	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11993		drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n");
11994	else
11995		drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret);
11996
11997	trace_amdgpu_dm_atomic_check_finish(state, ret);
11998
11999	return ret;
12000}
12001
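/*
 * dm_edid_parser_send_cea() below hands one chunk (at most
 * DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA extension block to the DMUB
 * firmware. The firmware replies either with an ACK for an intermediate
 * chunk or, once the last chunk arrives, with the parsed AMD VSDB. A
 * condensed sketch of how a caller drives it (see parse_edid_cea_dmub()):
 *
 *	for (offset = 0; offset < len; offset += 8)
 *		if (!dm_edid_parser_send_cea(dm, offset, len,
 *					     &edid_ext[offset], 8, vsdb))
 *			return false;	// NACK, bad chunk or unknown reply
 */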
12002static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
12003		unsigned int offset,
12004		unsigned int total_length,
12005		u8 *data,
12006		unsigned int length,
12007		struct amdgpu_hdmi_vsdb_info *vsdb)
12008{
12009	bool res;
12010	union dmub_rb_cmd cmd;
12011	struct dmub_cmd_send_edid_cea *input;
12012	struct dmub_cmd_edid_cea_output *output;
12013
12014	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
12015		return false;
12016
12017	memset(&cmd, 0, sizeof(cmd));
12018
12019	input = &cmd.edid_cea.data.input;
12020
12021	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
12022	cmd.edid_cea.header.sub_type = 0;
12023	cmd.edid_cea.header.payload_bytes =
12024		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
12025	input->offset = offset;
12026	input->length = length;
12027	input->cea_total_length = total_length;
12028	memcpy(input->payload, data, length);
12029
12030	res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
12031	if (!res) {
12032		DRM_ERROR("EDID CEA parser failed\n");
12033		return false;
12034	}
12035
12036	output = &cmd.edid_cea.data.output;
12037
12038	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
12039		if (!output->ack.success) {
12040			DRM_ERROR("EDID CEA ack failed at offset %d\n",
12041					output->ack.offset);
12042		}
12043	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
12044		if (!output->amd_vsdb.vsdb_found)
12045			return false;
12046
12047		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
12048		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
12049		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
12050		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
12051	} else {
12052		DRM_WARN("Unknown EDID CEA parser results\n");
12053		return false;
12054	}
12055
12056	return true;
12057}
12058
12059static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
12060		u8 *edid_ext, int len,
12061		struct amdgpu_hdmi_vsdb_info *vsdb_info)
12062{
12063	int i;
12064
12065	/* send extension block to DMCU for parsing */
12066	for (i = 0; i < len; i += 8) {
12067		bool res;
12068		int offset;
12069
12070		/* send 8 bytes at a time */
12071		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
12072			return false;
12073
12074		if (i+8 == len) {
12075			/* entire EDID block sent, expect result */
12076			int version, min_rate, max_rate;
12077
12078			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
12079			if (res) {
12080				/* amd vsdb found */
12081				vsdb_info->freesync_supported = 1;
12082				vsdb_info->amd_vsdb_version = version;
12083				vsdb_info->min_refresh_rate_hz = min_rate;
12084				vsdb_info->max_refresh_rate_hz = max_rate;
12085				return true;
12086			}
12087			/* not amd vsdb */
12088			return false;
12089		}
12090
12091		/* check for ack */
12092		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
12093		if (!res)
12094			return false;
12095	}
12096
12097	return false;
12098}
12099
12100static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
12101		u8 *edid_ext, int len,
12102		struct amdgpu_hdmi_vsdb_info *vsdb_info)
12103{
12104	int i;
12105
12106	/* send extension block to DMUB for parsing */
12107	for (i = 0; i < len; i += 8) {
12108		/* send 8 bytes at a time */
12109		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
12110			return false;
12111	}
12112
12113	return vsdb_info->freesync_supported;
12114}
12115
12116static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
12117		u8 *edid_ext, int len,
12118		struct amdgpu_hdmi_vsdb_info *vsdb_info)
12119{
12120	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
12121	bool ret;
12122
12123	mutex_lock(&adev->dm.dc_lock);
12124	if (adev->dm.dmub_srv)
12125		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
12126	else
12127		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
12128	mutex_unlock(&adev->dm.dc_lock);
12129	return ret;
12130}
12131
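/*
 * parse_edid_displayid_vrr() below scans the DisplayID extension for a
 * tag-0x25 timing block and pulls the vertical refresh range out of it.
 * A worked example of the decode, using hypothetical block bytes:
 *
 *	edid_ext[j..]  = 0x25 0x01 0x09 ... payload ...
 *	min_vfreq      = edid_ext[j+9]  = 0x30              ->  48 Hz
 *	max_vfreq      = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8)
 *	               = 0x90 + (0x00 << 8)                 -> 144 Hz
 */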
12132static void parse_edid_displayid_vrr(struct drm_connector *connector,
12133				     const struct edid *edid)
12134{
12135	u8 *edid_ext = NULL;
12136	int i;
12137	int j = 0;
12138	u16 min_vfreq;
12139	u16 max_vfreq;
12140
12141	if (edid == NULL || edid->extensions == 0)
12142		return;
12143
12144	/* Find DisplayID extension */
12145	for (i = 0; i < edid->extensions; i++) {
12146		edid_ext = (void *)(edid + (i + 1));
12147		if (edid_ext[0] == DISPLAYID_EXT)
12148			break;
12149	}
12150
12151	if (edid_ext == NULL)
12152		return;
12153
12154	while (j < EDID_LENGTH) {
12155		/* Get dynamic video timing range from DisplayID if available */
12156		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25	&&
12157		    (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
12158			min_vfreq = edid_ext[j+9];
12159			if (edid_ext[j+1] & 7)
12160				max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
12161			else
12162				max_vfreq = edid_ext[j+10];
12163
12164			if (max_vfreq && min_vfreq) {
12165				connector->display_info.monitor_range.max_vfreq = max_vfreq;
12166				connector->display_info.monitor_range.min_vfreq = min_vfreq;
12167
12168				return;
12169			}
12170		}
12171		j++;
12172	}
12173}
12174
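/*
 * parse_amd_vsdb() below walks the DisplayID extension looking for the AMD
 * vendor-specific data block. The three OUI bytes are stored LSB first, so
 * they are folded back into one integer before the comparison. With
 * hypothetical block bytes ieee_id[] = { 0x1a, 0x00, 0x00 }:
 *
 *	ieeeId = (0x00 << 16) | (0x00 << 8) | 0x1a = 0x00001a
 *
 * which is then matched against
 * HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID and the block
 * version before the Replay capability bit is read.
 */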
12175static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
12176			  const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
12177{
12178	u8 *edid_ext = NULL;
12179	int i;
12180	int j = 0;
12181
12182	if (edid == NULL || edid->extensions == 0)
12183		return -ENODEV;
12184
12185	/* Find DisplayID extension */
12186	for (i = 0; i < edid->extensions; i++) {
12187		edid_ext = (void *)(edid + (i + 1));
12188		if (edid_ext[0] == DISPLAYID_EXT)
12189			break;
12190	}
12191
12192	while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
12193		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
12194		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
12195
12196		if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
12197				amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
12198			vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
12199			vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
12200			DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
12201
12202			return true;
12203		}
12204		j++;
12205	}
12206
12207	return false;
12208}
12209
12210static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
12211			       const struct edid *edid,
12212			       struct amdgpu_hdmi_vsdb_info *vsdb_info)
12213{
12214	u8 *edid_ext = NULL;
12215	int i;
12216	bool valid_vsdb_found = false;
12217
12218	/*----- drm_find_cea_extension() -----*/
12219	/* No EDID or EDID extensions */
12220	if (edid == NULL || edid->extensions == 0)
12221		return -ENODEV;
12222
12223	/* Find CEA extension */
12224	for (i = 0; i < edid->extensions; i++) {
12225		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
12226		if (edid_ext[0] == CEA_EXT)
12227			break;
12228	}
12229
12230	if (i == edid->extensions)
12231		return -ENODEV;
12232
12233	/*----- cea_db_offsets() -----*/
12234	if (edid_ext[0] != CEA_EXT)
12235		return -ENODEV;
12236
12237	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
12238
12239	return valid_vsdb_found ? i : -ENODEV;
12240}
12241
12242/**
12243 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
12244 *
12245 * @connector: Connector to query.
12246 * @drm_edid: DRM EDID from monitor
12247 *
12248 * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
12249 * track of some of the display information in the internal data struct used by
12250 * amdgpu_dm. This function checks which type of connector we need to set the
12251 * FreeSync parameters for.
12252 */
12253void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
12254				    const struct drm_edid *drm_edid)
12255{
12256	int i = 0;
12257	struct amdgpu_dm_connector *amdgpu_dm_connector =
12258			to_amdgpu_dm_connector(connector);
12259	struct dm_connector_state *dm_con_state = NULL;
12260	struct dc_sink *sink;
12261	struct amdgpu_device *adev = drm_to_adev(connector->dev);
12262	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
12263	const struct edid *edid;
12264	bool freesync_capable = false;
12265	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
12266
12267	if (!connector->state) {
12268		DRM_ERROR("%s - Connector has no state", __func__);
12269		goto update;
12270	}
12271
12272	sink = amdgpu_dm_connector->dc_sink ?
12273		amdgpu_dm_connector->dc_sink :
12274		amdgpu_dm_connector->dc_em_sink;
12275
12276	drm_edid_connector_update(connector, drm_edid);
12277
12278	if (!drm_edid || !sink) {
12279		dm_con_state = to_dm_connector_state(connector->state);
12280
12281		amdgpu_dm_connector->min_vfreq = 0;
12282		amdgpu_dm_connector->max_vfreq = 0;
12283		freesync_capable = false;
12284
12285		goto update;
12286	}
12287
12288	dm_con_state = to_dm_connector_state(connector->state);
12289
12290	if (!adev->dm.freesync_module)
12291		goto update;
12292
12293	edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
12294
12295	/* Some eDP panels only have the refresh rate range info in DisplayID */
12296	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
12297	     connector->display_info.monitor_range.max_vfreq == 0))
12298		parse_edid_displayid_vrr(connector, edid);
12299
12300	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
12301		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
12302		if (amdgpu_dm_connector->dc_link &&
12303		    amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
12304			amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
12305			amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
12306			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
12307				freesync_capable = true;
12308		}
12309
12310		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
12311
12312		if (vsdb_info.replay_mode) {
12313			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
12314			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
12315			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
12316		}
12317
12318	} else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
12319		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
12320		if (i >= 0 && vsdb_info.freesync_supported) {
12321			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
12322			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
12323			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
12324				freesync_capable = true;
12325
12326			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
12327			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
12328		}
12329	}
12330
12331	if (amdgpu_dm_connector->dc_link)
12332		as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
12333
12334	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
12335		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
12336		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
12337
12338			amdgpu_dm_connector->pack_sdp_v1_3 = true;
12339			amdgpu_dm_connector->as_type = as_type;
12340			amdgpu_dm_connector->vsdb_info = vsdb_info;
12341
12342			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
12343			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
12344			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
12345				freesync_capable = true;
12346
12347			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
12348			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
12349		}
12350	}
12351
12352update:
12353	if (dm_con_state)
12354		dm_con_state->freesync_capable = freesync_capable;
12355
12356	if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
12357	    amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
12358		amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
12359		amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
12360	}
12361
12362	if (connector->vrr_capable_property)
12363		drm_connector_set_vrr_capable_property(connector,
12364						       freesync_capable);
12365}
12366
12367void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
12368{
12369	struct amdgpu_device *adev = drm_to_adev(dev);
12370	struct dc *dc = adev->dm.dc;
12371	int i;
12372
12373	mutex_lock(&adev->dm.dc_lock);
12374	if (dc->current_state) {
12375		for (i = 0; i < dc->current_state->stream_count; ++i)
12376			dc->current_state->streams[i]
12377				->triggered_crtc_reset.enabled =
12378				adev->dm.force_timing_sync;
12379
12380		dm_enable_per_frame_crtc_master_sync(dc->current_state);
12381		dc_trigger_sync(dc, dc->current_state);
12382	}
12383	mutex_unlock(&adev->dm.dc_lock);
12384}
12385
12386static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
12387{
12388	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
12389		dc_exit_ips_for_hw_access(dc);
12390}
12391
12392void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
12393		       u32 value, const char *func_name)
12394{
12395#ifdef DM_CHECK_ADDR_0
12396	if (address == 0) {
12397		drm_err(adev_to_drm(ctx->driver_context),
12398			"invalid register write. address = 0\n");
12399		return;
12400	}
12401#endif
12402
12403	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
12404	cgs_write_register(ctx->cgs_device, address, value);
12405	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
12406}
12407
12408uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
12409			  const char *func_name)
12410{
12411	u32 value;
12412#ifdef DM_CHECK_ADDR_0
12413	if (address == 0) {
12414		drm_err(adev_to_drm(ctx->driver_context),
12415			"invalid register read; address = 0\n");
12416		return 0;
12417	}
12418#endif
12419
12420	if (ctx->dmub_srv &&
12421	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
12422	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
12423		ASSERT(false);
12424		return 0;
12425	}
12426
12427	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
12428
12429	value = cgs_read_register(ctx->cgs_device, address);
12430
12431	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
12432
12433	return value;
12434}
12435
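/*
 * The two helpers below wrap asynchronous DMUB requests (DPIA AUX and
 * SET_CONFIG) into synchronous calls: fire the request, then sleep on
 * adev->dm.dmub_aux_transfer_done, which is completed from the DMUB
 * notification path once the reply arrives. A condensed sketch of the
 * pattern used here:
 *
 *	mutex_lock(&adev->dm.dpia_aux_lock);
 *	if (dc_process_dmub_aux_transfer_async(dc, link_index, payload) &&
 *	    wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
 *					10 * HZ))
 *		result = adev->dm.dmub_notify->result;	// reply is valid
 *	reinit_completion(&adev->dm.dmub_aux_transfer_done);
 *	mutex_unlock(&adev->dm.dpia_aux_lock);
 */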
12436int amdgpu_dm_process_dmub_aux_transfer_sync(
12437		struct dc_context *ctx,
12438		unsigned int link_index,
12439		struct aux_payload *payload,
12440		enum aux_return_code_type *operation_result)
12441{
12442	struct amdgpu_device *adev = ctx->driver_context;
12443	struct dmub_notification *p_notify = adev->dm.dmub_notify;
12444	int ret = -1;
12445
12446	mutex_lock(&adev->dm.dpia_aux_lock);
12447	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
12448		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
12449		goto out;
12450	}
12451
12452	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
12453		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
12454		*operation_result = AUX_RET_ERROR_TIMEOUT;
12455		goto out;
12456	}
12457
12458	if (p_notify->result != AUX_RET_SUCCESS) {
12459		/*
12460		 * Transient states before tunneling is enabled could
12461		 * lead to this error. We can ignore this for now.
12462		 */
12463		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
12464			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
12465					payload->address, payload->length,
12466					p_notify->result);
12467		}
12468		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
12469		goto out;
12470	}
12471
12472
12473	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12474	if (!payload->write && p_notify->aux_reply.length &&
12475			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
12476
12477		if (payload->length != p_notify->aux_reply.length) {
12478			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
12479				p_notify->aux_reply.length,
12480					payload->address, payload->length);
12481			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
12482			goto out;
12483		}
12484
12485		memcpy(payload->data, p_notify->aux_reply.data,
12486				p_notify->aux_reply.length);
12487	}
12488
12489	/* success */
12490	ret = p_notify->aux_reply.length;
12491	*operation_result = p_notify->result;
12492out:
12493	reinit_completion(&adev->dm.dmub_aux_transfer_done);
12494	mutex_unlock(&adev->dm.dpia_aux_lock);
12495	return ret;
12496}
12497
12498int amdgpu_dm_process_dmub_set_config_sync(
12499		struct dc_context *ctx,
12500		unsigned int link_index,
12501		struct set_config_cmd_payload *payload,
12502		enum set_config_status *operation_result)
12503{
12504	struct amdgpu_device *adev = ctx->driver_context;
12505	bool is_cmd_complete;
12506	int ret;
12507
12508	mutex_lock(&adev->dm.dpia_aux_lock);
12509	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
12510			link_index, payload, adev->dm.dmub_notify);
12511
12512	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
12513		ret = 0;
12514		*operation_result = adev->dm.dmub_notify->sc_status;
12515	} else {
12516		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
12517		ret = -1;
12518		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
12519	}
12520
12521	if (!is_cmd_complete)
12522		reinit_completion(&adev->dm.dmub_aux_transfer_done);
12523	mutex_unlock(&adev->dm.dpia_aux_lock);
12524	return ret;
12525}
12526
12527bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
12528{
12529	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
12530}
12531
12532bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
12533{
12534	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
12535}