/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);

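/*
 * Map the DC dongle type reported in the link's DPCD caps onto the DRM
 * subconnector enum, so userspace can see what kind of dongle (if any)
 * is attached to a DisplayPort connector.
 */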
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

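/*
 * Refresh the DP subconnector property on a DRM connector. Without an
 * attached sink the subconnector is reported as Unknown; non-DP
 * connectors are left untouched.
 */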
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the number of vertical blanks, or 0 if the CRTC index is out of
 * range or its stream has not been initialized.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

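/*
 * Report the current scanout position of a CRTC in the packed register
 * format expected by the base driver: vblank start/end in *vbl, and the
 * horizontal/vertical beam position in *position.
 */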
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

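/*
 * Look up the amdgpu_crtc driven by a given OTG (output timing generator)
 * instance. Interrupt sources report the OTG instance rather than the DRM
 * CRTC index, so the IRQ handlers below use this to find the right CRTC.
 */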
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

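/*
 * VRR is considered active when the freesync state is either
 * ACTIVE_VARIABLE or ACTIVE_FIXED. The _irq variant reads the state
 * cached in the interrupt parameters, which is safe from IRQ context.
 */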
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

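/*
 * DC needs a vmin/vmax timing adjustment whenever the new state pins the
 * refresh rate (ACTIVE_FIXED) or the VRR active/inactive state differs
 * between the old and new CRTC state.
 */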
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the device and CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

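/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the device and CRTC
 *
 * Fires after the end of the front-porch. In VRR mode this is where core
 * vblank handling is done, since vblank timestamps are only valid outside
 * the front-porch. Also tracks the measured frame duration for refresh
 * rate tracing and runs BTR processing on pre-DCE12 ASICs.
 */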
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of front-porch
		 * in vrr mode, as vblank timestamping will give valid results
		 * only once scanout is past the front-porch. This will also
		 * deliver page-flip completion events that have been queued to
		 * us if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handle CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

#define DMUB_TRACE_MAX_READ 64

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB notifications
 * and draining the DMUB outbox0 trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD implementation */
		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

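/*
 * Register the audio component and populate one audio pin per audio
 * endpoint exposed by DC, so the HDA driver can query ELDs through the
 * component framework.
 */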
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

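/*
 * Bring up the DMUB (Display Microcontroller Unit B) hardware: copy the
 * firmware and VBIOS images into the framebuffer regions reserved in
 * dm_dmub_sw_init(), program the hardware parameters, and wait for the
 * firmware to auto-load before creating the DC-side DMUB server.
 */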
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
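/*
 * Fill in the DC physical address space configuration from the GMC
 * state: system aperture bounds, AGP window, framebuffer base and the
 * GART page table location, all in the encodings DC expects.
 */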
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

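/*
 * Request the DMCU firmware for ASICs that still need it and register it
 * with the PSP loader. ASICs where DMCU functionality has moved into DMUB
 * (or that never had a DMCU) return early without loading anything.
 */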
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

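/*
 * Software-side DMUB setup: pick the firmware image and DMUB ASIC variant
 * for this chip, request and validate the firmware, create the DMUB
 * service, and carve out the framebuffer regions the firmware needs.
 */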
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case CHIP_BEIGE_GOBY:
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case CHIP_YELLOW_CARP:
		dmub_asic = DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

 1683static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 1684{
 1685	struct amdgpu_dm_connector *aconnector;
 1686	struct drm_connector *connector;
 1687	struct drm_connector_list_iter iter;
 1688	int ret = 0;
 1689
 1690	drm_connector_list_iter_begin(dev, &iter);
 1691	drm_for_each_connector_iter(connector, &iter) {
 1692		aconnector = to_amdgpu_dm_connector(connector);
 1693		if (aconnector->dc_link->type == dc_connection_mst_branch &&
 1694		    aconnector->mst_mgr.aux) {
 1695			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
 1696					 aconnector,
 1697					 aconnector->base.base.id);
 1698
 1699			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
 1700			if (ret < 0) {
 1701				DRM_ERROR("DM_MST: Failed to start MST\n");
 1702				aconnector->dc_link->type =
 1703					dc_connection_single;
 1704				break;
 1705			}
 1706		}
 1707	}
 1708	drm_connector_list_iter_end(&iter);
 1709
 1710	return ret;
 1711}
 1712
 1713static int dm_late_init(void *handle)
 1714{
 1715	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 1716
 1717	struct dmcu_iram_parameters params;
 1718	unsigned int linear_lut[16];
 1719	int i;
 1720	struct dmcu *dmcu = NULL;
 1721
 1722	dmcu = adev->dm.dc->res_pool->dmcu;
 1723
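	/* Identity ramp in 16 steps: 0x0000, 0x1111, 0x2222, ..., 0xFFFF. */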
 1724	for (i = 0; i < 16; i++)
 1725		linear_lut[i] = 0xFFFF * i / 15;
 1726
 1727	params.set = 0;
 1728	params.backlight_ramping_override = false;
 1729	params.backlight_ramping_start = 0xCCCC;
 1730	params.backlight_ramping_reduction = 0xCCCCCCCC;
 1731	params.backlight_lut_array_size = 16;
 1732	params.backlight_lut_array = linear_lut;
 1733
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F (655)
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
 1742	if (dmcu) {
 1743		if (!dmcu_load_iram(dmcu, params))
 1744			return -EINVAL;
 1745	} else if (adev->dm.dc->ctx->dmub_srv) {
 1746		struct dc_link *edp_links[MAX_NUM_EDP];
 1747		int edp_num;
 1748
 1749		get_edp_links(adev->dm.dc, edp_links, &edp_num);
 1750		for (i = 0; i < edp_num; i++) {
 1751			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
 1752				return -EINVAL;
 1753		}
 1754	}
 1755
 1756	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 1757}
 1758
 1759static void s3_handle_mst(struct drm_device *dev, bool suspend)
 1760{
 1761	struct amdgpu_dm_connector *aconnector;
 1762	struct drm_connector *connector;
 1763	struct drm_connector_list_iter iter;
 1764	struct drm_dp_mst_topology_mgr *mgr;
 1765	int ret;
 1766	bool need_hotplug = false;
 1767
 1768	drm_connector_list_iter_begin(dev, &iter);
 1769	drm_for_each_connector_iter(connector, &iter) {
 1770		aconnector = to_amdgpu_dm_connector(connector);
 1771		if (aconnector->dc_link->type != dc_connection_mst_branch ||
 1772		    aconnector->mst_port)
 1773			continue;
 1774
 1775		mgr = &aconnector->mst_mgr;
 1776
 1777		if (suspend) {
 1778			drm_dp_mst_topology_mgr_suspend(mgr);
 1779		} else {
 1780			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
 1781			if (ret < 0) {
 1782				drm_dp_mst_topology_mgr_set_mst(mgr, false);
 1783				need_hotplug = true;
 1784			}
 1785		}
 1786	}
 1787	drm_connector_list_iter_end(&iter);
 1788
 1789	if (need_hotplug)
 1790		drm_kms_helper_hotplug_event(dev);
 1791}
 1792
 1793static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
 1794{
 1795	struct smu_context *smu = &adev->smu;
 1796	int ret = 0;
 1797
 1798	if (!is_support_sw_smu(adev))
 1799		return 0;
 1800
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
 1832	case CHIP_NAVI10:
 1833	case CHIP_NAVI14:
 1834	case CHIP_NAVI12:
 1835		break;
 1836	default:
 1837		return 0;
 1838	}
 1839
 1840	ret = smu_write_watermarks_table(smu);
 1841	if (ret) {
 1842		DRM_ERROR("Failed to update WMTABLE!\n");
 1843		return ret;
 1844	}
 1845
 1846	return 0;
 1847}
 1848
 1849/**
 1850 * dm_hw_init() - Initialize DC device
 1851 * @handle: The base driver device containing the amdgpu_dm device.
 1852 *
 1853 * Initialize the &struct amdgpu_display_manager device. This involves calling
 1854 * the initializers of each DM component, then populating the struct with them.
 1855 *
 1856 * Although the function implies hardware initialization, both hardware and
 1857 * software are initialized here. Splitting them out to their relevant init
 1858 * hooks is a future TODO item.
 1859 *
 1860 * Some notable things that are initialized here:
 1861 *
 1862 * - Display Core, both software and hardware
 1863 * - DC modules that we need (freesync and color management)
 1864 * - DRM software states
 1865 * - Interrupt sources and handlers
 1866 * - Vblank support
 1867 * - Debug FS entries, if enabled
 1868 */
 1869static int dm_hw_init(void *handle)
 1870{
 1871	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 1872	/* Create DAL display manager */
 1873	amdgpu_dm_init(adev);
 1874	amdgpu_dm_hpd_init(adev);
 1875
 1876	return 0;
 1877}
 1878
 1879/**
 1880 * dm_hw_fini() - Teardown DC device
 1881 * @handle: The base driver device containing the amdgpu_dm device.
 1882 *
 1883 * Teardown components within &struct amdgpu_display_manager that require
 1884 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 1885 * were loaded. Also flush IRQ workqueues and disable them.
 1886 */
 1887static int dm_hw_fini(void *handle)
 1888{
 1889	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 1890
 1891	amdgpu_dm_hpd_fini(adev);
 1892
 1893	amdgpu_dm_irq_fini(adev);
 1894	amdgpu_dm_fini(adev);
 1895	return 0;
 1896}
 1897
 1898
 1899static int dm_enable_vblank(struct drm_crtc *crtc);
 1900static void dm_disable_vblank(struct drm_crtc *crtc);
 1901
 1902static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
 1903				 struct dc_state *state, bool enable)
 1904{
 1905	enum dc_irq_source irq_source;
 1906	struct amdgpu_crtc *acrtc;
 1907	int rc = -EBUSY;
 1908	int i = 0;
 1909
 1910	for (i = 0; i < state->stream_count; i++) {
 1911		acrtc = get_crtc_by_otg_inst(
 1912				adev, state->stream_status[i].primary_otg_inst);
 1913
 1914		if (acrtc && state->stream_status[i].plane_count != 0) {
 1915			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
 1916			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
 1918				      acrtc->crtc_id, enable ? "en" : "dis", rc);
 1919			if (rc)
 1920				DRM_WARN("Failed to %s pflip interrupts\n",
 1921					 enable ? "enable" : "disable");
 1922
 1923			if (enable) {
 1924				rc = dm_enable_vblank(&acrtc->base);
 1925				if (rc)
 1926					DRM_WARN("Failed to enable vblank interrupts\n");
 1927			} else {
 1928				dm_disable_vblank(&acrtc->base);
 1929			}
		}
	}
}
 1935
 1936static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 1937{
 1938	struct dc_state *context = NULL;
 1939	enum dc_status res = DC_ERROR_UNEXPECTED;
 1940	int i;
 1941	struct dc_stream_state *del_streams[MAX_PIPES];
 1942	int del_streams_count = 0;
 1943
 1944	memset(del_streams, 0, sizeof(del_streams));
 1945
 1946	context = dc_create_state(dc);
 1947	if (context == NULL)
 1948		goto context_alloc_fail;
 1949
 1950	dc_resource_state_copy_construct_current(dc, context);
 1951
 1952	/* First remove from context all streams */
 1953	for (i = 0; i < context->stream_count; i++) {
 1954		struct dc_stream_state *stream = context->streams[i];
 1955
 1956		del_streams[del_streams_count++] = stream;
 1957	}
 1958
 1959	/* Remove all planes for removed streams and then remove the streams */
 1960	for (i = 0; i < del_streams_count; i++) {
 1961		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
 1962			res = DC_FAIL_DETACH_SURFACES;
 1963			goto fail;
 1964		}
 1965
 1966		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
 1967		if (res != DC_OK)
 1968			goto fail;
 1969	}
 1972	res = dc_validate_global_state(dc, context, false);
 1973
 1974	if (res != DC_OK) {
 1975		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
 1976		goto fail;
 1977	}
 1978
 1979	res = dc_commit_state(dc, context);
 1980
 1981fail:
 1982	dc_release_state(context);
 1983
 1984context_alloc_fail:
 1985	return res;
 1986}
 1987
 1988static int dm_suspend(void *handle)
 1989{
 1990	struct amdgpu_device *adev = handle;
 1991	struct amdgpu_display_manager *dm = &adev->dm;
 1992	int ret = 0;
 1993
 1994	if (amdgpu_in_reset(adev)) {
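		/*
		 * Note: dc_lock is intentionally held across the GPU reset;
		 * it is released in the reset path of dm_resume().
		 */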
 1995		mutex_lock(&dm->dc_lock);
 1996
 1997#if defined(CONFIG_DRM_AMD_DC_DCN)
 1998		dc_allow_idle_optimizations(adev->dm.dc, false);
 1999#endif
 2000
 2001		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
 2002
 2003		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 2004
 2005		amdgpu_dm_commit_zero_streams(dm->dc);
 2006
 2007		amdgpu_dm_irq_suspend(adev);
 2008
 2009		return ret;
 2010	}
 2011
 2012	WARN_ON(adev->dm.cached_state);
 2013	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
 2014
 2015	s3_handle_mst(adev_to_drm(adev), true);
 2016
 2017	amdgpu_dm_irq_suspend(adev);
 2018
 2019	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 2020
 2021	return 0;
 2022}
 2023
 2024static struct amdgpu_dm_connector *
 2025amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 2026					     struct drm_crtc *crtc)
 2027{
 2028	uint32_t i;
 2029	struct drm_connector_state *new_con_state;
 2030	struct drm_connector *connector;
 2031	struct drm_crtc *crtc_from_state;
 2032
 2033	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 2034		crtc_from_state = new_con_state->crtc;
 2035
 2036		if (crtc_from_state == crtc)
 2037			return to_amdgpu_dm_connector(connector);
 2038	}
 2039
 2040	return NULL;
 2041}
 2042
 2043static void emulated_link_detect(struct dc_link *link)
 2044{
 2045	struct dc_sink_init_data sink_init_data = { 0 };
 2046	struct display_sink_capability sink_caps = { 0 };
 2047	enum dc_edid_status edid_status;
 2048	struct dc_context *dc_ctx = link->ctx;
 2049	struct dc_sink *sink = NULL;
 2050	struct dc_sink *prev_sink = NULL;
 2051
 2052	link->type = dc_connection_none;
 2053	prev_sink = link->local_sink;
 2054
 2055	if (prev_sink)
 2056		dc_sink_release(prev_sink);
 2057
 2058	switch (link->connector_signal) {
 2059	case SIGNAL_TYPE_HDMI_TYPE_A: {
 2060		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 2061		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
 2062		break;
 2063	}
 2064
 2065	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
 2066		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 2067		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 2068		break;
 2069	}
 2070
 2071	case SIGNAL_TYPE_DVI_DUAL_LINK: {
 2072		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 2073		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
 2074		break;
 2075	}
 2076
 2077	case SIGNAL_TYPE_LVDS: {
 2078		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
 2079		sink_caps.signal = SIGNAL_TYPE_LVDS;
 2080		break;
 2081	}
 2082
 2083	case SIGNAL_TYPE_EDP: {
 2084		sink_caps.transaction_type =
 2085			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
 2086		sink_caps.signal = SIGNAL_TYPE_EDP;
 2087		break;
 2088	}
 2089
 2090	case SIGNAL_TYPE_DISPLAY_PORT: {
 2091		sink_caps.transaction_type =
 2092			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
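		/*
		 * Unlike the cases above, DP reports a virtual signal here
		 * (presumably so the emulated sink is not treated as a real
		 * DP link).
		 */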
 2093		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
 2094		break;
 2095	}
 2096
 2097	default:
 2098		DC_ERROR("Invalid connector type! signal:%d\n",
 2099			link->connector_signal);
 2100		return;
 2101	}
 2102
 2103	sink_init_data.link = link;
 2104	sink_init_data.sink_signal = sink_caps.signal;
 2105
 2106	sink = dc_sink_create(&sink_init_data);
 2107	if (!sink) {
 2108		DC_ERROR("Failed to create sink!\n");
 2109		return;
 2110	}
 2111
 2112	/* dc_sink_create returns a new reference */
 2113	link->local_sink = sink;
 2114
 2115	edid_status = dm_helpers_read_local_edid(
 2116			link->ctx,
 2117			link,
 2118			sink);
 2119
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
 2124
 2125static void dm_gpureset_commit_state(struct dc_state *dc_state,
 2126				     struct amdgpu_display_manager *dm)
 2127{
 2128	struct {
 2129		struct dc_surface_update surface_updates[MAX_SURFACES];
 2130		struct dc_plane_info plane_infos[MAX_SURFACES];
 2131		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 2132		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 2133		struct dc_stream_update stream_update;
	} *bundle;
 2135	int k, m;
 2136
 2137	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 2138
 2139	if (!bundle) {
 2140		dm_error("Failed to allocate update bundle\n");
 2141		goto cleanup;
 2142	}
 2143
	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}
 2158
 2159cleanup:
 2160	kfree(bundle);
 2161
 2162	return;
 2163}
 2164
 2165static void dm_set_dpms_off(struct dc_link *link)
 2166{
 2167	struct dc_stream_state *stream_state;
 2168	struct amdgpu_dm_connector *aconnector = link->priv;
 2169	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
 2170	struct dc_stream_update stream_update;
 2171	bool dpms_off = true;
 2172
 2173	memset(&stream_update, 0, sizeof(stream_update));
 2174	stream_update.dpms_off = &dpms_off;
 2175
 2176	mutex_lock(&adev->dm.dc_lock);
 2177	stream_state = dc_stream_find_from_link(link);
 2178
 2179	if (stream_state == NULL) {
 2180		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
 2181		mutex_unlock(&adev->dm.dc_lock);
 2182		return;
 2183	}
 2184
 2185	stream_update.stream = stream_state;
 2186	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
 2187				     stream_state, &stream_update,
 2188				     stream_state->ctx->dc->current_state);
 2189	mutex_unlock(&adev->dm.dc_lock);
 2190}
 2191
 2192static int dm_resume(void *handle)
 2193{
 2194	struct amdgpu_device *adev = handle;
 2195	struct drm_device *ddev = adev_to_drm(adev);
 2196	struct amdgpu_display_manager *dm = &adev->dm;
 2197	struct amdgpu_dm_connector *aconnector;
 2198	struct drm_connector *connector;
 2199	struct drm_connector_list_iter iter;
 2200	struct drm_crtc *crtc;
 2201	struct drm_crtc_state *new_crtc_state;
 2202	struct dm_crtc_state *dm_new_crtc_state;
 2203	struct drm_plane *plane;
 2204	struct drm_plane_state *new_plane_state;
 2205	struct dm_plane_state *dm_new_plane_state;
 2206	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 2207	enum dc_connection_type new_connection_type = dc_connection_none;
 2208	struct dc_state *dc_state;
 2209	int i, r, j;
 2210
 2211	if (amdgpu_in_reset(adev)) {
 2212		dc_state = dm->cached_dc_state;
 2213
 2214		r = dm_dmub_hw_init(adev);
 2215		if (r)
 2216			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 2217
 2218		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 2219		dc_resume(dm->dc);
 2220
 2221		amdgpu_dm_irq_resume_early(adev);
 2222
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
 2230#if defined(CONFIG_DRM_AMD_DC_DCN)
 2231		/*
 2232		 * Resource allocation happens for link encoders for newer ASIC in
 2233		 * dc_validate_global_state, so we need to revalidate it.
 2234		 *
 2235		 * This shouldn't fail (it passed once before), so warn if it does.
 2236		 */
 2237		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
 2238#endif
 2239
 2240		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 2241
 2242		dm_gpureset_commit_state(dm->cached_dc_state, dm);
 2243
 2244		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
 2245
 2246		dc_release_state(dm->cached_dc_state);
 2247		dm->cached_dc_state = NULL;
 2248
 2249		amdgpu_dm_irq_resume_late(adev);
 2250
 2251		mutex_unlock(&dm->dc_lock);
 2252
 2253		return 0;
 2254	}
 2255	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
 2256	dc_release_state(dm_state->context);
 2257	dm_state->context = dc_create_state(dm->dc);
 2258	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 2259	dc_resource_state_construct(dm->dc, dm_state->context);
 2260
 2261	/* Before powering on DC we need to re-initialize DMUB. */
 2262	r = dm_dmub_hw_init(adev);
 2263	if (r)
 2264		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 2265
 2266	/* power on hardware */
 2267	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 2268
 2269	/* program HPD filter */
 2270	dc_resume(dm->dc);
 2271
 2272	/*
 2273	 * early enable HPD Rx IRQ, should be done before set mode as short
 2274	 * pulse interrupts are used for MST
 2275	 */
 2276	amdgpu_dm_irq_resume_early(adev);
 2277
	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
 2282	drm_connector_list_iter_begin(ddev, &iter);
 2283	drm_for_each_connector_iter(connector, &iter) {
 2284		aconnector = to_amdgpu_dm_connector(connector);
 2285
		/*
		 * This is the case when traversing through already created
		 * MST connectors; those should be skipped.
		 */
 2290		if (aconnector->mst_port)
 2291			continue;
 2292
 2293		mutex_lock(&aconnector->hpd_lock);
 2294		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
 2295			DRM_ERROR("KMS: Failed to detect connector\n");
 2296
 2297		if (aconnector->base.force && new_connection_type == dc_connection_none)
 2298			emulated_link_detect(aconnector->dc_link);
 2299		else
 2300			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 2301
 2302		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
 2303			aconnector->fake_enable = false;
 2304
 2305		if (aconnector->dc_sink)
 2306			dc_sink_release(aconnector->dc_sink);
 2307		aconnector->dc_sink = NULL;
 2308		amdgpu_dm_update_connector_after_detect(aconnector);
 2309		mutex_unlock(&aconnector->hpd_lock);
 2310	}
 2311	drm_connector_list_iter_end(&iter);
 2312
 2313	/* Force mode set in atomic commit */
 2314	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
 2315		new_crtc_state->active_changed = true;
 2316
 2317	/*
 2318	 * atomic_check is expected to create the dc states. We need to release
 2319	 * them here, since they were duplicated as part of the suspend
 2320	 * procedure.
 2321	 */
 2322	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
 2323		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 2324		if (dm_new_crtc_state->stream) {
 2325			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
 2326			dc_stream_release(dm_new_crtc_state->stream);
 2327			dm_new_crtc_state->stream = NULL;
 2328		}
 2329	}
 2330
 2331	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
 2332		dm_new_plane_state = to_dm_plane_state(new_plane_state);
 2333		if (dm_new_plane_state->dc_state) {
 2334			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
 2335			dc_plane_state_release(dm_new_plane_state->dc_state);
 2336			dm_new_plane_state->dc_state = NULL;
 2337		}
 2338	}
 2339
 2340	drm_atomic_helper_resume(ddev, dm->cached_state);
 2341
 2342	dm->cached_state = NULL;
 2343
 2344	amdgpu_dm_irq_resume_late(adev);
 2345
 2346	amdgpu_dm_smu_write_watermarks_table(adev);
 2347
 2348	return 0;
 2349}
 2350
 2351/**
 2352 * DOC: DM Lifecycle
 2353 *
 2354 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
 2355 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 2356 * the base driver's device list to be initialized and torn down accordingly.
 2357 *
 2358 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 2359 */
 2360
 2361static const struct amd_ip_funcs amdgpu_dm_funcs = {
 2362	.name = "dm",
 2363	.early_init = dm_early_init,
 2364	.late_init = dm_late_init,
 2365	.sw_init = dm_sw_init,
 2366	.sw_fini = dm_sw_fini,
 2367	.early_fini = amdgpu_dm_early_fini,
 2368	.hw_init = dm_hw_init,
 2369	.hw_fini = dm_hw_fini,
 2370	.suspend = dm_suspend,
 2371	.resume = dm_resume,
 2372	.is_idle = dm_is_idle,
 2373	.wait_for_idle = dm_wait_for_idle,
 2374	.check_soft_reset = dm_check_soft_reset,
 2375	.soft_reset = dm_soft_reset,
 2376	.set_clockgating_state = dm_set_clockgating_state,
 2377	.set_powergating_state = dm_set_powergating_state,
 2378};
 2379
const struct amdgpu_ip_block_version dm_ip_block = {
 2382	.type = AMD_IP_BLOCK_TYPE_DCE,
 2383	.major = 1,
 2384	.minor = 0,
 2385	.rev = 0,
 2386	.funcs = &amdgpu_dm_funcs,
 2387};
 2390/**
 2391 * DOC: atomic
 2392 *
 2393 * *WIP*
 2394 */
 2395
 2396static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 2397	.fb_create = amdgpu_display_user_framebuffer_create,
 2398	.get_format_info = amd_get_format_info,
 2399	.output_poll_changed = drm_fb_helper_output_poll_changed,
 2400	.atomic_check = amdgpu_dm_atomic_check,
 2401	.atomic_commit = drm_atomic_helper_commit,
 2402};
 2403
 2404static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 2405	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
 2406};
 2407
 2408static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 2409{
 2410	u32 max_cll, min_cll, max, min, q, r;
 2411	struct amdgpu_dm_backlight_caps *caps;
 2412	struct amdgpu_display_manager *dm;
 2413	struct drm_connector *conn_base;
 2414	struct amdgpu_device *adev;
 2415	struct dc_link *link = NULL;
 2416	static const u8 pre_computed_values[] = {
 2417		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
 2418		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
 2419
 2420	if (!aconnector || !aconnector->dc_link)
 2421		return;
 2422
 2423	link = aconnector->dc_link;
 2424	if (link->connector_signal != SIGNAL_TYPE_EDP)
 2425		return;
 2426
 2427	conn_base = &aconnector->base;
 2428	adev = drm_to_adev(conn_base->dev);
 2429	dm = &adev->dm;
 2430	caps = &dm->backlight_caps;
 2431	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 2432	caps->aux_support = false;
 2433	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
 2434	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 2435
 2436	if (caps->ext_caps->bits.oled == 1 /*||
 2437	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
 2438	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
 2439		caps->aux_support = true;
 2440
 2441	if (amdgpu_backlight == 0)
 2442		caps->aux_support = false;
 2443	else if (amdgpu_backlight == 1)
 2444		caps->aux_support = true;
 2445
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would require floating-point precision;
	 * to avoid this complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Substituting this into
	 * the Luminance expression gives 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the values of 50*2**(r/32). For pre-computing
	 * the values we used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
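	/* For example, max_cll = 65 gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * close to the exact 50*2**(65/32) ~= 204.4.
	 */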
 2461	q = max_cll >> 5;
 2462	r = max_cll % 32;
 2463	max = (1 << q) * pre_computed_values[r];
 2464
 2465	// min luminance: maxLum * (CV/255)^2 / 100
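	// Note: since CV is a one-byte value, q below is 0 or 1, so with the
	// integer division min evaluates to 0 in practice.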
 2466	q = DIV_ROUND_CLOSEST(min_cll, 255);
 2467	min = max * DIV_ROUND_CLOSEST((q * q), 100);
 2468
 2469	caps->aux_max_input_signal = max;
 2470	caps->aux_min_input_signal = min;
 2471}
 2472
 2473void amdgpu_dm_update_connector_after_detect(
 2474		struct amdgpu_dm_connector *aconnector)
 2475{
 2476	struct drm_connector *connector = &aconnector->base;
 2477	struct drm_device *dev = connector->dev;
 2478	struct dc_sink *sink;
 2479
 2480	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
 2482		return;
 2483
 2484	sink = aconnector->dc_link->local_sink;
 2485	if (sink)
 2486		dc_sink_retain(sink);
 2487
	/*
	 * EDID-managed connectors get their first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink, depending on link status.
	 * Skip if already done during boot.
	 */
 2493	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
 2494			&& aconnector->dc_em_sink) {
 2495
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because on resume connector->sink is set
		 * to NULL.
		 */
 2500		mutex_lock(&dev->mode_config.mutex);
 2501
 2502		if (sink) {
 2503			if (aconnector->dc_sink) {
 2504				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink's refcount because the link no longer
				 * points to it after disconnect; otherwise,
				 * the next crtc-to-connector reshuffle by UMD
				 * would trigger an unwanted dc_sink release.
				 */
 2511				dc_sink_release(aconnector->dc_sink);
 2512			}
 2513			aconnector->dc_sink = sink;
 2514			dc_sink_retain(aconnector->dc_sink);
 2515			amdgpu_dm_update_freesync_caps(connector,
 2516					aconnector->edid);
 2517		} else {
 2518			amdgpu_dm_update_freesync_caps(connector, NULL);
 2519			if (!aconnector->dc_sink) {
 2520				aconnector->dc_sink = aconnector->dc_em_sink;
 2521				dc_sink_retain(aconnector->dc_sink);
 2522			}
 2523		}
 2524
 2525		mutex_unlock(&dev->mode_config.mutex);
 2526
 2527		if (sink)
 2528			dc_sink_release(sink);
 2529		return;
 2530	}
 2531
 2532	/*
 2533	 * TODO: temporary guard to look for proper fix
 2534	 * if this sink is MST sink, we should not do anything
 2535	 */
 2536	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 2537		dc_sink_release(sink);
 2538		return;
 2539	}
 2540
 2541	if (aconnector->dc_sink == sink) {
 2542		/*
 2543		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
 2544		 * Do nothing!!
 2545		 */
 2546		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
 2547				aconnector->connector_id);
 2548		if (sink)
 2549			dc_sink_release(sink);
 2550		return;
 2551	}
 2552
 2553	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
 2554		aconnector->connector_id, aconnector->dc_sink, sink);
 2555
 2556	mutex_lock(&dev->mode_config.mutex);
 2557
 2558	/*
 2559	 * 1. Update status of the drm connector
 2560	 * 2. Send an event and let userspace tell us what to do
 2561	 */
 2562	if (sink) {
 2563		/*
 2564		 * TODO: check if we still need the S3 mode update workaround.
 2565		 * If yes, put it here.
 2566		 */
 2567		if (aconnector->dc_sink) {
 2568			amdgpu_dm_update_freesync_caps(connector, NULL);
 2569			dc_sink_release(aconnector->dc_sink);
 2570		}
 2571
 2572		aconnector->dc_sink = sink;
 2573		dc_sink_retain(aconnector->dc_sink);
 2574		if (sink->dc_edid.length == 0) {
 2575			aconnector->edid = NULL;
 2576			if (aconnector->dc_link->aux_mode) {
 2577				drm_dp_cec_unset_edid(
 2578					&aconnector->dm_dp_aux.aux);
 2579			}
 2580		} else {
 2581			aconnector->edid =
 2582				(struct edid *)sink->dc_edid.raw_edid;
 2583
 2584			drm_connector_update_edid_property(connector,
 2585							   aconnector->edid);
 2586			if (aconnector->dc_link->aux_mode)
 2587				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
 2588						    aconnector->edid);
 2589		}
 2590
 2591		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 2592		update_connector_ext_caps(aconnector);
 2593	} else {
 2594		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
 2595		amdgpu_dm_update_freesync_caps(connector, NULL);
 2596		drm_connector_update_edid_property(connector, NULL);
 2597		aconnector->num_modes = 0;
 2598		dc_sink_release(aconnector->dc_sink);
 2599		aconnector->dc_sink = NULL;
 2600		aconnector->edid = NULL;
 2601#ifdef CONFIG_DRM_AMD_DC_HDCP
 2602		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
 2603		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
 2604			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 2605#endif
 2606	}
 2607
 2608	mutex_unlock(&dev->mode_config.mutex);
 2609
 2610	update_subconnector_property(aconnector);
 2611
 2612	if (sink)
 2613		dc_sink_release(sink);
 2614}
 2615
 2616static void handle_hpd_irq(void *param)
 2617{
 2618	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 2619	struct drm_connector *connector = &aconnector->base;
 2620	struct drm_device *dev = connector->dev;
 2621	enum dc_connection_type new_connection_type = dc_connection_none;
 2622	struct amdgpu_device *adev = drm_to_adev(dev);
 2623#ifdef CONFIG_DRM_AMD_DC_HDCP
 2624	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 2625#endif
 2626
 2627	if (adev->dm.disable_hpd_irq)
 2628		return;
 2629
	/*
	 * On failure, or for MST, there is no need to update the connector
	 * status or notify the OS, since MST handles this in its own context.
	 */
 2634	mutex_lock(&aconnector->hpd_lock);
 2635
 2636#ifdef CONFIG_DRM_AMD_DC_HDCP
 2637	if (adev->dm.hdcp_workqueue) {
 2638		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
 2639		dm_con_state->update_hdcp = true;
 2640	}
 2641#endif
 2642	if (aconnector->fake_enable)
 2643		aconnector->fake_enable = false;
 2644
 2645	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
 2646		DRM_ERROR("KMS: Failed to detect connector\n");
 2647
 2648	if (aconnector->base.force && new_connection_type == dc_connection_none) {
 2649		emulated_link_detect(aconnector->dc_link);
 2652		drm_modeset_lock_all(dev);
 2653		dm_restore_drm_connector_state(dev, connector);
 2654		drm_modeset_unlock_all(dev);
 2655
 2656		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 2657			drm_kms_helper_hotplug_event(dev);
 2658
 2659	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
 2660		if (new_connection_type == dc_connection_none &&
 2661		    aconnector->dc_link->type == dc_connection_none)
 2662			dm_set_dpms_off(aconnector->dc_link);
 2663
 2664		amdgpu_dm_update_connector_after_detect(aconnector);
 2665
 2666		drm_modeset_lock_all(dev);
 2667		dm_restore_drm_connector_state(dev, connector);
 2668		drm_modeset_unlock_all(dev);
 2669
 2670		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 2671			drm_kms_helper_hotplug_event(dev);
 2672	}
	mutex_unlock(&aconnector->hpd_lock);
}
 2676
 2677static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
 2678{
 2679	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
 2680	uint8_t dret;
 2681	bool new_irq_handled = false;
 2682	int dpcd_addr;
 2683	int dpcd_bytes_to_read;
 2684
 2685	const int max_process_count = 30;
 2686	int process_count = 0;
 2687
 2688	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
 2689
 2690	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
 2691		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
 2692		/* DPCD 0x200 - 0x201 for downstream IRQ */
 2693		dpcd_addr = DP_SINK_COUNT;
 2694	} else {
 2695		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
 2696		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
 2697		dpcd_addr = DP_SINK_COUNT_ESI;
 2698	}
 2699
 2700	dret = drm_dp_dpcd_read(
 2701		&aconnector->dm_dp_aux.aux,
 2702		dpcd_addr,
 2703		esi,
 2704		dpcd_bytes_to_read);
 2705
 2706	while (dret == dpcd_bytes_to_read &&
 2707		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
 2710
 2711		process_count++;
 2712
 2713		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
 2714		/* handle HPD short pulse irq */
 2715		if (aconnector->mst_mgr.mst_state)
 2716			drm_dp_mst_hpd_irq(
 2717				&aconnector->mst_mgr,
 2718				esi,
 2719				&new_irq_handled);
 2720
 2721		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
 2723			const int ack_dpcd_bytes_to_write =
 2724				dpcd_bytes_to_read - 1;
 2725
 2726			for (retry = 0; retry < 3; retry++) {
 2727				uint8_t wret;
 2728
 2729				wret = drm_dp_dpcd_write(
 2730					&aconnector->dm_dp_aux.aux,
 2731					dpcd_addr + 1,
 2732					&esi[1],
 2733					ack_dpcd_bytes_to_write);
 2734				if (wret == ack_dpcd_bytes_to_write)
 2735					break;
 2736			}
 2737
			/* check if there is a new irq to be handled */
 2739			dret = drm_dp_dpcd_read(
 2740				&aconnector->dm_dp_aux.aux,
 2741				dpcd_addr,
 2742				esi,
 2743				dpcd_bytes_to_read);
 2744
 2745			new_irq_handled = false;
 2746		} else {
 2747			break;
 2748		}
 2749	}
 2750
 2751	if (process_count == max_process_count)
 2752		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
 2753}
 2754
 2755static void handle_hpd_rx_irq(void *param)
 2756{
 2757	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 2758	struct drm_connector *connector = &aconnector->base;
 2759	struct drm_device *dev = connector->dev;
 2760	struct dc_link *dc_link = aconnector->dc_link;
 2761	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
 2762	bool result = false;
 2763	enum dc_connection_type new_connection_type = dc_connection_none;
 2764	struct amdgpu_device *adev = drm_to_adev(dev);
 2765	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;
 2767
 2768	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 2769
 2770	if (adev->dm.disable_hpd_irq)
 2771		return;
	/*
	 * TODO: Temporarily add a mutex to protect the HPD interrupt from
	 * GPIO conflicts; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
 2779	mutex_lock(&aconnector->hpd_lock);
 2780
 2781	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
 2782
 2783	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
 2784		(dc_link->type == dc_connection_mst_branch)) {
 2785		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
 2786			result = true;
 2787			dm_handle_hpd_rx_irq(aconnector);
 2788			goto out;
 2789		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
 2790			result = false;
 2791			dm_handle_hpd_rx_irq(aconnector);
 2792			goto out;
 2793		}
 2794	}
 2795
 2796	/*
 2797	 * TODO: We need the lock to avoid touching DC state while it's being
 2798	 * modified during automated compliance testing, or when link loss
 2799	 * happens. While this should be split into subhandlers and proper
 2800	 * interfaces to avoid having to conditionally lock like this in the
 2801	 * outer layer, we need this workaround temporarily to allow MST
 2802	 * lightup in some scenarios to avoid timeout.
 2803	 */
 2804	if (!amdgpu_in_reset(adev) &&
 2805	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
 2806	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
 2807		mutex_lock(&adev->dm.dc_lock);
 2808		lock_flag = 1;
 2809	}
 2810
 2811#ifdef CONFIG_DRM_AMD_DC_HDCP
 2812	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
 2813#else
 2814	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
 2815#endif
 2816	if (!amdgpu_in_reset(adev) && lock_flag)
 2817		mutex_unlock(&adev->dm.dc_lock);
 2818
 2819out:
 2820	if (result && !is_mst_root_connector) {
 2821		/* Downstream Port status changed. */
 2822		if (!dc_link_detect_sink(dc_link, &new_connection_type))
 2823			DRM_ERROR("KMS: Failed to detect connector\n");
 2824
 2825		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 2826			emulated_link_detect(dc_link);
 2827
 2828			if (aconnector->fake_enable)
 2829				aconnector->fake_enable = false;
 2830
 2831			amdgpu_dm_update_connector_after_detect(aconnector);
 2834			drm_modeset_lock_all(dev);
 2835			dm_restore_drm_connector_state(dev, connector);
 2836			drm_modeset_unlock_all(dev);
 2837
 2838			drm_kms_helper_hotplug_event(dev);
 2839		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 2840
 2841			if (aconnector->fake_enable)
 2842				aconnector->fake_enable = false;
 2843
 2844			amdgpu_dm_update_connector_after_detect(aconnector);
 2847			drm_modeset_lock_all(dev);
 2848			dm_restore_drm_connector_state(dev, connector);
 2849			drm_modeset_unlock_all(dev);
 2850
 2851			drm_kms_helper_hotplug_event(dev);
 2852		}
 2853	}
 2854#ifdef CONFIG_DRM_AMD_DC_HDCP
 2855	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
 2856		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
 2858	}
 2859#endif
 2860
 2861	if (dc_link->type != dc_connection_mst_branch)
 2862		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
 2863
 2864	mutex_unlock(&aconnector->hpd_lock);
 2865}
 2866
 2867static void register_hpd_handlers(struct amdgpu_device *adev)
 2868{
 2869	struct drm_device *dev = adev_to_drm(adev);
 2870	struct drm_connector *connector;
 2871	struct amdgpu_dm_connector *aconnector;
 2872	const struct dc_link *dc_link;
 2873	struct dc_interrupt_params int_params = {0};
 2874
 2875	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 2876	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 2877
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
 2882		dc_link = aconnector->dc_link;
 2883
 2884		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
 2885			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 2886			int_params.irq_source = dc_link->irq_source_hpd;
 2887
 2888			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 2889					handle_hpd_irq,
 2890					(void *) aconnector);
 2891		}
 2892
 2893		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
 2894
 2895			/* Also register for DP short pulse (hpd_rx). */
 2896			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
 2898
 2899			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 2900					handle_hpd_rx_irq,
 2901					(void *) aconnector);
 2902		}
 2903	}
 2904}
 2905
 2906#if defined(CONFIG_DRM_AMD_DC_SI)
 2907/* Register IRQ sources and initialize IRQ callbacks */
 2908static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 2909{
 2910	struct dc *dc = adev->dm.dc;
 2911	struct common_irq_params *c_irq_params;
 2912	struct dc_interrupt_params int_params = {0};
 2913	int r;
 2914	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 2916
 2917	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 2918	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 2919
 2920	/*
 2921	 * Actions of amdgpu_irq_add_id():
 2922	 * 1. Register a set() function with base driver.
 2923	 *    Base driver will call set() function to enable/disable an
 2924	 *    interrupt in DC hardware.
 2925	 * 2. Register amdgpu_dm_irq_handler().
 2926	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 2927	 *    coming from DC hardware.
 2928	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
 2930
 2931	/* Use VBLANK interrupt */
 2932	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
 2934		if (r) {
 2935			DRM_ERROR("Failed to add crtc irq id!\n");
 2936			return r;
 2937		}
 2938
 2939		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 2940		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
 2942
 2943		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 2944
 2945		c_irq_params->adev = adev;
 2946		c_irq_params->irq_src = int_params.irq_source;
 2947
 2948		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 2949				dm_crtc_high_irq, c_irq_params);
 2950	}
 2951
 2952	/* Use GRPH_PFLIP interrupt */
 2953	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
 2954			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
 2955		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
 2956		if (r) {
 2957			DRM_ERROR("Failed to add page flip irq id!\n");
 2958			return r;
 2959		}
 2960
 2961		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 2962		int_params.irq_source =
 2963			dc_interrupt_to_irq_source(dc, i, 0);
 2964
 2965		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 2966
 2967		c_irq_params->adev = adev;
 2968		c_irq_params->irq_src = int_params.irq_source;
 2969
 2970		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 2971				dm_pflip_high_irq, c_irq_params);
	}
 2974
 2975	/* HPD */
 2976	r = amdgpu_irq_add_id(adev, client_id,
 2977			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 2978	if (r) {
 2979		DRM_ERROR("Failed to add hpd irq id!\n");
 2980		return r;
 2981	}
 2982
 2983	register_hpd_handlers(adev);
 2984
 2985	return 0;
 2986}
 2987#endif
 2988
 2989/* Register IRQ sources and initialize IRQ callbacks */
 2990static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 2991{
 2992	struct dc *dc = adev->dm.dc;
 2993	struct common_irq_params *c_irq_params;
 2994	struct dc_interrupt_params int_params = {0};
 2995	int r;
 2996	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 2998
 2999	if (adev->asic_type >= CHIP_VEGA10)
 3000		client_id = SOC15_IH_CLIENTID_DCE;
 3001
 3002	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 3003	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 3004
 3005	/*
 3006	 * Actions of amdgpu_irq_add_id():
 3007	 * 1. Register a set() function with base driver.
 3008	 *    Base driver will call set() function to enable/disable an
 3009	 *    interrupt in DC hardware.
 3010	 * 2. Register amdgpu_dm_irq_handler().
 3011	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 3012	 *    coming from DC hardware.
 3013	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
 3015
 3016	/* Use VBLANK interrupt */
 3017	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
 3018		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
 3019		if (r) {
 3020			DRM_ERROR("Failed to add crtc irq id!\n");
 3021			return r;
 3022		}
 3023
 3024		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3025		int_params.irq_source =
 3026			dc_interrupt_to_irq_source(dc, i, 0);
 3027
 3028		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 3029
 3030		c_irq_params->adev = adev;
 3031		c_irq_params->irq_src = int_params.irq_source;
 3032
 3033		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3034				dm_crtc_high_irq, c_irq_params);
 3035	}
 3036
 3037	/* Use VUPDATE interrupt */
 3038	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
 3039		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
 3040		if (r) {
 3041			DRM_ERROR("Failed to add vupdate irq id!\n");
 3042			return r;
 3043		}
 3044
 3045		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3046		int_params.irq_source =
 3047			dc_interrupt_to_irq_source(dc, i, 0);
 3048
 3049		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
 3050
 3051		c_irq_params->adev = adev;
 3052		c_irq_params->irq_src = int_params.irq_source;
 3053
 3054		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3055				dm_vupdate_high_irq, c_irq_params);
 3056	}
 3057
 3058	/* Use GRPH_PFLIP interrupt */
 3059	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
 3060			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
 3061		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
 3062		if (r) {
 3063			DRM_ERROR("Failed to add page flip irq id!\n");
 3064			return r;
 3065		}
 3066
 3067		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3068		int_params.irq_source =
 3069			dc_interrupt_to_irq_source(dc, i, 0);
 3070
 3071		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 3072
 3073		c_irq_params->adev = adev;
 3074		c_irq_params->irq_src = int_params.irq_source;
 3075
 3076		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3077				dm_pflip_high_irq, c_irq_params);
	}
 3080
 3081	/* HPD */
 3082	r = amdgpu_irq_add_id(adev, client_id,
 3083			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 3084	if (r) {
 3085		DRM_ERROR("Failed to add hpd irq id!\n");
 3086		return r;
 3087	}
 3088
 3089	register_hpd_handlers(adev);
 3090
 3091	return 0;
 3092}
 3093
 3094#if defined(CONFIG_DRM_AMD_DC_DCN)
 3095/* Register IRQ sources and initialize IRQ callbacks */
 3096static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 3097{
 3098	struct dc *dc = adev->dm.dc;
 3099	struct common_irq_params *c_irq_params;
 3100	struct dc_interrupt_params int_params = {0};
 3101	int r;
 3102	int i;
 3103#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 3104	static const unsigned int vrtl_int_srcid[] = {
 3105		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
 3106		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
 3107		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
 3108		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
 3109		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
 3110		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
 3111	};
 3112#endif
 3113
 3114	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 3115	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 3116
 3117	/*
 3118	 * Actions of amdgpu_irq_add_id():
 3119	 * 1. Register a set() function with base driver.
 3120	 *    Base driver will call set() function to enable/disable an
 3121	 *    interrupt in DC hardware.
 3122	 * 2. Register amdgpu_dm_irq_handler().
 3123	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 3124	 *    coming from DC hardware.
 3125	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 3126	 *    for acknowledging and handling.
 3127	 */
 3128
 3129	/* Use VSTARTUP interrupt */
 3130	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
 3131			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
 3132			i++) {
 3133		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
 3134
 3135		if (r) {
 3136			DRM_ERROR("Failed to add crtc irq id!\n");
 3137			return r;
 3138		}
 3139
 3140		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3141		int_params.irq_source =
 3142			dc_interrupt_to_irq_source(dc, i, 0);
 3143
 3144		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 3145
 3146		c_irq_params->adev = adev;
 3147		c_irq_params->irq_src = int_params.irq_source;
 3148
 3149		amdgpu_dm_irq_register_interrupt(
 3150			adev, &int_params, dm_crtc_high_irq, c_irq_params);
 3151	}
 3152
 3153	/* Use otg vertical line interrupt */
 3154#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 3155	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
 3156		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
 3157				vrtl_int_srcid[i], &adev->vline0_irq);
 3158
 3159		if (r) {
 3160			DRM_ERROR("Failed to add vline0 irq id!\n");
 3161			return r;
 3162		}
 3163
 3164		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3165		int_params.irq_source =
 3166			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
 3167
 3168		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
 3169			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
 3170			break;
 3171		}
 3172
 3173		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
 3174					- DC_IRQ_SOURCE_DC1_VLINE0];
 3175
 3176		c_irq_params->adev = adev;
 3177		c_irq_params->irq_src = int_params.irq_source;
 3178
 3179		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3180				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
 3181	}
 3182#endif
 3183
 3184	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
 3185	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
 3186	 * to trigger at end of each vblank, regardless of state of the lock,
 3187	 * matching DCE behaviour.
 3188	 */
 3189	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
 3190	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
 3191	     i++) {
 3192		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
 3193
 3194		if (r) {
 3195			DRM_ERROR("Failed to add vupdate irq id!\n");
 3196			return r;
 3197		}
 3198
 3199		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3200		int_params.irq_source =
 3201			dc_interrupt_to_irq_source(dc, i, 0);
 3202
 3203		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
 3204
 3205		c_irq_params->adev = adev;
 3206		c_irq_params->irq_src = int_params.irq_source;
 3207
 3208		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3209				dm_vupdate_high_irq, c_irq_params);
 3210	}
 3211
 3212	/* Use GRPH_PFLIP interrupt */
 3213	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
 3214			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
 3215			i++) {
 3216		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 3217		if (r) {
 3218			DRM_ERROR("Failed to add page flip irq id!\n");
 3219			return r;
 3220		}
 3221
 3222		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 3223		int_params.irq_source =
 3224			dc_interrupt_to_irq_source(dc, i, 0);
 3225
 3226		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
 3227
 3228		c_irq_params->adev = adev;
 3229		c_irq_params->irq_src = int_params.irq_source;
 3230
 3231		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3232				dm_pflip_high_irq, c_irq_params);
	}
 3235
 3236	/* HPD */
 3237	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
 3238			&adev->hpd_irq);
 3239	if (r) {
 3240		DRM_ERROR("Failed to add hpd irq id!\n");
 3241		return r;
 3242	}
 3243
 3244	register_hpd_handlers(adev);
 3245
 3246	return 0;
 3247}

 3248/* Register Outbox IRQ sources and initialize IRQ callbacks */
 3249static int register_outbox_irq_handlers(struct amdgpu_device *adev)
 3250{
 3251	struct dc *dc = adev->dm.dc;
 3252	struct common_irq_params *c_irq_params;
 3253	struct dc_interrupt_params int_params = {0};
 3254	int r, i;
 3255
 3256	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 3257	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 3258
 3259	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
 3260			&adev->dmub_outbox_irq);
 3261	if (r) {
 3262		DRM_ERROR("Failed to add outbox irq id!\n");
 3263		return r;
 3264	}
 3265
 3266	if (dc->ctx->dmub_srv) {
 3267		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
 3268		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 3269		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);
 3271
 3272		c_irq_params = &adev->dm.dmub_outbox_params[0];
 3273
 3274		c_irq_params->adev = adev;
 3275		c_irq_params->irq_src = int_params.irq_source;
 3276
 3277		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 3278				dm_dmub_outbox1_low_irq, c_irq_params);
 3279	}
 3280
 3281	return 0;
 3282}
 3283#endif
 3284
 3285/*
 3286 * Acquires the lock for the atomic state object and returns
 3287 * the new atomic state.
 3288 *
 3289 * This should only be called during atomic check.
 3290 */
 3291static int dm_atomic_get_state(struct drm_atomic_state *state,
 3292			       struct dm_atomic_state **dm_state)
 3293{
 3294	struct drm_device *dev = state->dev;
 3295	struct amdgpu_device *adev = drm_to_adev(dev);
 3296	struct amdgpu_display_manager *dm = &adev->dm;
 3297	struct drm_private_state *priv_state;
 3298
 3299	if (*dm_state)
 3300		return 0;
 3301
 3302	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
 3303	if (IS_ERR(priv_state))
 3304		return PTR_ERR(priv_state);
 3305
 3306	*dm_state = to_dm_atomic_state(priv_state);
 3307
 3308	return 0;
 3309}
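
/*
 * A minimal usage sketch from within atomic check (hypothetical caller, for
 * illustration only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, *dm_state points at the commit's private DM state.
 */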
 3310
 3311static struct dm_atomic_state *
 3312dm_atomic_get_new_state(struct drm_atomic_state *state)
 3313{
 3314	struct drm_device *dev = state->dev;
 3315	struct amdgpu_device *adev = drm_to_adev(dev);
 3316	struct amdgpu_display_manager *dm = &adev->dm;
 3317	struct drm_private_obj *obj;
 3318	struct drm_private_state *new_obj_state;
 3319	int i;
 3320
 3321	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
 3322		if (obj->funcs == dm->atomic_obj.funcs)
 3323			return to_dm_atomic_state(new_obj_state);
 3324	}
 3325
 3326	return NULL;
 3327}
 3328
 3329static struct drm_private_state *
 3330dm_atomic_duplicate_state(struct drm_private_obj *obj)
 3331{
 3332	struct dm_atomic_state *old_state, *new_state;
 3333
 3334	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 3335	if (!new_state)
 3336		return NULL;
 3337
 3338	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
 3339
 3340	old_state = to_dm_atomic_state(obj->state);
 3341
 3342	if (old_state && old_state->context)
 3343		new_state->context = dc_copy_state(old_state->context);
 3344
 3345	if (!new_state->context) {
 3346		kfree(new_state);
 3347		return NULL;
 3348	}
 3349
 3350	return &new_state->base;
 3351}
 3352
 3353static void dm_atomic_destroy_state(struct drm_private_obj *obj,
 3354				    struct drm_private_state *state)
 3355{
 3356	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 3357
 3358	if (dm_state && dm_state->context)
 3359		dc_release_state(dm_state->context);
 3360
 3361	kfree(dm_state);
 3362}
 3363
 3364static struct drm_private_state_funcs dm_atomic_state_funcs = {
 3365	.atomic_duplicate_state = dm_atomic_duplicate_state,
 3366	.atomic_destroy_state = dm_atomic_destroy_state,
 3367};
 3368
 3369static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 3370{
 3371	struct dm_atomic_state *state;
 3372	int r;
 3373
 3374	adev->mode_info.mode_config_initialized = true;
 3375
 3376	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
 3377	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
 3378
 3379	adev_to_drm(adev)->mode_config.max_width = 16384;
 3380	adev_to_drm(adev)->mode_config.max_height = 16384;
 3381
 3382	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 3383	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 3384	/* indicates support for immediate flip */
 3385	adev_to_drm(adev)->mode_config.async_page_flip = true;
 3386
 3387	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 3388
 3389	state = kzalloc(sizeof(*state), GFP_KERNEL);
 3390	if (!state)
 3391		return -ENOMEM;
 3392
 3393	state->context = dc_create_state(adev->dm.dc);
 3394	if (!state->context) {
 3395		kfree(state);
 3396		return -ENOMEM;
 3397	}
 3398
 3399	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
 3400
 3401	drm_atomic_private_obj_init(adev_to_drm(adev),
 3402				    &adev->dm.atomic_obj,
 3403				    &state->base,
 3404				    &dm_atomic_state_funcs);
 3405
 3406	r = amdgpu_display_modeset_create_props(adev);
 3407	if (r) {
 3408		dc_release_state(state->context);
 3409		kfree(state);
 3410		return r;
 3411	}
 3412
 3413	r = amdgpu_dm_audio_init(adev);
 3414	if (r) {
 3415		dc_release_state(state->context);
 3416		kfree(state);
 3417		return r;
 3418	}
 3419
 3420	return 0;
 3421}
 3422
 3423#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
 3424#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
 3425#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
 3426
 3427#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 3428	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 3429
 3430static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
 3431{
 3432#if defined(CONFIG_ACPI)
 3433	struct amdgpu_dm_backlight_caps caps;
 3434
 3435	memset(&caps, 0, sizeof(caps));
 3436
 3437	if (dm->backlight_caps.caps_valid)
 3438		return;
 3439
 3440	amdgpu_acpi_get_backlight_caps(&caps);
 3441	if (caps.caps_valid) {
 3442		dm->backlight_caps.caps_valid = true;
 3443		if (caps.aux_support)
 3444			return;
 3445		dm->backlight_caps.min_input_signal = caps.min_input_signal;
 3446		dm->backlight_caps.max_input_signal = caps.max_input_signal;
 3447	} else {
 3448		dm->backlight_caps.min_input_signal =
 3449				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
 3450		dm->backlight_caps.max_input_signal =
 3451				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 3452	}
 3453#else
 3454	if (dm->backlight_caps.aux_support)
 3455		return;
 3456
 3457	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
 3458	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 3459#endif
 3460}
 3461
 3462static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 3463				unsigned *min, unsigned *max)
 3464{
 3465	if (!caps)
 3466		return 0;
 3467
 3468	if (caps->aux_support) {
 3469		// Firmware limits are in nits, DC API wants millinits.
 3470		*max = 1000 * caps->aux_max_input_signal;
 3471		*min = 1000 * caps->aux_min_input_signal;
 3472	} else {
 3473		// Firmware limits are 8-bit, PWM control is 16-bit.
 3474		*max = 0x101 * caps->max_input_signal;
 3475		*min = 0x101 * caps->min_input_signal;
 3476	}
 3477	return 1;
 3478}
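
/*
 * Note on the 0x101 factor above: multiplying an 8-bit value by 0x101
 * replicates it into both bytes of a 16-bit word, so the full 8-bit
 * firmware range maps exactly onto the 16-bit PWM range
 * (e.g. 0xff * 0x101 == 0xffff).
 */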
 3479
 3480static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
 3481					uint32_t brightness)
 3482{
 3483	unsigned min, max;
 3484
 3485	if (!get_brightness_range(caps, &min, &max))
 3486		return brightness;
 3487
 3488	// Rescale 0..255 to min..max
 3489	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
 3490				       AMDGPU_MAX_BL_LEVEL);
 3491}
 3492
 3493static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
 3494				      uint32_t brightness)
 3495{
 3496	unsigned min, max;
 3497
 3498	if (!get_brightness_range(caps, &min, &max))
 3499		return brightness;
 3500
 3501	if (brightness < min)
 3502		return 0;
 3503	// Rescale min..max to 0..255
 3504	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
 3505				 max - min);
 3506}
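
/*
 * Worked example (illustrative, PWM path with the default caps of
 * 12..255): min = 0x101 * 12 = 3084, max = 0x101 * 255 = 65535.
 * A user brightness of 128 maps to 3084 + round(62451 * 128 / 255)
 * = 34432, and convert_brightness_to_user(34432) rounds back to 128,
 * so the two conversions round-trip for representable values.
 */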
 3507
 3508static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 3509					 u32 user_brightness)
 3510{
 3511	struct amdgpu_dm_backlight_caps caps;
 3512	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
 3513	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3514	bool rc = true; /* stays true if there are no eDP links to update */
 3515	int i;
 3516
 3517	amdgpu_dm_update_backlight_caps(dm);
 3518	caps = dm->backlight_caps;
 3519
 3520	for (i = 0; i < dm->num_of_edps; i++) {
 3521		dm->brightness[i] = user_brightness;
 3522		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
 3523		link[i] = (struct dc_link *)dm->backlight_link[i];
 3524	}
 3525
 3526	/* Change brightness based on AUX property */
 3527	if (caps.aux_support) {
 3528		for (i = 0; i < dm->num_of_edps; i++) {
 3529			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
 3530				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
 3531			if (!rc) {
 3532				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
 3533				break;
 3534			}
 3535		}
 3536	} else {
 3537		for (i = 0; i < dm->num_of_edps; i++) {
 3538			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
 3539			if (!rc) {
 3540				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
 3541				break;
 3542			}
 3543		}
 3544	}
 3545
 3546	return rc ? 0 : 1;
 3547}
 3548
 3549static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 3550{
 3551	struct amdgpu_display_manager *dm = bl_get_data(bd);
 3552
 3553	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
 3554
 3555	return 0;
 3556}
 3557
 3558static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
 3559{
 3560	struct amdgpu_dm_backlight_caps caps;
 3561
 3562	amdgpu_dm_update_backlight_caps(dm);
 3563	caps = dm->backlight_caps;
 3564
 3565	if (caps.aux_support) {
 3566		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
 3567		u32 avg, peak;
 3568		bool rc;
 3569
 3570		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
 3571		if (!rc)
 3572			return dm->brightness[0];
 3573		return convert_brightness_to_user(&caps, avg);
 3574	} else {
 3575		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
 3576
 3577		if (ret == DC_ERROR_UNEXPECTED)
 3578			return dm->brightness[0];
 3579		return convert_brightness_to_user(&caps, ret);
 3580	}
 3581}
 3582
 3583static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 3584{
 3585	struct amdgpu_display_manager *dm = bl_get_data(bd);
 3586
 3587	return amdgpu_dm_backlight_get_level(dm);
 3588}
 3589
 3590static const struct backlight_ops amdgpu_dm_backlight_ops = {
 3591	.options = BL_CORE_SUSPENDRESUME,
 3592	.get_brightness = amdgpu_dm_backlight_get_brightness,
 3593	.update_status	= amdgpu_dm_backlight_update_status,
 3594};
 3595
 3596static void
 3597amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 3598{
 3599	char bl_name[16];
 3600	struct backlight_properties props = { 0 };
 3601	int i;
 3602
 3603	amdgpu_dm_update_backlight_caps(dm);
 3604	for (i = 0; i < dm->num_of_edps; i++)
 3605		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
 3606
 3607	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
 3608	props.brightness = AMDGPU_MAX_BL_LEVEL;
 3609	props.type = BACKLIGHT_RAW;
 3610
 3611	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
 3612		 adev_to_drm(dm->adev)->primary->index);
 3613
 3614	dm->backlight_dev = backlight_device_register(bl_name,
 3615						      adev_to_drm(dm->adev)->dev,
 3616						      dm,
 3617						      &amdgpu_dm_backlight_ops,
 3618						      &props);
 3619
 3620	if (IS_ERR(dm->backlight_dev))
 3621		DRM_ERROR("DM: Backlight registration failed!\n");
 3622	else
 3623		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
 3624}
 3625
 3626#endif
 3627
 3628static int initialize_plane(struct amdgpu_display_manager *dm,
 3629			    struct amdgpu_mode_info *mode_info, int plane_id,
 3630			    enum drm_plane_type plane_type,
 3631			    const struct dc_plane_cap *plane_cap)
 3632{
 3633	struct drm_plane *plane;
 3634	unsigned long possible_crtcs;
 3635	int ret = 0;
 3636
 3637	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
 3638	if (!plane) {
 3639		DRM_ERROR("KMS: Failed to allocate plane\n");
 3640		return -ENOMEM;
 3641	}
 3642	plane->type = plane_type;
 3643
3644	/*
3645	 * HACK: IGT tests expect that each primary plane is bound to
3646	 * exactly one possible CRTC. Only expose support for any CRTC
3647	 * if the plane is not going to be used as a primary plane for
3648	 * a CRTC - e.g. overlay or underlay planes.
3649	 */
 3650	possible_crtcs = 1 << plane_id;
 3651	if (plane_id >= dm->dc->caps.max_streams)
 3652		possible_crtcs = 0xff;
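
	/*
	 * For example, plane_id 1 yields possible_crtcs 0x2 (CRTC 1 only),
	 * while an overlay with plane_id >= max_streams may be placed on
	 * any of the first eight CRTCs (mask 0xff).
	 */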
 3653
 3654	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
 3655
 3656	if (ret) {
 3657		DRM_ERROR("KMS: Failed to initialize plane\n");
 3658		kfree(plane);
 3659		return ret;
 3660	}
 3661
 3662	if (mode_info)
 3663		mode_info->planes[plane_id] = plane;
 3664
 3665	return ret;
 3666}
 3667
 3669static void register_backlight_device(struct amdgpu_display_manager *dm,
 3670				      struct dc_link *link)
 3671{
 3672#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 3673	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 3674
 3675	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
 3676	    link->type != dc_connection_none) {
 3677		/*
3678		 * Even if registration fails, we should continue with
3679		 * DM initialization, because not having backlight control
3680		 * is better than a black screen.
 3681		 */
 3682		if (!dm->backlight_dev)
 3683			amdgpu_dm_register_backlight_device(dm);
 3684
 3685		if (dm->backlight_dev) {
 3686			dm->backlight_link[dm->num_of_edps] = link;
 3687			dm->num_of_edps++;
 3688		}
 3689	}
 3690#endif
 3691}
 3692
 3694/*
 3695 * In this architecture, the association
 3696 * connector -> encoder -> crtc
3697 * is not really required. The CRTC and connector will hold the
3698 * display_index as an abstraction to use with the DAL component.
 3699 *
 3700 * Returns 0 on success
 3701 */
 3702static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 3703{
 3704	struct amdgpu_display_manager *dm = &adev->dm;
 3705	int32_t i;
 3706	struct amdgpu_dm_connector *aconnector = NULL;
 3707	struct amdgpu_encoder *aencoder = NULL;
 3708	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 3709	uint32_t link_cnt;
 3710	int32_t primary_planes;
 3711	enum dc_connection_type new_connection_type = dc_connection_none;
 3712	const struct dc_plane_cap *plane;
 3713
 3714	dm->display_indexes_num = dm->dc->caps.max_streams;
3715	/* Update the actually used number of CRTCs */
 3716	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
 3717
 3718	link_cnt = dm->dc->caps.max_links;
 3719	if (amdgpu_dm_mode_config_init(dm->adev)) {
 3720		DRM_ERROR("DM: Failed to initialize mode config\n");
 3721		return -EINVAL;
 3722	}
 3723
 3724	/* There is one primary plane per CRTC */
 3725	primary_planes = dm->dc->caps.max_streams;
 3726	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
 3727
 3728	/*
3729	 * Initialize primary planes, implicit planes for legacy IOCTLs.
 3730	 * Order is reversed to match iteration order in atomic check.
 3731	 */
 3732	for (i = (primary_planes - 1); i >= 0; i--) {
 3733		plane = &dm->dc->caps.planes[i];
 3734
 3735		if (initialize_plane(dm, mode_info, i,
 3736				     DRM_PLANE_TYPE_PRIMARY, plane)) {
 3737			DRM_ERROR("KMS: Failed to initialize primary plane\n");
 3738			goto fail;
 3739		}
 3740	}
 3741
 3742	/*
 3743	 * Initialize overlay planes, index starting after primary planes.
 3744	 * These planes have a higher DRM index than the primary planes since
 3745	 * they should be considered as having a higher z-order.
 3746	 * Order is reversed to match iteration order in atomic check.
 3747	 *
 3748	 * Only support DCN for now, and only expose one so we don't encourage
 3749	 * userspace to use up all the pipes.
 3750	 */
 3751	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
 3752		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
 3753
 3754		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
 3755			continue;
 3756
 3757		if (!plane->blends_with_above || !plane->blends_with_below)
 3758			continue;
 3759
 3760		if (!plane->pixel_format_support.argb8888)
 3761			continue;
 3762
 3763		if (initialize_plane(dm, NULL, primary_planes + i,
 3764				     DRM_PLANE_TYPE_OVERLAY, plane)) {
 3765			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
 3766			goto fail;
 3767		}
 3768
 3769		/* Only create one overlay plane. */
 3770		break;
 3771	}
 3772
 3773	for (i = 0; i < dm->dc->caps.max_streams; i++)
 3774		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
 3775			DRM_ERROR("KMS: Failed to initialize crtc\n");
 3776			goto fail;
 3777		}
 3778
 3779#if defined(CONFIG_DRM_AMD_DC_DCN)
 3780	/* Use Outbox interrupt */
 3781	switch (adev->asic_type) {
 3782	case CHIP_SIENNA_CICHLID:
 3783	case CHIP_NAVY_FLOUNDER:
 3784	case CHIP_YELLOW_CARP:
 3785	case CHIP_RENOIR:
 3786		if (register_outbox_irq_handlers(dm->adev)) {
 3787			DRM_ERROR("DM: Failed to initialize IRQ\n");
 3788			goto fail;
 3789		}
 3790		break;
 3791	default:
 3792		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
 3793	}
 3794#endif
 3795
3796	/* Loop over all connectors on the board. */
 3797	for (i = 0; i < link_cnt; i++) {
 3798		struct dc_link *link = NULL;
 3799
 3800		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
 3801			DRM_ERROR(
 3802				"KMS: Cannot support more than %d display indexes\n",
 3803					AMDGPU_DM_MAX_DISPLAY_INDEX);
 3804			continue;
 3805		}
 3806
 3807		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
 3808		if (!aconnector)
 3809			goto fail;
 3810
 3811		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
 3812		if (!aencoder)
 3813			goto fail;
 3814
 3815		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
 3816			DRM_ERROR("KMS: Failed to initialize encoder\n");
 3817			goto fail;
 3818		}
 3819
 3820		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
 3821			DRM_ERROR("KMS: Failed to initialize connector\n");
 3822			goto fail;
 3823		}
 3824
 3825		link = dc_get_link_at_index(dm->dc, i);
 3826
 3827		if (!dc_link_detect_sink(link, &new_connection_type))
 3828			DRM_ERROR("KMS: Failed to detect connector\n");
 3829
 3830		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 3831			emulated_link_detect(link);
 3832			amdgpu_dm_update_connector_after_detect(aconnector);
 3833
 3834		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 3835			amdgpu_dm_update_connector_after_detect(aconnector);
 3836			register_backlight_device(dm, link);
 3837			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
 3838				amdgpu_dm_set_psr_caps(link);
 3839		}
 3842	}
 3843
 3844	/* Software is initialized. Now we can register interrupt handlers. */
 3845	switch (adev->asic_type) {
 3846#if defined(CONFIG_DRM_AMD_DC_SI)
 3847	case CHIP_TAHITI:
 3848	case CHIP_PITCAIRN:
 3849	case CHIP_VERDE:
 3850	case CHIP_OLAND:
 3851		if (dce60_register_irq_handlers(dm->adev)) {
 3852			DRM_ERROR("DM: Failed to initialize IRQ\n");
 3853			goto fail;
 3854		}
 3855		break;
 3856#endif
 3857	case CHIP_BONAIRE:
 3858	case CHIP_HAWAII:
 3859	case CHIP_KAVERI:
 3860	case CHIP_KABINI:
 3861	case CHIP_MULLINS:
 3862	case CHIP_TONGA:
 3863	case CHIP_FIJI:
 3864	case CHIP_CARRIZO:
 3865	case CHIP_STONEY:
 3866	case CHIP_POLARIS11:
 3867	case CHIP_POLARIS10:
 3868	case CHIP_POLARIS12:
 3869	case CHIP_VEGAM:
 3870	case CHIP_VEGA10:
 3871	case CHIP_VEGA12:
 3872	case CHIP_VEGA20:
 3873		if (dce110_register_irq_handlers(dm->adev)) {
 3874			DRM_ERROR("DM: Failed to initialize IRQ\n");
 3875			goto fail;
 3876		}
 3877		break;
 3878#if defined(CONFIG_DRM_AMD_DC_DCN)
 3879	case CHIP_RAVEN:
 3880	case CHIP_NAVI12:
 3881	case CHIP_NAVI10:
 3882	case CHIP_NAVI14:
 3883	case CHIP_RENOIR:
 3884	case CHIP_SIENNA_CICHLID:
 3885	case CHIP_NAVY_FLOUNDER:
 3886	case CHIP_DIMGREY_CAVEFISH:
 3887	case CHIP_BEIGE_GOBY:
 3888	case CHIP_VANGOGH:
 3889	case CHIP_YELLOW_CARP:
 3890		if (dcn10_register_irq_handlers(dm->adev)) {
 3891			DRM_ERROR("DM: Failed to initialize IRQ\n");
 3892			goto fail;
 3893		}
 3894		break;
 3895#endif
 3896	default:
 3897		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 3898		goto fail;
 3899	}
 3900
 3901	return 0;
 3902fail:
 3903	kfree(aencoder);
 3904	kfree(aconnector);
 3905
 3906	return -EINVAL;
 3907}
 3908
 3909static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 3910{
 3911	drm_atomic_private_obj_fini(&dm->atomic_obj);
 3913}
 3914
 3915/******************************************************************************
 3916 * amdgpu_display_funcs functions
 3917 *****************************************************************************/
 3918
3919/**
 3920 * dm_bandwidth_update - program display watermarks
 3921 *
 3922 * @adev: amdgpu_device pointer
 3923 *
 3924 * Calculate and program the display watermarks and line buffer allocation.
 3925 */
 3926static void dm_bandwidth_update(struct amdgpu_device *adev)
 3927{
 3928	/* TODO: implement later */
 3929}
 3930
 3931static const struct amdgpu_display_funcs dm_display_funcs = {
 3932	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
 3933	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
 3934	.backlight_set_level = NULL, /* never called for DC */
 3935	.backlight_get_level = NULL, /* never called for DC */
 3936	.hpd_sense = NULL,/* called unconditionally */
 3937	.hpd_set_polarity = NULL, /* called unconditionally */
 3938	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
 3939	.page_flip_get_scanoutpos =
 3940		dm_crtc_get_scanoutpos,/* called unconditionally */
 3941	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
 3942	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
 3943};
 3944
 3945#if defined(CONFIG_DEBUG_KERNEL_DC)
 3946
 3947static ssize_t s3_debug_store(struct device *device,
 3948			      struct device_attribute *attr,
 3949			      const char *buf,
 3950			      size_t count)
 3951{
 3952	int ret;
 3953	int s3_state;
 3954	struct drm_device *drm_dev = dev_get_drvdata(device);
 3955	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 3956
 3957	ret = kstrtoint(buf, 0, &s3_state);
 3958
 3959	if (ret == 0) {
 3960		if (s3_state) {
 3961			dm_resume(adev);
 3962			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3963		} else {
3964			dm_suspend(adev);
		}
3965	}
 3966
3967	return ret == 0 ? count : ret;
 3968}
 3969
 3970DEVICE_ATTR_WO(s3_debug);
 3971
 3972#endif
 3973
 3974static int dm_early_init(void *handle)
 3975{
 3976	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 3977
 3978	switch (adev->asic_type) {
 3979#if defined(CONFIG_DRM_AMD_DC_SI)
 3980	case CHIP_TAHITI:
 3981	case CHIP_PITCAIRN:
 3982	case CHIP_VERDE:
 3983		adev->mode_info.num_crtc = 6;
 3984		adev->mode_info.num_hpd = 6;
 3985		adev->mode_info.num_dig = 6;
 3986		break;
 3987	case CHIP_OLAND:
 3988		adev->mode_info.num_crtc = 2;
 3989		adev->mode_info.num_hpd = 2;
 3990		adev->mode_info.num_dig = 2;
 3991		break;
 3992#endif
 3993	case CHIP_BONAIRE:
 3994	case CHIP_HAWAII:
 3995		adev->mode_info.num_crtc = 6;
 3996		adev->mode_info.num_hpd = 6;
 3997		adev->mode_info.num_dig = 6;
 3998		break;
 3999	case CHIP_KAVERI:
 4000		adev->mode_info.num_crtc = 4;
 4001		adev->mode_info.num_hpd = 6;
 4002		adev->mode_info.num_dig = 7;
 4003		break;
 4004	case CHIP_KABINI:
 4005	case CHIP_MULLINS:
 4006		adev->mode_info.num_crtc = 2;
 4007		adev->mode_info.num_hpd = 6;
 4008		adev->mode_info.num_dig = 6;
 4009		break;
 4010	case CHIP_FIJI:
 4011	case CHIP_TONGA:
 4012		adev->mode_info.num_crtc = 6;
 4013		adev->mode_info.num_hpd = 6;
 4014		adev->mode_info.num_dig = 7;
 4015		break;
 4016	case CHIP_CARRIZO:
 4017		adev->mode_info.num_crtc = 3;
 4018		adev->mode_info.num_hpd = 6;
 4019		adev->mode_info.num_dig = 9;
 4020		break;
 4021	case CHIP_STONEY:
 4022		adev->mode_info.num_crtc = 2;
 4023		adev->mode_info.num_hpd = 6;
 4024		adev->mode_info.num_dig = 9;
 4025		break;
 4026	case CHIP_POLARIS11:
 4027	case CHIP_POLARIS12:
 4028		adev->mode_info.num_crtc = 5;
 4029		adev->mode_info.num_hpd = 5;
 4030		adev->mode_info.num_dig = 5;
 4031		break;
 4032	case CHIP_POLARIS10:
 4033	case CHIP_VEGAM:
 4034		adev->mode_info.num_crtc = 6;
 4035		adev->mode_info.num_hpd = 6;
 4036		adev->mode_info.num_dig = 6;
 4037		break;
 4038	case CHIP_VEGA10:
 4039	case CHIP_VEGA12:
 4040	case CHIP_VEGA20:
 4041		adev->mode_info.num_crtc = 6;
 4042		adev->mode_info.num_hpd = 6;
 4043		adev->mode_info.num_dig = 6;
 4044		break;
 4045#if defined(CONFIG_DRM_AMD_DC_DCN)
 4046	case CHIP_RAVEN:
 4047	case CHIP_RENOIR:
 4048	case CHIP_VANGOGH:
 4049		adev->mode_info.num_crtc = 4;
 4050		adev->mode_info.num_hpd = 4;
 4051		adev->mode_info.num_dig = 4;
 4052		break;
 4053	case CHIP_NAVI10:
 4054	case CHIP_NAVI12:
 4055	case CHIP_SIENNA_CICHLID:
 4056	case CHIP_NAVY_FLOUNDER:
 4057		adev->mode_info.num_crtc = 6;
 4058		adev->mode_info.num_hpd = 6;
 4059		adev->mode_info.num_dig = 6;
 4060		break;
 4061	case CHIP_YELLOW_CARP:
 4062		adev->mode_info.num_crtc = 4;
 4063		adev->mode_info.num_hpd = 4;
 4064		adev->mode_info.num_dig = 4;
 4065		break;
 4066	case CHIP_NAVI14:
 4067	case CHIP_DIMGREY_CAVEFISH:
 4068		adev->mode_info.num_crtc = 5;
 4069		adev->mode_info.num_hpd = 5;
 4070		adev->mode_info.num_dig = 5;
 4071		break;
 4072	case CHIP_BEIGE_GOBY:
 4073		adev->mode_info.num_crtc = 2;
 4074		adev->mode_info.num_hpd = 2;
 4075		adev->mode_info.num_dig = 2;
 4076		break;
 4077#endif
 4078	default:
 4079		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 4080		return -EINVAL;
 4081	}
 4082
 4083	amdgpu_dm_set_irq_funcs(adev);
 4084
 4085	if (adev->mode_info.funcs == NULL)
 4086		adev->mode_info.funcs = &dm_display_funcs;
 4087
 4088	/*
 4089	 * Note: Do NOT change adev->audio_endpt_rreg and
 4090	 * adev->audio_endpt_wreg because they are initialised in
 4091	 * amdgpu_device_init()
 4092	 */
 4093#if defined(CONFIG_DEBUG_KERNEL_DC)
 4094	device_create_file(
 4095		adev_to_drm(adev)->dev,
 4096		&dev_attr_s3_debug);
 4097#endif
 4098
 4099	return 0;
 4100}
 4101
 4102static bool modeset_required(struct drm_crtc_state *crtc_state,
 4103			     struct dc_stream_state *new_stream,
 4104			     struct dc_stream_state *old_stream)
 4105{
 4106	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
 4107}
 4108
 4109static bool modereset_required(struct drm_crtc_state *crtc_state)
 4110{
 4111	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
 4112}
 4113
 4114static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
 4115{
 4116	drm_encoder_cleanup(encoder);
 4117	kfree(encoder);
 4118}
 4119
 4120static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
 4121	.destroy = amdgpu_dm_encoder_destroy,
 4122};
 4123
 4125static void get_min_max_dc_plane_scaling(struct drm_device *dev,
 4126					 struct drm_framebuffer *fb,
 4127					 int *min_downscale, int *max_upscale)
 4128{
 4129	struct amdgpu_device *adev = drm_to_adev(dev);
 4130	struct dc *dc = adev->dm.dc;
 4131	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
 4132	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
 4133
 4134	switch (fb->format->format) {
 4135	case DRM_FORMAT_P010:
 4136	case DRM_FORMAT_NV12:
 4137	case DRM_FORMAT_NV21:
 4138		*max_upscale = plane_cap->max_upscale_factor.nv12;
 4139		*min_downscale = plane_cap->max_downscale_factor.nv12;
 4140		break;
 4141
 4142	case DRM_FORMAT_XRGB16161616F:
 4143	case DRM_FORMAT_ARGB16161616F:
 4144	case DRM_FORMAT_XBGR16161616F:
 4145	case DRM_FORMAT_ABGR16161616F:
 4146		*max_upscale = plane_cap->max_upscale_factor.fp16;
 4147		*min_downscale = plane_cap->max_downscale_factor.fp16;
 4148		break;
 4149
 4150	default:
 4151		*max_upscale = plane_cap->max_upscale_factor.argb8888;
 4152		*min_downscale = plane_cap->max_downscale_factor.argb8888;
 4153		break;
 4154	}
 4155
 4156	/*
4157	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
 4158	 * scaling factor of 1.0 == 1000 units.
 4159	 */
 4160	if (*max_upscale == 1)
 4161		*max_upscale = 1000;
 4162
 4163	if (*min_downscale == 1)
 4164		*min_downscale = 1000;
 4165}
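
/*
 * Scale factors are expressed in units of 1/1000: e.g. the fallback
 * values used in fill_dc_scaling_info() below, min_downscale = 250 and
 * max_upscale = 16000, allow anything from a 0.25x shrink to a 16x
 * enlargement.
 */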
 4166
 4168static int fill_dc_scaling_info(const struct drm_plane_state *state,
 4169				struct dc_scaling_info *scaling_info)
 4170{
 4171	int scale_w, scale_h, min_downscale, max_upscale;
 4172
 4173	memset(scaling_info, 0, sizeof(*scaling_info));
 4174
4175	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
 4176	scaling_info->src_rect.x = state->src_x >> 16;
 4177	scaling_info->src_rect.y = state->src_y >> 16;
 4178
 4179	/*
 4180	 * For reasons we don't (yet) fully understand a non-zero
 4181	 * src_y coordinate into an NV12 buffer can cause a
 4182	 * system hang. To avoid hangs (and maybe be overly cautious)
 4183	 * let's reject both non-zero src_x and src_y.
 4184	 *
 4185	 * We currently know of only one use-case to reproduce a
 4186	 * scenario with non-zero src_x and src_y for NV12, which
 4187	 * is to gesture the YouTube Android app into full screen
 4188	 * on ChromeOS.
 4189	 */
 4190	if (state->fb &&
 4191	    state->fb->format->format == DRM_FORMAT_NV12 &&
 4192	    (scaling_info->src_rect.x != 0 ||
 4193	     scaling_info->src_rect.y != 0))
 4194		return -EINVAL;
 4195
 4196	scaling_info->src_rect.width = state->src_w >> 16;
 4197	if (scaling_info->src_rect.width == 0)
 4198		return -EINVAL;
 4199
 4200	scaling_info->src_rect.height = state->src_h >> 16;
 4201	if (scaling_info->src_rect.height == 0)
 4202		return -EINVAL;
 4203
 4204	scaling_info->dst_rect.x = state->crtc_x;
 4205	scaling_info->dst_rect.y = state->crtc_y;
 4206
 4207	if (state->crtc_w == 0)
 4208		return -EINVAL;
 4209
 4210	scaling_info->dst_rect.width = state->crtc_w;
 4211
 4212	if (state->crtc_h == 0)
 4213		return -EINVAL;
 4214
 4215	scaling_info->dst_rect.height = state->crtc_h;
 4216
 4217	/* DRM doesn't specify clipping on destination output. */
 4218	scaling_info->clip_rect = scaling_info->dst_rect;
 4219
 4220	/* Validate scaling per-format with DC plane caps */
 4221	if (state->plane && state->plane->dev && state->fb) {
 4222		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
 4223					     &min_downscale, &max_upscale);
 4224	} else {
 4225		min_downscale = 250;
 4226		max_upscale = 16000;
 4227	}
 4228
 4229	scale_w = scaling_info->dst_rect.width * 1000 /
 4230		  scaling_info->src_rect.width;
 4231
 4232	if (scale_w < min_downscale || scale_w > max_upscale)
 4233		return -EINVAL;
 4234
 4235	scale_h = scaling_info->dst_rect.height * 1000 /
 4236		  scaling_info->src_rect.height;
 4237
 4238	if (scale_h < min_downscale || scale_h > max_upscale)
 4239		return -EINVAL;
 4240
 4241	/*
 4242	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
 4243	 * assume reasonable defaults based on the format.
 4244	 */
 4245
 4246	return 0;
 4247}
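
/*
 * Worked example (illustrative): scaling a 1920-wide source rect onto a
 * 960-wide destination gives scale_w = 960 * 1000 / 1920 = 500, i.e. a
 * 0.5x downscale, which is accepted as long as min_downscale <= 500.
 */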
 4248
 4249static void
 4250fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
 4251				 uint64_t tiling_flags)
 4252{
 4253	/* Fill GFX8 params */
 4254	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
 4255		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
 4256
 4257		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
 4258		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
 4259		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
 4260		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
 4261		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
 4262
 4263		/* XXX fix me for VI */
 4264		tiling_info->gfx8.num_banks = num_banks;
 4265		tiling_info->gfx8.array_mode =
 4266				DC_ARRAY_2D_TILED_THIN1;
 4267		tiling_info->gfx8.tile_split = tile_split;
 4268		tiling_info->gfx8.bank_width = bankw;
 4269		tiling_info->gfx8.bank_height = bankh;
 4270		tiling_info->gfx8.tile_aspect = mtaspect;
 4271		tiling_info->gfx8.tile_mode =
 4272				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
 4273	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
 4274			== DC_ARRAY_1D_TILED_THIN1) {
 4275		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
 4276	}
 4277
 4278	tiling_info->gfx8.pipe_config =
 4279			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 4280}
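
/*
 * AMDGPU_TILING_GET(flags, FIELD) simply extracts a bitfield, i.e.
 * (flags >> AMDGPU_TILING_FIELD_SHIFT) & AMDGPU_TILING_FIELD_MASK,
 * so the GFX8 tiling parameters above are decoded straight from the
 * BO metadata supplied by userspace.
 */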
 4281
 4282static void
 4283fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
 4284				  union dc_tiling_info *tiling_info)
 4285{
 4286	tiling_info->gfx9.num_pipes =
 4287		adev->gfx.config.gb_addr_config_fields.num_pipes;
 4288	tiling_info->gfx9.num_banks =
 4289		adev->gfx.config.gb_addr_config_fields.num_banks;
 4290	tiling_info->gfx9.pipe_interleave =
 4291		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
 4292	tiling_info->gfx9.num_shader_engines =
 4293		adev->gfx.config.gb_addr_config_fields.num_se;
 4294	tiling_info->gfx9.max_compressed_frags =
 4295		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
 4296	tiling_info->gfx9.num_rb_per_se =
 4297		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
 4298	tiling_info->gfx9.shaderEnable = 1;
 4299	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
 4300	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
 4301	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
 4302	    adev->asic_type == CHIP_BEIGE_GOBY ||
 4303	    adev->asic_type == CHIP_YELLOW_CARP ||
 4304	    adev->asic_type == CHIP_VANGOGH)
 4305		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
 4306}
 4307
 4308static int
 4309validate_dcc(struct amdgpu_device *adev,
 4310	     const enum surface_pixel_format format,
 4311	     const enum dc_rotation_angle rotation,
 4312	     const union dc_tiling_info *tiling_info,
 4313	     const struct dc_plane_dcc_param *dcc,
 4314	     const struct dc_plane_address *address,
 4315	     const struct plane_size *plane_size)
 4316{
 4317	struct dc *dc = adev->dm.dc;
 4318	struct dc_dcc_surface_param input;
 4319	struct dc_surface_dcc_cap output;
 4320
 4321	memset(&input, 0, sizeof(input));
 4322	memset(&output, 0, sizeof(output));
 4323
 4324	if (!dcc->enable)
 4325		return 0;
 4326
 4327	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
 4328	    !dc->cap_funcs.get_dcc_compression_cap)
 4329		return -EINVAL;
 4330
 4331	input.format = format;
 4332	input.surface_size.width = plane_size->surface_size.width;
 4333	input.surface_size.height = plane_size->surface_size.height;
 4334	input.swizzle_mode = tiling_info->gfx9.swizzle;
 4335
 4336	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
 4337		input.scan = SCAN_DIRECTION_HORIZONTAL;
 4338	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
 4339		input.scan = SCAN_DIRECTION_VERTICAL;
 4340
 4341	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
 4342		return -EINVAL;
 4343
 4344	if (!output.capable)
 4345		return -EINVAL;
 4346
 4347	if (dcc->independent_64b_blks == 0 &&
 4348	    output.grph.rgb.independent_64b_blks != 0)
 4349		return -EINVAL;
 4350
 4351	return 0;
 4352}
 4353
 4354static bool
 4355modifier_has_dcc(uint64_t modifier)
 4356{
 4357	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
 4358}
 4359
 4360static unsigned
 4361modifier_gfx9_swizzle_mode(uint64_t modifier)
 4362{
 4363	if (modifier == DRM_FORMAT_MOD_LINEAR)
 4364		return 0;
 4365
 4366	return AMD_FMT_MOD_GET(TILE, modifier);
 4367}
 4368
 4369static const struct drm_format_info *
 4370amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 4371{
 4372	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
 4373}
 4374
 4375static void
 4376fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
 4377				    union dc_tiling_info *tiling_info,
 4378				    uint64_t modifier)
 4379{
 4380	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
 4381	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
 4382	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
 4383	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
 4384
 4385	fill_gfx9_tiling_info_from_device(adev, tiling_info);
 4386
 4387	if (!IS_AMD_FMT_MOD(modifier))
 4388		return;
 4389
 4390	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
 4391	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
 4392
 4393	if (adev->family >= AMDGPU_FAMILY_NV) {
 4394		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
 4395	} else {
 4396		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
 4397
 4398		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
 4399	}
 4400}
 4401
 4402enum dm_micro_swizzle {
 4403	MICRO_SWIZZLE_Z = 0,
 4404	MICRO_SWIZZLE_S = 1,
 4405	MICRO_SWIZZLE_D = 2,
 4406	MICRO_SWIZZLE_R = 3
 4407};
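
/*
 * The GFX9+ swizzle mode encodes the micro-tile class in its two low
 * bits, which is why dm_plane_format_mod_supported() below masks the
 * decoded swizzle with 3 to recover one of the values above.
 */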
 4408
 4409static bool dm_plane_format_mod_supported(struct drm_plane *plane,
 4410					  uint32_t format,
 4411					  uint64_t modifier)
 4412{
 4413	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 4414	const struct drm_format_info *info = drm_format_info(format);
 4415	int i;
 4416
 4417	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
 4418
 4419	if (!info)
 4420		return false;
 4421
 4422	/*
 4423	 * We always have to allow these modifiers:
 4424	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
 4425	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
 4426	 */
 4427	if (modifier == DRM_FORMAT_MOD_LINEAR ||
 4428	    modifier == DRM_FORMAT_MOD_INVALID) {
 4429		return true;
 4430	}
 4431
 4432	/* Check that the modifier is on the list of the plane's supported modifiers. */
 4433	for (i = 0; i < plane->modifier_count; i++) {
 4434		if (modifier == plane->modifiers[i])
 4435			break;
 4436	}
 4437	if (i == plane->modifier_count)
 4438		return false;
 4439
 4440	/*
 4441	 * For D swizzle the canonical modifier depends on the bpp, so check
 4442	 * it here.
 4443	 */
 4444	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
 4445	    adev->family >= AMDGPU_FAMILY_NV) {
 4446		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
 4447			return false;
 4448	}
 4449
 4450	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
 4451	    info->cpp[0] < 8)
 4452		return false;
 4453
 4454	if (modifier_has_dcc(modifier)) {
 4455		/* Per radeonsi comments 16/64 bpp are more complicated. */
 4456		if (info->cpp[0] != 4)
 4457			return false;
 4458		/* We support multi-planar formats, but not when combined with
 4459		 * additional DCC metadata planes. */
 4460		if (info->num_planes > 1)
 4461			return false;
 4462	}
 4463
 4464	return true;
 4465}
 4466
 4467static void
 4468add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
 4469{
 4470	if (!*mods)
 4471		return;
 4472
 4473	if (*cap - *size < 1) {
 4474		uint64_t new_cap = *cap * 2;
 4475		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
 4476
 4477		if (!new_mods) {
 4478			kfree(*mods);
 4479			*mods = NULL;
 4480			return;
 4481		}
 4482
 4483		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
 4484		kfree(*mods);
 4485		*mods = new_mods;
 4486		*cap = new_cap;
 4487	}
 4488
 4489	(*mods)[*size] = mod;
 4490	*size += 1;
 4491}
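
/*
 * The list grows geometrically (capacity doubles when full), so repeated
 * add_modifier() calls are amortized O(1). On allocation failure the list
 * is freed and *mods set to NULL, which get_plane_modifiers() detects and
 * turns into -ENOMEM.
 */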
 4492
 4493static void
 4494add_gfx9_modifiers(const struct amdgpu_device *adev,
 4495		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
 4496{
 4497	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
 4498	int pipe_xor_bits = min(8, pipes +
 4499				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
 4500	int bank_xor_bits = min(8 - pipe_xor_bits,
 4501				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
 4502	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
 4503		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
 4504
 4506	if (adev->family == AMDGPU_FAMILY_RV) {
 4507		/* Raven2 and later */
 4508		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
 4509
 4510		/*
 4511		 * No _D DCC swizzles yet because we only allow 32bpp, which
 4512		 * doesn't support _D on DCN
 4513		 */
 4514
 4515		if (has_constant_encode) {
 4516			add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4517				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4518				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4519				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4520				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
 4521				    AMD_FMT_MOD_SET(DCC, 1) |
 4522				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4523				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
 4524				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
 4525		}
 4526
 4527		add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4528			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4529			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4530			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4531			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
 4532			    AMD_FMT_MOD_SET(DCC, 1) |
 4533			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4534			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
 4535			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
 4536
 4537		if (has_constant_encode) {
 4538			add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4539				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4540				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4541				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4542				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
 4543				    AMD_FMT_MOD_SET(DCC, 1) |
 4544				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
 4545				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4546				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
 4547
 4548				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 4549				    AMD_FMT_MOD_SET(RB, rb) |
 4550				    AMD_FMT_MOD_SET(PIPE, pipes));
 4551		}
 4552
 4553		add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4554			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4555			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4556			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4557			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
 4558			    AMD_FMT_MOD_SET(DCC, 1) |
 4559			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
 4560			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4561			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
 4562			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
 4563			    AMD_FMT_MOD_SET(RB, rb) |
 4564			    AMD_FMT_MOD_SET(PIPE, pipes));
 4565	}
 4566
 4567	/*
 4568	 * Only supported for 64bpp on Raven, will be filtered on format in
 4569	 * dm_plane_format_mod_supported.
 4570	 */
 4571	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4572		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
 4573		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4574		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4575		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
 4576
 4577	if (adev->family == AMDGPU_FAMILY_RV) {
 4578		add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4579			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4580			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 4581			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4582			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
 4583	}
 4584
 4585	/*
 4586	 * Only supported for 64bpp on Raven, will be filtered on format in
 4587	 * dm_plane_format_mod_supported.
 4588	 */
 4589	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4590		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
 4591		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4592
 4593	if (adev->family == AMDGPU_FAMILY_RV) {
 4594		add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4595			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
 4596			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4597	}
 4598}
 4599
 4600static void
 4601add_gfx10_1_modifiers(const struct amdgpu_device *adev,
 4602		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
 4603{
 4604	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
 4605
 4606	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4607		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4608		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
 4609		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4610		    AMD_FMT_MOD_SET(DCC, 1) |
 4611		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 4612		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4613		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 4614
 4615	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4616		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4617		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
 4618		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4619		    AMD_FMT_MOD_SET(DCC, 1) |
 4620		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
 4621		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 4622		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4623		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 4624
 4625	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4626		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4627		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
 4628		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
 4629
 4630	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4631		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4632		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
 4633		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
 4634
 4636	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
 4637	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4638		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
 4639		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4640
 4641	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4642		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
 4643		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4644}
 4645
 4646static void
 4647add_gfx10_3_modifiers(const struct amdgpu_device *adev,
 4648		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
 4649{
 4650	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
 4651	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
 4652
 4653	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4654		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4655		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
 4656		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4657		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
 4658		    AMD_FMT_MOD_SET(DCC, 1) |
 4659		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 4660		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4661		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
 4662		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 4663
 4664	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4665		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4666		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
 4667		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4668		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
 4669		    AMD_FMT_MOD_SET(DCC, 1) |
 4670		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
 4671		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 4672		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
 4673		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
 4674		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 4675
 4676	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4677		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 4678		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
 4679		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4680		    AMD_FMT_MOD_SET(PACKERS, pkrs));
 4681
 4682	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4683		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 4684		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
 4685		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 4686		    AMD_FMT_MOD_SET(PACKERS, pkrs));
 4687
 4688	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
 4689	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4690		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
 4691		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4692
 4693	add_modifier(mods, size, capacity, AMD_FMT_MOD |
 4694		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
 4695		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 4696}
 4697
 4698static int
 4699get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
 4700{
 4701	uint64_t size = 0, capacity = 128;
 4702	*mods = NULL;
 4703
 4704	/* We have not hooked up any pre-GFX9 modifiers. */
 4705	if (adev->family < AMDGPU_FAMILY_AI)
 4706		return 0;
 4707
 4708	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
 4709
 4710	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
 4711		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 4712		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 4713		return *mods ? 0 : -ENOMEM;
 4714	}
 4715
 4716	switch (adev->family) {
 4717	case AMDGPU_FAMILY_AI:
 4718	case AMDGPU_FAMILY_RV:
 4719		add_gfx9_modifiers(adev, mods, &size, &capacity);
 4720		break;
 4721	case AMDGPU_FAMILY_NV:
 4722	case AMDGPU_FAMILY_VGH:
 4723	case AMDGPU_FAMILY_YC:
 4724		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
 4725			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
 4726		else
 4727			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
 4728		break;
 4729	}
 4730
 4731	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 4732
 4733	/* INVALID marks the end of the list. */
 4734	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 4735
 4736	if (!*mods)
 4737		return -ENOMEM;
 4738
 4739	return 0;
 4740}
 4741
 4742static int
 4743fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
 4744					  const struct amdgpu_framebuffer *afb,
 4745					  const enum surface_pixel_format format,
 4746					  const enum dc_rotation_angle rotation,
 4747					  const struct plane_size *plane_size,
 4748					  union dc_tiling_info *tiling_info,
 4749					  struct dc_plane_dcc_param *dcc,
 4750					  struct dc_plane_address *address,
 4751					  const bool force_disable_dcc)
 4752{
 4753	const uint64_t modifier = afb->base.modifier;
 4754	int ret;
 4755
 4756	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
 4757	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
 4758
 4759	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
 4760		uint64_t dcc_address = afb->address + afb->base.offsets[1];
 4761
 4762		dcc->enable = 1;
 4763		dcc->meta_pitch = afb->base.pitches[1];
 4764		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
 4765
 4766		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
 4767		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
 4768	}
 4769
 4770	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
 4771	if (ret)
 4772		return ret;
 4773
 4774	return 0;
 4775}
 4776
 4777static int
 4778fill_plane_buffer_attributes(struct amdgpu_device *adev,
 4779			     const struct amdgpu_framebuffer *afb,
 4780			     const enum surface_pixel_format format,
 4781			     const enum dc_rotation_angle rotation,
 4782			     const uint64_t tiling_flags,
 4783			     union dc_tiling_info *tiling_info,
 4784			     struct plane_size *plane_size,
 4785			     struct dc_plane_dcc_param *dcc,
 4786			     struct dc_plane_address *address,
 4787			     bool tmz_surface,
 4788			     bool force_disable_dcc)
 4789{
 4790	const struct drm_framebuffer *fb = &afb->base;
 4791	int ret;
 4792
 4793	memset(tiling_info, 0, sizeof(*tiling_info));
 4794	memset(plane_size, 0, sizeof(*plane_size));
 4795	memset(dcc, 0, sizeof(*dcc));
 4796	memset(address, 0, sizeof(*address));
 4797
 4798	address->tmz_surface = tmz_surface;
 4799
 4800	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
 4801		uint64_t addr = afb->address + fb->offsets[0];
 4802
 4803		plane_size->surface_size.x = 0;
 4804		plane_size->surface_size.y = 0;
 4805		plane_size->surface_size.width = fb->width;
 4806		plane_size->surface_size.height = fb->height;
 4807		plane_size->surface_pitch =
 4808			fb->pitches[0] / fb->format->cpp[0];
 4809
 4810		address->type = PLN_ADDR_TYPE_GRAPHICS;
 4811		address->grph.addr.low_part = lower_32_bits(addr);
 4812		address->grph.addr.high_part = upper_32_bits(addr);
 4813	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
 4814		uint64_t luma_addr = afb->address + fb->offsets[0];
 4815		uint64_t chroma_addr = afb->address + fb->offsets[1];
 4816
 4817		plane_size->surface_size.x = 0;
 4818		plane_size->surface_size.y = 0;
 4819		plane_size->surface_size.width = fb->width;
 4820		plane_size->surface_size.height = fb->height;
 4821		plane_size->surface_pitch =
 4822			fb->pitches[0] / fb->format->cpp[0];
 4823
 4824		plane_size->chroma_size.x = 0;
 4825		plane_size->chroma_size.y = 0;
 4826		/* TODO: set these based on surface format */
 4827		plane_size->chroma_size.width = fb->width / 2;
 4828		plane_size->chroma_size.height = fb->height / 2;
 4829
 4830		plane_size->chroma_pitch =
 4831			fb->pitches[1] / fb->format->cpp[1];
 4832
 4833		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
 4834		address->video_progressive.luma_addr.low_part =
 4835			lower_32_bits(luma_addr);
 4836		address->video_progressive.luma_addr.high_part =
 4837			upper_32_bits(luma_addr);
 4838		address->video_progressive.chroma_addr.low_part =
 4839			lower_32_bits(chroma_addr);
 4840		address->video_progressive.chroma_addr.high_part =
 4841			upper_32_bits(chroma_addr);
 4842	}
 4843
 4844	if (adev->family >= AMDGPU_FAMILY_AI) {
 4845		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
 4846								rotation, plane_size,
 4847								tiling_info, dcc,
 4848								address,
 4849								force_disable_dcc);
 4850		if (ret)
 4851			return ret;
 4852	} else {
 4853		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
 4854	}
 4855
 4856	return 0;
 4857}
 4858
 4859static void
 4860fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
 4861			       bool *per_pixel_alpha, bool *global_alpha,
 4862			       int *global_alpha_value)
 4863{
 4864	*per_pixel_alpha = false;
 4865	*global_alpha = false;
 4866	*global_alpha_value = 0xff;
 4867
 4868	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
 4869		return;
 4870
 4871	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
 4872		static const uint32_t alpha_formats[] = {
 4873			DRM_FORMAT_ARGB8888,
 4874			DRM_FORMAT_RGBA8888,
 4875			DRM_FORMAT_ABGR8888,
 4876		};
 4877		uint32_t format = plane_state->fb->format->format;
 4878		unsigned int i;
 4879
 4880		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
 4881			if (format == alpha_formats[i]) {
 4882				*per_pixel_alpha = true;
 4883				break;
 4884			}
 4885		}
 4886	}
 4887
 4888	if (plane_state->alpha < 0xffff) {
 4889		*global_alpha = true;
 4890		*global_alpha_value = plane_state->alpha >> 8;
 4891	}
 4892}
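
/*
 * DRM's plane alpha property is 16-bit, so the >> 8 above reduces it to
 * the 8-bit range DC expects: e.g. a plane alpha of 0x8080 becomes a
 * global_alpha_value of 0x80 (~50% opacity).
 */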
 4893
 4894static int
 4895fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 4896			    const enum surface_pixel_format format,
 4897			    enum dc_color_space *color_space)
 4898{
 4899	bool full_range;
 4900
 4901	*color_space = COLOR_SPACE_SRGB;
 4902
 4903	/* DRM color properties only affect non-RGB formats. */
 4904	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 4905		return 0;
 4906
 4907	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
 4908
 4909	switch (plane_state->color_encoding) {
 4910	case DRM_COLOR_YCBCR_BT601:
 4911		if (full_range)
 4912			*color_space = COLOR_SPACE_YCBCR601;
 4913		else
 4914			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
 4915		break;
 4916
 4917	case DRM_COLOR_YCBCR_BT709:
 4918		if (full_range)
 4919			*color_space = COLOR_SPACE_YCBCR709;
 4920		else
 4921			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
 4922		break;
 4923
 4924	case DRM_COLOR_YCBCR_BT2020:
 4925		if (full_range)
 4926			*color_space = COLOR_SPACE_2020_YCBCR;
 4927		else
 4928			return -EINVAL;
 4929		break;
 4930
 4931	default:
 4932		return -EINVAL;
 4933	}
 4934
 4935	return 0;
 4936}
 4937
 4938static int
 4939fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
 4940			    const struct drm_plane_state *plane_state,
 4941			    const uint64_t tiling_flags,
 4942			    struct dc_plane_info *plane_info,
 4943			    struct dc_plane_address *address,
 4944			    bool tmz_surface,
 4945			    bool force_disable_dcc)
 4946{
 4947	const struct drm_framebuffer *fb = plane_state->fb;
 4948	const struct amdgpu_framebuffer *afb =
 4949		to_amdgpu_framebuffer(plane_state->fb);
 4950	int ret;
 4951
 4952	memset(plane_info, 0, sizeof(*plane_info));
 4953
 4954	switch (fb->format->format) {
 4955	case DRM_FORMAT_C8:
 4956		plane_info->format =
 4957			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
 4958		break;
 4959	case DRM_FORMAT_RGB565:
 4960		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
 4961		break;
 4962	case DRM_FORMAT_XRGB8888:
 4963	case DRM_FORMAT_ARGB8888:
 4964		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
 4965		break;
 4966	case DRM_FORMAT_XRGB2101010:
 4967	case DRM_FORMAT_ARGB2101010:
 4968		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
 4969		break;
 4970	case DRM_FORMAT_XBGR2101010:
 4971	case DRM_FORMAT_ABGR2101010:
 4972		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
 4973		break;
 4974	case DRM_FORMAT_XBGR8888:
 4975	case DRM_FORMAT_ABGR8888:
 4976		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
 4977		break;
 4978	case DRM_FORMAT_NV21:
 4979		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
 4980		break;
 4981	case DRM_FORMAT_NV12:
 4982		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
 4983		break;
 4984	case DRM_FORMAT_P010:
 4985		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
 4986		break;
 4987	case DRM_FORMAT_XRGB16161616F:
 4988	case DRM_FORMAT_ARGB16161616F:
 4989		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
 4990		break;
 4991	case DRM_FORMAT_XBGR16161616F:
 4992	case DRM_FORMAT_ABGR16161616F:
 4993		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
 4994		break;
 4995	case DRM_FORMAT_XRGB16161616:
 4996	case DRM_FORMAT_ARGB16161616:
 4997		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
 4998		break;
 4999	case DRM_FORMAT_XBGR16161616:
 5000	case DRM_FORMAT_ABGR16161616:
 5001		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
 5002		break;
 5003	default:
 5004		DRM_ERROR(
 5005			"Unsupported screen format %p4cc\n",
 5006			&fb->format->format);
 5007		return -EINVAL;
 5008	}
 5009
 5010	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
 5011	case DRM_MODE_ROTATE_0:
 5012		plane_info->rotation = ROTATION_ANGLE_0;
 5013		break;
 5014	case DRM_MODE_ROTATE_90:
 5015		plane_info->rotation = ROTATION_ANGLE_90;
 5016		break;
 5017	case DRM_MODE_ROTATE_180:
 5018		plane_info->rotation = ROTATION_ANGLE_180;
 5019		break;
 5020	case DRM_MODE_ROTATE_270:
 5021		plane_info->rotation = ROTATION_ANGLE_270;
 5022		break;
 5023	default:
 5024		plane_info->rotation = ROTATION_ANGLE_0;
 5025		break;
 5026	}
 5027
 5028	plane_info->visible = true;
 5029	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
 5030
 5031	plane_info->layer_index = 0;
 5032
 5033	ret = fill_plane_color_attributes(plane_state, plane_info->format,
 5034					  &plane_info->color_space);
 5035	if (ret)
 5036		return ret;
 5037
 5038	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
 5039					   plane_info->rotation, tiling_flags,
 5040					   &plane_info->tiling_info,
 5041					   &plane_info->plane_size,
 5042					   &plane_info->dcc, address, tmz_surface,
 5043					   force_disable_dcc);
 5044	if (ret)
 5045		return ret;
 5046
 5047	fill_blending_from_plane_state(
 5048		plane_state, &plane_info->per_pixel_alpha,
 5049		&plane_info->global_alpha, &plane_info->global_alpha_value);
 5050
 5051	return 0;
 5052}
 5053
 5054static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 5055				    struct dc_plane_state *dc_plane_state,
 5056				    struct drm_plane_state *plane_state,
 5057				    struct drm_crtc_state *crtc_state)
 5058{
 5059	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 5060	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
 5061	struct dc_scaling_info scaling_info;
 5062	struct dc_plane_info plane_info;
 5063	int ret;
 5064	bool force_disable_dcc = false;
 5065
 5066	ret = fill_dc_scaling_info(plane_state, &scaling_info);
 5067	if (ret)
 5068		return ret;
 5069
 5070	dc_plane_state->src_rect = scaling_info.src_rect;
 5071	dc_plane_state->dst_rect = scaling_info.dst_rect;
 5072	dc_plane_state->clip_rect = scaling_info.clip_rect;
 5073	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
 5074
 5075	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
 5076	ret = fill_dc_plane_info_and_addr(adev, plane_state,
 5077					  afb->tiling_flags,
 5078					  &plane_info,
 5079					  &dc_plane_state->address,
 5080					  afb->tmz_surface,
 5081					  force_disable_dcc);
 5082	if (ret)
 5083		return ret;
 5084
 5085	dc_plane_state->format = plane_info.format;
 5086	dc_plane_state->color_space = plane_info.color_space;
 5088	dc_plane_state->plane_size = plane_info.plane_size;
 5089	dc_plane_state->rotation = plane_info.rotation;
 5090	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
 5091	dc_plane_state->stereo_format = plane_info.stereo_format;
 5092	dc_plane_state->tiling_info = plane_info.tiling_info;
 5093	dc_plane_state->visible = plane_info.visible;
 5094	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
 5095	dc_plane_state->global_alpha = plane_info.global_alpha;
 5096	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
 5097	dc_plane_state->dcc = plane_info.dcc;
  5098	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
 5099	dc_plane_state->flip_int_enabled = true;
 5100
 5101	/*
 5102	 * Always set input transfer function, since plane state is refreshed
 5103	 * every time.
 5104	 */
 5105	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
 5106	if (ret)
 5107		return ret;
 5108
 5109	return 0;
 5110}
 5111
 5112static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 5113					   const struct dm_connector_state *dm_state,
 5114					   struct dc_stream_state *stream)
 5115{
 5116	enum amdgpu_rmx_type rmx_type;
 5117
  5118	struct rect src = { 0 }; /* viewport in composition space */
 5119	struct rect dst = { 0 }; /* stream addressable area */
 5120
 5121	/* no mode. nothing to be done */
 5122	if (!mode)
 5123		return;
 5124
 5125	/* Full screen scaling by default */
 5126	src.width = mode->hdisplay;
 5127	src.height = mode->vdisplay;
 5128	dst.width = stream->timing.h_addressable;
 5129	dst.height = stream->timing.v_addressable;
 5130
 5131	if (dm_state) {
 5132		rmx_type = dm_state->scaling;
 5133		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
 5134			if (src.width * dst.height <
 5135					src.height * dst.width) {
 5136				/* height needs less upscaling/more downscaling */
 5137				dst.width = src.width *
 5138						dst.height / src.height;
 5139			} else {
 5140				/* width needs less upscaling/more downscaling */
 5141				dst.height = src.height *
 5142						dst.width / src.width;
 5143			}
 5144		} else if (rmx_type == RMX_CENTER) {
 5145			dst = src;
 5146		}
 5147
 5148		dst.x = (stream->timing.h_addressable - dst.width) / 2;
 5149		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 5150
 5151		if (dm_state->underscan_enable) {
 5152			dst.x += dm_state->underscan_hborder / 2;
 5153			dst.y += dm_state->underscan_vborder / 2;
 5154			dst.width -= dm_state->underscan_hborder;
 5155			dst.height -= dm_state->underscan_vborder;
 5156		}
 5157	}
 5158
 5159	stream->src = src;
 5160	stream->dst = dst;
 5161
 5162	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
 5163		      dst.x, dst.y, dst.width, dst.height);
 5164
 5165}
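
/*
 * Worked example (illustrative numbers, not taken from the code above):
 * with RMX_ASPECT, a 1280x720 source on a 1920x1200 addressable stream
 * gives src.width * dst.height = 1536000 and src.height * dst.width =
 * 1382400, so the width needs less upscaling and dst.height becomes
 * 720 * 1920 / 1280 = 1080. Centering then yields dst.x = 0 and
 * dst.y = (1200 - 1080) / 2 = 60.
 */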
 5166
 5167static enum dc_color_depth
 5168convert_color_depth_from_display_info(const struct drm_connector *connector,
 5169				      bool is_y420, int requested_bpc)
 5170{
 5171	uint8_t bpc;
 5172
 5173	if (is_y420) {
 5174		bpc = 8;
 5175
 5176		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
 5177		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
 5178			bpc = 16;
 5179		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
 5180			bpc = 12;
 5181		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
 5182			bpc = 10;
 5183	} else {
 5184		bpc = (uint8_t)connector->display_info.bpc;
 5185		/* Assume 8 bpc by default if no bpc is specified. */
 5186		bpc = bpc ? bpc : 8;
 5187	}
 5188
 5189	if (requested_bpc > 0) {
 5190		/*
 5191		 * Cap display bpc based on the user requested value.
 5192		 *
  5193	 * The value for state->max_bpc may not be correctly updated
 5194		 * depending on when the connector gets added to the state
 5195		 * or if this was called outside of atomic check, so it
 5196		 * can't be used directly.
 5197		 */
 5198		bpc = min_t(u8, bpc, requested_bpc);
 5199
 5200		/* Round down to the nearest even number. */
 5201		bpc = bpc - (bpc & 1);
 5202	}
 5203
 5204	switch (bpc) {
 5205	case 0:
 5206		/*
  5207		 * Temporary workaround: DRM doesn't parse color depth for
  5208		 * EDID revisions before 1.4.
 5209		 * TODO: Fix edid parsing
 5210		 */
 5211		return COLOR_DEPTH_888;
 5212	case 6:
 5213		return COLOR_DEPTH_666;
 5214	case 8:
 5215		return COLOR_DEPTH_888;
 5216	case 10:
 5217		return COLOR_DEPTH_101010;
 5218	case 12:
 5219		return COLOR_DEPTH_121212;
 5220	case 14:
 5221		return COLOR_DEPTH_141414;
 5222	case 16:
 5223		return COLOR_DEPTH_161616;
 5224	default:
 5225		return COLOR_DEPTH_UNDEFINED;
 5226	}
 5227}
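
/*
 * Example (illustrative): a sink reporting 12 bpc with a user-requested
 * max of 11 is capped via min_t() to 11, then rounded down to the nearest
 * even value (11 & 1 == 1, so 11 - 1 = 10), which maps to
 * COLOR_DEPTH_101010 above.
 */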
 5228
 5229static enum dc_aspect_ratio
 5230get_aspect_ratio(const struct drm_display_mode *mode_in)
 5231{
 5232	/* 1-1 mapping, since both enums follow the HDMI spec. */
 5233	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
 5234}
 5235
 5236static enum dc_color_space
 5237get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
 5238{
 5239	enum dc_color_space color_space = COLOR_SPACE_SRGB;
 5240
 5241	switch (dc_crtc_timing->pixel_encoding)	{
 5242	case PIXEL_ENCODING_YCBCR422:
 5243	case PIXEL_ENCODING_YCBCR444:
 5244	case PIXEL_ENCODING_YCBCR420:
 5245	{
 5246		/*
  5247		 * 27030 kHz is the separation point between HDTV and SDTV
  5248		 * according to the HDMI spec; use YCbCr709 above it and
  5249		 * YCbCr601 below it.
 5250		 */
 5251		if (dc_crtc_timing->pix_clk_100hz > 270300) {
 5252			if (dc_crtc_timing->flags.Y_ONLY)
 5253				color_space =
 5254					COLOR_SPACE_YCBCR709_LIMITED;
 5255			else
 5256				color_space = COLOR_SPACE_YCBCR709;
 5257		} else {
 5258			if (dc_crtc_timing->flags.Y_ONLY)
 5259				color_space =
 5260					COLOR_SPACE_YCBCR601_LIMITED;
 5261			else
 5262				color_space = COLOR_SPACE_YCBCR601;
 5263		}
 5264
 5265	}
 5266	break;
 5267	case PIXEL_ENCODING_RGB:
 5268		color_space = COLOR_SPACE_SRGB;
 5269		break;
 5270
 5271	default:
 5272		WARN_ON(1);
 5273		break;
 5274	}
 5275
 5276	return color_space;
 5277}
 5278
 5279static bool adjust_colour_depth_from_display_info(
 5280	struct dc_crtc_timing *timing_out,
 5281	const struct drm_display_info *info)
 5282{
 5283	enum dc_color_depth depth = timing_out->display_color_depth;
 5284	int normalized_clk;
 5285	do {
 5286		normalized_clk = timing_out->pix_clk_100hz / 10;
 5287		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
 5288		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 5289			normalized_clk /= 2;
  5290		/* Adjust the pixel clock per the HDMI spec based on colour depth */
 5291		switch (depth) {
 5292		case COLOR_DEPTH_888:
 5293			break;
 5294		case COLOR_DEPTH_101010:
 5295			normalized_clk = (normalized_clk * 30) / 24;
 5296			break;
 5297		case COLOR_DEPTH_121212:
 5298			normalized_clk = (normalized_clk * 36) / 24;
 5299			break;
 5300		case COLOR_DEPTH_161616:
 5301			normalized_clk = (normalized_clk * 48) / 24;
 5302			break;
 5303		default:
 5304			/* The above depths are the only ones valid for HDMI. */
 5305			return false;
 5306		}
 5307		if (normalized_clk <= info->max_tmds_clock) {
 5308			timing_out->display_color_depth = depth;
 5309			return true;
 5310		}
 5311	} while (--depth > COLOR_DEPTH_666);
 5312	return false;
 5313}
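
/*
 * Worked example (assumed 4K@60 timing): pix_clk_100hz = 5940000, so
 * normalized_clk starts at 594000 kHz. YCbCr 4:2:0 halves it to
 * 297000 kHz, and 12 bpc scales it by 36/24 to 445500 kHz. The depth is
 * kept only if that value fits within info->max_tmds_clock (also in kHz);
 * otherwise the loop retries at the next lower depth.
 */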
 5314
 5315static void fill_stream_properties_from_drm_display_mode(
 5316	struct dc_stream_state *stream,
 5317	const struct drm_display_mode *mode_in,
 5318	const struct drm_connector *connector,
 5319	const struct drm_connector_state *connector_state,
 5320	const struct dc_stream_state *old_stream,
 5321	int requested_bpc)
 5322{
 5323	struct dc_crtc_timing *timing_out = &stream->timing;
 5324	const struct drm_display_info *info = &connector->display_info;
 5325	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 5326	struct hdmi_vendor_infoframe hv_frame;
 5327	struct hdmi_avi_infoframe avi_frame;
 5328
 5329	memset(&hv_frame, 0, sizeof(hv_frame));
 5330	memset(&avi_frame, 0, sizeof(avi_frame));
 5331
 5332	timing_out->h_border_left = 0;
 5333	timing_out->h_border_right = 0;
 5334	timing_out->v_border_top = 0;
 5335	timing_out->v_border_bottom = 0;
 5336	/* TODO: un-hardcode */
 5337	if (drm_mode_is_420_only(info, mode_in)
 5338			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 5339		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 5340	else if (drm_mode_is_420_also(info, mode_in)
 5341			&& aconnector->force_yuv420_output)
 5342		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 5343	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
 5344			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 5345		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
 5346	else
 5347		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
 5348
 5349	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
 5350	timing_out->display_color_depth = convert_color_depth_from_display_info(
 5351		connector,
 5352		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
 5353		requested_bpc);
 5354	timing_out->scan_type = SCANNING_TYPE_NODATA;
 5355	timing_out->hdmi_vic = 0;
 5356
  5357	if (old_stream) {
 5358		timing_out->vic = old_stream->timing.vic;
 5359		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
 5360		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
 5361	} else {
 5362		timing_out->vic = drm_match_cea_mode(mode_in);
 5363		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
 5364			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
 5365		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
 5366			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
 5367	}
 5368
 5369	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
 5370		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
 5371		timing_out->vic = avi_frame.video_code;
 5372		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
 5373		timing_out->hdmi_vic = hv_frame.vic;
 5374	}
 5375
 5376	if (is_freesync_video_mode(mode_in, aconnector)) {
 5377		timing_out->h_addressable = mode_in->hdisplay;
 5378		timing_out->h_total = mode_in->htotal;
 5379		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
 5380		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
 5381		timing_out->v_total = mode_in->vtotal;
 5382		timing_out->v_addressable = mode_in->vdisplay;
 5383		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
 5384		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
 5385		timing_out->pix_clk_100hz = mode_in->clock * 10;
 5386	} else {
 5387		timing_out->h_addressable = mode_in->crtc_hdisplay;
 5388		timing_out->h_total = mode_in->crtc_htotal;
 5389		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
 5390		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
 5391		timing_out->v_total = mode_in->crtc_vtotal;
 5392		timing_out->v_addressable = mode_in->crtc_vdisplay;
 5393		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
 5394		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
 5395		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
 5396	}
 5397
 5398	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 5399
 5400	stream->output_color_space = get_output_color_space(timing_out);
 5401
 5402	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
 5403	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
 5404	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
 5405		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
 5406		    drm_mode_is_420_also(info, mode_in) &&
 5407		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
 5408			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 5409			adjust_colour_depth_from_display_info(timing_out, info);
 5410		}
 5411	}
 5412}
 5413
 5414static void fill_audio_info(struct audio_info *audio_info,
 5415			    const struct drm_connector *drm_connector,
 5416			    const struct dc_sink *dc_sink)
 5417{
 5418	int i = 0;
 5419	int cea_revision = 0;
 5420	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
 5421
 5422	audio_info->manufacture_id = edid_caps->manufacturer_id;
 5423	audio_info->product_id = edid_caps->product_id;
 5424
 5425	cea_revision = drm_connector->display_info.cea_rev;
 5426
 5427	strscpy(audio_info->display_name,
 5428		edid_caps->display_name,
 5429		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
 5430
 5431	if (cea_revision >= 3) {
 5432		audio_info->mode_count = edid_caps->audio_mode_count;
 5433
 5434		for (i = 0; i < audio_info->mode_count; ++i) {
 5435			audio_info->modes[i].format_code =
 5436					(enum audio_format_code)
 5437					(edid_caps->audio_modes[i].format_code);
 5438			audio_info->modes[i].channel_count =
 5439					edid_caps->audio_modes[i].channel_count;
 5440			audio_info->modes[i].sample_rates.all =
 5441					edid_caps->audio_modes[i].sample_rate;
 5442			audio_info->modes[i].sample_size =
 5443					edid_caps->audio_modes[i].sample_size;
 5444		}
 5445	}
 5446
 5447	audio_info->flags.all = edid_caps->speaker_flags;
 5448
  5449	/* TODO: We only check the progressive mode; check the interlace mode too */
 5450	if (drm_connector->latency_present[0]) {
 5451		audio_info->video_latency = drm_connector->video_latency[0];
 5452		audio_info->audio_latency = drm_connector->audio_latency[0];
 5453	}
 5454
 5455	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
 5456
 5457}
 5458
 5459static void
 5460copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
 5461				      struct drm_display_mode *dst_mode)
 5462{
 5463	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
 5464	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
 5465	dst_mode->crtc_clock = src_mode->crtc_clock;
 5466	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
 5467	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
 5468	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
 5469	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
 5470	dst_mode->crtc_htotal = src_mode->crtc_htotal;
 5471	dst_mode->crtc_hskew = src_mode->crtc_hskew;
 5472	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
 5473	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
 5474	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
 5475	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
 5476	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
 5477}
 5478
 5479static void
 5480decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
 5481					const struct drm_display_mode *native_mode,
 5482					bool scale_enabled)
 5483{
 5484	if (scale_enabled) {
 5485		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
 5486	} else if (native_mode->clock == drm_mode->clock &&
 5487			native_mode->htotal == drm_mode->htotal &&
 5488			native_mode->vtotal == drm_mode->vtotal) {
 5489		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
 5490	} else {
  5491		/* no scaling and no amdgpu-inserted mode; no need to patch */
 5492	}
 5493}
 5494
 5495static struct dc_sink *
 5496create_fake_sink(struct amdgpu_dm_connector *aconnector)
 5497{
 5498	struct dc_sink_init_data sink_init_data = { 0 };
 5499	struct dc_sink *sink = NULL;
 5500	sink_init_data.link = aconnector->dc_link;
 5501	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
 5502
 5503	sink = dc_sink_create(&sink_init_data);
 5504	if (!sink) {
 5505		DRM_ERROR("Failed to create sink!\n");
 5506		return NULL;
 5507	}
 5508	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
 5509
 5510	return sink;
 5511}
 5512
 5513static void set_multisync_trigger_params(
 5514		struct dc_stream_state *stream)
 5515{
 5516	struct dc_stream_state *master = NULL;
 5517
 5518	if (stream->triggered_crtc_reset.enabled) {
 5519		master = stream->triggered_crtc_reset.event_source;
 5520		stream->triggered_crtc_reset.event =
 5521			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
 5522			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
 5523		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
 5524	}
 5525}
 5526
 5527static void set_master_stream(struct dc_stream_state *stream_set[],
 5528			      int stream_count)
 5529{
 5530	int j, highest_rfr = 0, master_stream = 0;
 5531
 5532	for (j = 0;  j < stream_count; j++) {
 5533		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
 5534			int refresh_rate = 0;
 5535
 5536			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
 5537				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
 5538			if (refresh_rate > highest_rfr) {
 5539				highest_rfr = refresh_rate;
 5540				master_stream = j;
 5541			}
 5542		}
 5543	}
 5544	for (j = 0;  j < stream_count; j++) {
 5545		if (stream_set[j])
 5546			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
 5547	}
 5548}
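
/*
 * The refresh-rate math above works out in whole Hz. For example, a
 * 1080p stream with pix_clk_100hz = 1485000 (148.5 MHz) and a 2200x1125
 * total gives (1485000 * 100) / (2200 * 1125) = 60; the stream with the
 * highest such rate becomes the trigger event source for the others.
 */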
 5549
 5550static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 5551{
 5552	int i = 0;
 5553	struct dc_stream_state *stream;
 5554
 5555	if (context->stream_count < 2)
 5556		return;
 5557	for (i = 0; i < context->stream_count ; i++) {
 5558		if (!context->streams[i])
 5559			continue;
 5560		/*
 5561		 * TODO: add a function to read AMD VSDB bits and set
 5562		 * crtc_sync_master.multi_sync_enabled flag
 5563		 * For now it's set to false
 5564		 */
 5565	}
 5566
 5567	set_master_stream(context->streams, context->stream_count);
 5568
 5569	for (i = 0; i < context->stream_count ; i++) {
 5570		stream = context->streams[i];
 5571
 5572		if (!stream)
 5573			continue;
 5574
 5575		set_multisync_trigger_params(stream);
 5576	}
 5577}
 5578
 5579#if defined(CONFIG_DRM_AMD_DC_DCN)
 5580static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
 5581							struct dc_sink *sink, struct dc_stream_state *stream,
 5582							struct dsc_dec_dpcd_caps *dsc_caps)
 5583{
 5584	stream->timing.flags.DSC = 0;
 5585
 5586	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 5587		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
 5588				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
 5589				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
 5590				      dsc_caps);
 5591	}
 5592}
 5593
 5594static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 5595										struct dc_sink *sink, struct dc_stream_state *stream,
 5596										struct dsc_dec_dpcd_caps *dsc_caps)
 5597{
 5598	struct drm_connector *drm_connector = &aconnector->base;
 5599	uint32_t link_bandwidth_kbps;
 5600
 5601	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 5602							dc_link_get_link_cap(aconnector->dc_link));
 5603	/* Set DSC policy according to dsc_clock_en */
 5604	dc_dsc_policy_set_enable_dsc_when_not_needed(
 5605		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
 5606
 5607	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 5608
 5609		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 5610						dsc_caps,
 5611						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
 5612						0,
 5613						link_bandwidth_kbps,
 5614						&stream->timing,
 5615						&stream->timing.dsc_cfg)) {
 5616			stream->timing.flags.DSC = 1;
 5617			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
 5618		}
 5619	}
 5620
 5621	/* Overwrite the stream flag if DSC is enabled through debugfs */
 5622	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
 5623		stream->timing.flags.DSC = 1;
 5624
 5625	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
 5626		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
 5627
 5628	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
 5629		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
 5630
 5631	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
 5632		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
 5633}
 5634#endif
 5635
 5636/**
 5637 * DOC: FreeSync Video
 5638 *
 5639 * When a userspace application wants to play a video, the content follows a
 5640 * standard format definition that usually specifies the FPS for that format.
  5641 * The list below illustrates some video formats and their expected FPS,
  5642 * respectively:
 5643 *
 5644 * - TV/NTSC (23.976 FPS)
 5645 * - Cinema (24 FPS)
 5646 * - TV/PAL (25 FPS)
 5647 * - TV/NTSC (29.97 FPS)
 5648 * - TV/NTSC (30 FPS)
 5649 * - Cinema HFR (48 FPS)
 5650 * - TV/PAL (50 FPS)
 5651 * - Commonly used (60 FPS)
  5652 * - Multiples of 24 (48, 72, 96 FPS)
 5653 *
  5654 * The list of standard video formats is not huge and can be added to the
  5655 * connector's modeset list beforehand. With that, userspace can leverage
  5656 * FreeSync to extend the front porch in order to attain the target refresh
  5657 * rate. Such a switch will happen seamlessly, without screen blanking or
  5658 * reprogramming of the output in any other way. If userspace requests a
  5659 * modesetting change compatible with FreeSync modes that only differ in the
  5660 * refresh rate, DC will skip the full update and avoid a blink during the
  5661 * transition. For example, the video player can change the modesetting from
  5662 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
  5663 * causing any display blink. The same concept applies to any other mode
  5664 * setting change.
 5665 */
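
/*
 * Illustrative front-porch math (assumed values): for a 1080p base mode
 * with htotal = 2200, vtotal = 1125 and a 148.5 MHz pixel clock (60 Hz),
 * stretching the vertical front porch so that vtotal becomes roughly
 * 1125 * 60 / 48 = 1406 lines yields ~48 Hz with the same pixel clock
 * and horizontal timing, which is the seamless switch described above.
 */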
 5666static struct drm_display_mode *
 5667get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 5668			  bool use_probed_modes)
 5669{
 5670	struct drm_display_mode *m, *m_pref = NULL;
 5671	u16 current_refresh, highest_refresh;
 5672	struct list_head *list_head = use_probed_modes ?
 5673						    &aconnector->base.probed_modes :
 5674						    &aconnector->base.modes;
 5675
 5676	if (aconnector->freesync_vid_base.clock != 0)
 5677		return &aconnector->freesync_vid_base;
 5678
 5679	/* Find the preferred mode */
 5680	list_for_each_entry (m, list_head, head) {
 5681		if (m->type & DRM_MODE_TYPE_PREFERRED) {
 5682			m_pref = m;
 5683			break;
 5684		}
 5685	}
 5686
 5687	if (!m_pref) {
  5688		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
 5689		m_pref = list_first_entry_or_null(
 5690			&aconnector->base.modes, struct drm_display_mode, head);
 5691		if (!m_pref) {
 5692			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
 5693			return NULL;
 5694		}
 5695	}
 5696
 5697	highest_refresh = drm_mode_vrefresh(m_pref);
 5698
 5699	/*
  5700	 * Find the mode with the highest refresh rate at the same resolution.
  5701	 * For some monitors, the preferred mode is not the one with the
  5702	 * highest supported refresh rate.
 5703	 */
 5704	list_for_each_entry (m, list_head, head) {
 5705		current_refresh  = drm_mode_vrefresh(m);
 5706
 5707		if (m->hdisplay == m_pref->hdisplay &&
 5708		    m->vdisplay == m_pref->vdisplay &&
 5709		    highest_refresh < current_refresh) {
 5710			highest_refresh = current_refresh;
 5711			m_pref = m;
 5712		}
 5713	}
 5714
 5715	aconnector->freesync_vid_base = *m_pref;
 5716	return m_pref;
 5717}
 5718
 5719static bool is_freesync_video_mode(const struct drm_display_mode *mode,
 5720				   struct amdgpu_dm_connector *aconnector)
 5721{
 5722	struct drm_display_mode *high_mode;
 5723	int timing_diff;
 5724
 5725	high_mode = get_highest_refresh_rate_mode(aconnector, false);
 5726	if (!high_mode || !mode)
 5727		return false;
 5728
 5729	timing_diff = high_mode->vtotal - mode->vtotal;
 5730
 5731	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
 5732	    high_mode->hdisplay != mode->hdisplay ||
 5733	    high_mode->vdisplay != mode->vdisplay ||
 5734	    high_mode->hsync_start != mode->hsync_start ||
 5735	    high_mode->hsync_end != mode->hsync_end ||
 5736	    high_mode->htotal != mode->htotal ||
 5737	    high_mode->hskew != mode->hskew ||
 5738	    high_mode->vscan != mode->vscan ||
 5739	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
 5740	    high_mode->vsync_end - mode->vsync_end != timing_diff)
 5741		return false;
 5742	else
 5743		return true;
 5744}
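
/*
 * Put differently, a mode qualifies as a FreeSync video mode only if it
 * differs from the highest-refresh base mode purely in vertical front
 * porch: same clock and horizontal timing, with vsync_start and
 * vsync_end shifted by exactly the vtotal delta (hypothetical example:
 * 1125 vs. 1406 total lines for 60 Hz vs. ~48 Hz).
 */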
 5745
 5746static struct dc_stream_state *
 5747create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 5748		       const struct drm_display_mode *drm_mode,
 5749		       const struct dm_connector_state *dm_state,
 5750		       const struct dc_stream_state *old_stream,
 5751		       int requested_bpc)
 5752{
 5753	struct drm_display_mode *preferred_mode = NULL;
 5754	struct drm_connector *drm_connector;
 5755	const struct drm_connector_state *con_state =
 5756		dm_state ? &dm_state->base : NULL;
 5757	struct dc_stream_state *stream = NULL;
 5758	struct drm_display_mode mode = *drm_mode;
 5759	struct drm_display_mode saved_mode;
 5760	struct drm_display_mode *freesync_mode = NULL;
 5761	bool native_mode_found = false;
 5762	bool recalculate_timing = false;
 5763	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
 5764	int mode_refresh;
 5765	int preferred_refresh = 0;
 5766#if defined(CONFIG_DRM_AMD_DC_DCN)
 5767	struct dsc_dec_dpcd_caps dsc_caps;
 5768#endif
 5769	struct dc_sink *sink = NULL;
 5770
 5771	memset(&saved_mode, 0, sizeof(saved_mode));
 5772
 5773	if (aconnector == NULL) {
 5774		DRM_ERROR("aconnector is NULL!\n");
 5775		return stream;
 5776	}
 5777
 5778	drm_connector = &aconnector->base;
 5779
 5780	if (!aconnector->dc_sink) {
 5781		sink = create_fake_sink(aconnector);
 5782		if (!sink)
 5783			return stream;
 5784	} else {
 5785		sink = aconnector->dc_sink;
 5786		dc_sink_retain(sink);
 5787	}
 5788
 5789	stream = dc_create_stream_for_sink(sink);
 5790
 5791	if (stream == NULL) {
 5792		DRM_ERROR("Failed to create stream for sink!\n");
 5793		goto finish;
 5794	}
 5795
 5796	stream->dm_stream_context = aconnector;
 5797
 5798	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
 5799		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
 5800
 5801	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
 5802		/* Search for preferred mode */
 5803		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
 5804			native_mode_found = true;
 5805			break;
 5806		}
 5807	}
 5808	if (!native_mode_found)
 5809		preferred_mode = list_first_entry_or_null(
 5810				&aconnector->base.modes,
 5811				struct drm_display_mode,
 5812				head);
 5813
 5814	mode_refresh = drm_mode_vrefresh(&mode);
 5815
 5816	if (preferred_mode == NULL) {
 5817		/*
  5818		 * This may not be an error: the use case is when we have no
  5819		 * usermode calls to reset and set the mode upon hotplug. In this
  5820		 * case, we call set mode ourselves to restore the previous mode,
  5821		 * and the mode list may not be filled in yet.
 5822		 */
 5823		DRM_DEBUG_DRIVER("No preferred mode found\n");
 5824	} else {
 5825		recalculate_timing = amdgpu_freesync_vid_mode &&
 5826				 is_freesync_video_mode(&mode, aconnector);
 5827		if (recalculate_timing) {
 5828			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
 5829			saved_mode = mode;
 5830			mode = *freesync_mode;
 5831		} else {
 5832			decide_crtc_timing_for_drm_display_mode(
 5833				&mode, preferred_mode, scale);
 5834
 5835			preferred_refresh = drm_mode_vrefresh(preferred_mode);
 5836		}
 5837	}
 5838
 5839	if (recalculate_timing)
 5840		drm_mode_set_crtcinfo(&saved_mode, 0);
 5841	else if (!dm_state)
 5842		drm_mode_set_crtcinfo(&mode, 0);
 5843
  5844	/*
  5845	 * If scaling is enabled and the refresh rate didn't change,
  5846	 * we copy the VIC and polarities of the old timings.
  5847	 */
 5848	if (!scale || mode_refresh != preferred_refresh)
 5849		fill_stream_properties_from_drm_display_mode(
 5850			stream, &mode, &aconnector->base, con_state, NULL,
 5851			requested_bpc);
 5852	else
 5853		fill_stream_properties_from_drm_display_mode(
 5854			stream, &mode, &aconnector->base, con_state, old_stream,
 5855			requested_bpc);
 5856
 5857#if defined(CONFIG_DRM_AMD_DC_DCN)
 5858	/* SST DSC determination policy */
 5859	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
 5860	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
 5861		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
 5862#endif
 5863
 5864	update_stream_scaling_settings(&mode, dm_state, stream);
 5865
 5866	fill_audio_info(
 5867		&stream->audio_info,
 5868		drm_connector,
 5869		sink);
 5870
 5871	update_stream_signal(stream, sink);
 5872
 5873	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 5874		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
 5875
 5876	if (stream->link->psr_settings.psr_feature_enabled) {
  5877		/*
  5878		 * Decide whether the stream supports VSC SDP colorimetry
  5879		 * before building the VSC info packet.
  5880		 */
 5881		stream->use_vsc_sdp_for_colorimetry = false;
 5882		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 5883			stream->use_vsc_sdp_for_colorimetry =
 5884				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
 5885		} else {
 5886			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
 5887				stream->use_vsc_sdp_for_colorimetry = true;
 5888		}
 5889		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
 5890		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 5891
 5892	}
 5893finish:
 5894	dc_sink_release(sink);
 5895
 5896	return stream;
 5897}
 5898
 5899static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
 5900{
 5901	drm_crtc_cleanup(crtc);
 5902	kfree(crtc);
 5903}
 5904
 5905static void dm_crtc_destroy_state(struct drm_crtc *crtc,
 5906				  struct drm_crtc_state *state)
 5907{
 5908	struct dm_crtc_state *cur = to_dm_crtc_state(state);
 5909
  5910	/* TODO: Destroy dc_stream objects once the stream object is flattened */
 5911	if (cur->stream)
 5912		dc_stream_release(cur->stream);
  5913
  5915	__drm_atomic_helper_crtc_destroy_state(state);
  5917
 5918	kfree(state);
 5919}
 5920
 5921static void dm_crtc_reset_state(struct drm_crtc *crtc)
 5922{
 5923	struct dm_crtc_state *state;
 5924
 5925	if (crtc->state)
 5926		dm_crtc_destroy_state(crtc, crtc->state);
 5927
 5928	state = kzalloc(sizeof(*state), GFP_KERNEL);
 5929	if (WARN_ON(!state))
 5930		return;
 5931
 5932	__drm_atomic_helper_crtc_reset(crtc, &state->base);
 5933}
 5934
 5935static struct drm_crtc_state *
 5936dm_crtc_duplicate_state(struct drm_crtc *crtc)
 5937{
 5938	struct dm_crtc_state *state, *cur;
 5939
  5940	if (WARN_ON(!crtc->state))
  5941		return NULL;
  5942
  5943	cur = to_dm_crtc_state(crtc->state);
 5944
 5945	state = kzalloc(sizeof(*state), GFP_KERNEL);
 5946	if (!state)
 5947		return NULL;
 5948
 5949	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 5950
 5951	if (cur->stream) {
 5952		state->stream = cur->stream;
 5953		dc_stream_retain(state->stream);
 5954	}
 5955
 5956	state->active_planes = cur->active_planes;
 5957	state->vrr_infopacket = cur->vrr_infopacket;
 5958	state->abm_level = cur->abm_level;
 5959	state->vrr_supported = cur->vrr_supported;
 5960	state->freesync_config = cur->freesync_config;
 5961	state->cm_has_degamma = cur->cm_has_degamma;
 5962	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
  5963	/* TODO: Duplicate dc_stream once the stream object is flattened */
 5964
 5965	return &state->base;
 5966}
 5967
 5968#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 5969static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
 5970{
 5971	crtc_debugfs_init(crtc);
 5972
 5973	return 0;
 5974}
 5975#endif
 5976
 5977static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 5978{
 5979	enum dc_irq_source irq_source;
 5980	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 5981	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 5982	int rc;
 5983
 5984	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 5985
 5986	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 5987
 5988	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
 5989		      acrtc->crtc_id, enable ? "en" : "dis", rc);
 5990	return rc;
 5991}
 5992
 5993static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 5994{
 5995	enum dc_irq_source irq_source;
 5996	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 5997	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 5998	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 5999#if defined(CONFIG_DRM_AMD_DC_DCN)
 6000	struct amdgpu_display_manager *dm = &adev->dm;
 6001	unsigned long flags;
 6002#endif
 6003	int rc = 0;
 6004
 6005	if (enable) {
 6006		/* vblank irq on -> Only need vupdate irq in vrr mode */
 6007		if (amdgpu_dm_vrr_active(acrtc_state))
 6008			rc = dm_set_vupdate_irq(crtc, true);
 6009	} else {
 6010		/* vblank irq off -> vupdate irq off */
 6011		rc = dm_set_vupdate_irq(crtc, false);
 6012	}
 6013
 6014	if (rc)
 6015		return rc;
 6016
 6017	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
 6018
 6019	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
 6020		return -EBUSY;
 6021
 6022	if (amdgpu_in_reset(adev))
 6023		return 0;
 6024
 6025#if defined(CONFIG_DRM_AMD_DC_DCN)
 6026	spin_lock_irqsave(&dm->vblank_lock, flags);
 6027	dm->vblank_workqueue->dm = dm;
 6028	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
 6029	dm->vblank_workqueue->enable = enable;
 6030	spin_unlock_irqrestore(&dm->vblank_lock, flags);
 6031	schedule_work(&dm->vblank_workqueue->mall_work);
 6032#endif
 6033
 6034	return 0;
 6035}
 6036
 6037static int dm_enable_vblank(struct drm_crtc *crtc)
 6038{
 6039	return dm_set_vblank(crtc, true);
 6040}
 6041
 6042static void dm_disable_vblank(struct drm_crtc *crtc)
 6043{
 6044	dm_set_vblank(crtc, false);
 6045}
 6046
  6047/* Implement only the options currently available for the driver */
 6048static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 6049	.reset = dm_crtc_reset_state,
 6050	.destroy = amdgpu_dm_crtc_destroy,
 6051	.set_config = drm_atomic_helper_set_config,
 6052	.page_flip = drm_atomic_helper_page_flip,
 6053	.atomic_duplicate_state = dm_crtc_duplicate_state,
 6054	.atomic_destroy_state = dm_crtc_destroy_state,
 6055	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
 6056	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
 6057	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
 6058	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
 6059	.enable_vblank = dm_enable_vblank,
 6060	.disable_vblank = dm_disable_vblank,
 6061	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 6062#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 6063	.late_register = amdgpu_dm_crtc_late_register,
 6064#endif
 6065};
 6066
 6067static enum drm_connector_status
 6068amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
 6069{
 6070	bool connected;
 6071	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 6072
 6073	/*
 6074	 * Notes:
 6075	 * 1. This interface is NOT called in context of HPD irq.
  6076	 * 2. This interface *is called* in context of a user-mode ioctl, which
  6077	 * makes it a bad place for *any* MST-related activity.
 6078	 */
 6079
 6080	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
 6081	    !aconnector->fake_enable)
 6082		connected = (aconnector->dc_sink != NULL);
 6083	else
 6084		connected = (aconnector->base.force == DRM_FORCE_ON);
 6085
 6086	update_subconnector_property(aconnector);
 6087
 6088	return (connected ? connector_status_connected :
 6089			connector_status_disconnected);
 6090}
 6091
 6092int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
 6093					    struct drm_connector_state *connector_state,
 6094					    struct drm_property *property,
 6095					    uint64_t val)
 6096{
 6097	struct drm_device *dev = connector->dev;
 6098	struct amdgpu_device *adev = drm_to_adev(dev);
 6099	struct dm_connector_state *dm_old_state =
 6100		to_dm_connector_state(connector->state);
 6101	struct dm_connector_state *dm_new_state =
 6102		to_dm_connector_state(connector_state);
 6103
 6104	int ret = -EINVAL;
 6105
 6106	if (property == dev->mode_config.scaling_mode_property) {
 6107		enum amdgpu_rmx_type rmx_type;
 6108
 6109		switch (val) {
 6110		case DRM_MODE_SCALE_CENTER:
 6111			rmx_type = RMX_CENTER;
 6112			break;
 6113		case DRM_MODE_SCALE_ASPECT:
 6114			rmx_type = RMX_ASPECT;
 6115			break;
 6116		case DRM_MODE_SCALE_FULLSCREEN:
 6117			rmx_type = RMX_FULL;
 6118			break;
 6119		case DRM_MODE_SCALE_NONE:
 6120		default:
 6121			rmx_type = RMX_OFF;
 6122			break;
 6123		}
 6124
 6125		if (dm_old_state->scaling == rmx_type)
 6126			return 0;
 6127
 6128		dm_new_state->scaling = rmx_type;
 6129		ret = 0;
 6130	} else if (property == adev->mode_info.underscan_hborder_property) {
 6131		dm_new_state->underscan_hborder = val;
 6132		ret = 0;
 6133	} else if (property == adev->mode_info.underscan_vborder_property) {
 6134		dm_new_state->underscan_vborder = val;
 6135		ret = 0;
 6136	} else if (property == adev->mode_info.underscan_property) {
 6137		dm_new_state->underscan_enable = val;
 6138		ret = 0;
 6139	} else if (property == adev->mode_info.abm_level_property) {
 6140		dm_new_state->abm_level = val;
 6141		ret = 0;
 6142	}
 6143
 6144	return ret;
 6145}
 6146
 6147int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
 6148					    const struct drm_connector_state *state,
 6149					    struct drm_property *property,
 6150					    uint64_t *val)
 6151{
 6152	struct drm_device *dev = connector->dev;
 6153	struct amdgpu_device *adev = drm_to_adev(dev);
 6154	struct dm_connector_state *dm_state =
 6155		to_dm_connector_state(state);
 6156	int ret = -EINVAL;
 6157
 6158	if (property == dev->mode_config.scaling_mode_property) {
 6159		switch (dm_state->scaling) {
 6160		case RMX_CENTER:
 6161			*val = DRM_MODE_SCALE_CENTER;
 6162			break;
 6163		case RMX_ASPECT:
 6164			*val = DRM_MODE_SCALE_ASPECT;
 6165			break;
 6166		case RMX_FULL:
 6167			*val = DRM_MODE_SCALE_FULLSCREEN;
 6168			break;
 6169		case RMX_OFF:
 6170		default:
 6171			*val = DRM_MODE_SCALE_NONE;
 6172			break;
 6173		}
 6174		ret = 0;
 6175	} else if (property == adev->mode_info.underscan_hborder_property) {
 6176		*val = dm_state->underscan_hborder;
 6177		ret = 0;
 6178	} else if (property == adev->mode_info.underscan_vborder_property) {
 6179		*val = dm_state->underscan_vborder;
 6180		ret = 0;
 6181	} else if (property == adev->mode_info.underscan_property) {
 6182		*val = dm_state->underscan_enable;
 6183		ret = 0;
 6184	} else if (property == adev->mode_info.abm_level_property) {
 6185		*val = dm_state->abm_level;
 6186		ret = 0;
 6187	}
 6188
 6189	return ret;
 6190}
 6191
 6192static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
 6193{
 6194	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 6195
 6196	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
 6197}
 6198
 6199static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 6200{
 6201	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 6202	const struct dc_link *link = aconnector->dc_link;
 6203	struct amdgpu_device *adev = drm_to_adev(connector->dev);
 6204	struct amdgpu_display_manager *dm = &adev->dm;
 6205
 6206	/*
  6207	 * Call only if mst_mgr was initialized before, since it's not done
 6208	 * for all connector types.
 6209	 */
 6210	if (aconnector->mst_mgr.dev)
 6211		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 6212
 6213#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 6214	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 6215
 6216	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
 6217	    link->type != dc_connection_none &&
 6218	    dm->backlight_dev) {
 6219		backlight_device_unregister(dm->backlight_dev);
 6220		dm->backlight_dev = NULL;
 6221	}
 6222#endif
 6223
 6224	if (aconnector->dc_em_sink)
 6225		dc_sink_release(aconnector->dc_em_sink);
 6226	aconnector->dc_em_sink = NULL;
 6227	if (aconnector->dc_sink)
 6228		dc_sink_release(aconnector->dc_sink);
 6229	aconnector->dc_sink = NULL;
 6230
 6231	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
 6232	drm_connector_unregister(connector);
 6233	drm_connector_cleanup(connector);
 6234	if (aconnector->i2c) {
 6235		i2c_del_adapter(&aconnector->i2c->base);
 6236		kfree(aconnector->i2c);
 6237	}
 6238	kfree(aconnector->dm_dp_aux.aux.name);
 6239
 6240	kfree(connector);
 6241}
 6242
 6243void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 6244{
 6245	struct dm_connector_state *state =
 6246		to_dm_connector_state(connector->state);
 6247
 6248	if (connector->state)
 6249		__drm_atomic_helper_connector_destroy_state(connector->state);
 6250
 6251	kfree(state);
 6252
 6253	state = kzalloc(sizeof(*state), GFP_KERNEL);
 6254
 6255	if (state) {
 6256		state->scaling = RMX_OFF;
 6257		state->underscan_enable = false;
 6258		state->underscan_hborder = 0;
 6259		state->underscan_vborder = 0;
 6260		state->base.max_requested_bpc = 8;
 6261		state->vcpi_slots = 0;
 6262		state->pbn = 0;
 6263		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 6264			state->abm_level = amdgpu_dm_abm_level;
 6265
 6266		__drm_atomic_helper_connector_reset(connector, &state->base);
 6267	}
 6268}
 6269
 6270struct drm_connector_state *
 6271amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 6272{
 6273	struct dm_connector_state *state =
 6274		to_dm_connector_state(connector->state);
 6275
 6276	struct dm_connector_state *new_state =
 6277			kmemdup(state, sizeof(*state), GFP_KERNEL);
 6278
 6279	if (!new_state)
 6280		return NULL;
 6281
 6282	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
 6283
 6284	new_state->freesync_capable = state->freesync_capable;
 6285	new_state->abm_level = state->abm_level;
 6286	new_state->scaling = state->scaling;
 6287	new_state->underscan_enable = state->underscan_enable;
 6288	new_state->underscan_hborder = state->underscan_hborder;
 6289	new_state->underscan_vborder = state->underscan_vborder;
 6290	new_state->vcpi_slots = state->vcpi_slots;
 6291	new_state->pbn = state->pbn;
 6292	return &new_state->base;
 6293}
 6294
 6295static int
 6296amdgpu_dm_connector_late_register(struct drm_connector *connector)
 6297{
 6298	struct amdgpu_dm_connector *amdgpu_dm_connector =
 6299		to_amdgpu_dm_connector(connector);
 6300	int r;
 6301
 6302	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 6303	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
 6304		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
 6305		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
 6306		if (r)
 6307			return r;
 6308	}
 6309
 6310#if defined(CONFIG_DEBUG_FS)
 6311	connector_debugfs_init(amdgpu_dm_connector);
 6312#endif
 6313
 6314	return 0;
 6315}
 6316
 6317static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 6318	.reset = amdgpu_dm_connector_funcs_reset,
 6319	.detect = amdgpu_dm_connector_detect,
 6320	.fill_modes = drm_helper_probe_single_connector_modes,
 6321	.destroy = amdgpu_dm_connector_destroy,
 6322	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
 6323	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 6324	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
 6325	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
 6326	.late_register = amdgpu_dm_connector_late_register,
 6327	.early_unregister = amdgpu_dm_connector_unregister
 6328};
 6329
 6330static int get_modes(struct drm_connector *connector)
 6331{
 6332	return amdgpu_dm_connector_get_modes(connector);
 6333}
 6334
 6335static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 6336{
 6337	struct dc_sink_init_data init_params = {
 6338			.link = aconnector->dc_link,
 6339			.sink_signal = SIGNAL_TYPE_VIRTUAL
 6340	};
 6341	struct edid *edid;
 6342
 6343	if (!aconnector->base.edid_blob_ptr) {
  6344		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
 6345				aconnector->base.name);
 6346
 6347		aconnector->base.force = DRM_FORCE_OFF;
 6348		aconnector->base.override_edid = false;
 6349		return;
 6350	}
 6351
 6352	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
 6353
 6354	aconnector->edid = edid;
 6355
 6356	aconnector->dc_em_sink = dc_link_add_remote_sink(
 6357		aconnector->dc_link,
 6358		(uint8_t *)edid,
 6359		(edid->extensions + 1) * EDID_LENGTH,
 6360		&init_params);
 6361
 6362	if (aconnector->base.force == DRM_FORCE_ON) {
 6363		aconnector->dc_sink = aconnector->dc_link->local_sink ?
 6364		aconnector->dc_link->local_sink :
 6365		aconnector->dc_em_sink;
 6366		dc_sink_retain(aconnector->dc_sink);
 6367	}
 6368}
 6369
 6370static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
 6371{
 6372	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
 6373
 6374	/*
  6375	 * In case of a headless boot with force-on for a DP managed connector,
  6376	 * those settings have to be != 0 to get an initial modeset.
 6377	 */
 6378	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 6379		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
 6380		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
 6381	}
  6382
 6384	aconnector->base.override_edid = true;
 6385	create_eml_sink(aconnector);
 6386}
 6387
 6388static struct dc_stream_state *
 6389create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 6390				const struct drm_display_mode *drm_mode,
 6391				const struct dm_connector_state *dm_state,
 6392				const struct dc_stream_state *old_stream)
 6393{
 6394	struct drm_connector *connector = &aconnector->base;
 6395	struct amdgpu_device *adev = drm_to_adev(connector->dev);
 6396	struct dc_stream_state *stream;
 6397	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
 6398	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
 6399	enum dc_status dc_result = DC_OK;
 6400
 6401	do {
 6402		stream = create_stream_for_sink(aconnector, drm_mode,
 6403						dm_state, old_stream,
 6404						requested_bpc);
 6405		if (stream == NULL) {
 6406			DRM_ERROR("Failed to create stream for sink!\n");
 6407			break;
 6408		}
 6409
 6410		dc_result = dc_validate_stream(adev->dm.dc, stream);
 6411
 6412		if (dc_result != DC_OK) {
 6413			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
 6414				      drm_mode->hdisplay,
 6415				      drm_mode->vdisplay,
 6416				      drm_mode->clock,
 6417				      dc_result,
 6418				      dc_status_to_str(dc_result));
 6419
 6420			dc_stream_release(stream);
 6421			stream = NULL;
 6422			requested_bpc -= 2; /* lower bpc to retry validation */
 6423		}
 6424
 6425	} while (stream == NULL && requested_bpc >= 6);
 6426
 6427	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
 6428		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
 6429
 6430		aconnector->force_yuv420_output = true;
 6431		stream = create_validate_stream_for_sink(aconnector, drm_mode,
 6432						dm_state, old_stream);
 6433		aconnector->force_yuv420_output = false;
 6434	}
 6435
 6436	return stream;
 6437}
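
/*
 * Example retry sequence (illustrative): with max_requested_bpc = 10,
 * the loop above tries 10 -> 8 -> 6 bpc until dc_validate_stream()
 * returns DC_OK; if the failure was DC_FAIL_ENC_VALIDATE, one more pass
 * is made with YCbCr420 output forced before giving up.
 */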
 6438
 6439enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 6440				   struct drm_display_mode *mode)
 6441{
 6442	int result = MODE_ERROR;
 6443	struct dc_sink *dc_sink;
 6444	/* TODO: Unhardcode stream count */
 6445	struct dc_stream_state *stream;
 6446	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 6447
 6448	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 6449			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
 6450		return result;
 6451
 6452	/*
  6453	 * Only run this the first time mode_valid is called, to initialize
  6454	 * EDID mgmt.
 6455	 */
 6456	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
 6457		!aconnector->dc_em_sink)
 6458		handle_edid_mgmt(aconnector);
 6459
  6460	dc_sink = aconnector->dc_sink;
 6461
 6462	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
 6463				aconnector->base.force != DRM_FORCE_ON) {
 6464		DRM_ERROR("dc_sink is NULL!\n");
 6465		goto fail;
 6466	}
 6467
 6468	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
 6469	if (stream) {
 6470		dc_stream_release(stream);
 6471		result = MODE_OK;
 6472	}
 6473
 6474fail:
  6475	/* TODO: error handling */
 6476	return result;
 6477}
 6478
 6479static int fill_hdr_info_packet(const struct drm_connector_state *state,
 6480				struct dc_info_packet *out)
 6481{
 6482	struct hdmi_drm_infoframe frame;
 6483	unsigned char buf[30]; /* 26 + 4 */
 6484	ssize_t len;
 6485	int ret, i;
 6486
 6487	memset(out, 0, sizeof(*out));
 6488
 6489	if (!state->hdr_output_metadata)
 6490		return 0;
 6491
 6492	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
 6493	if (ret)
 6494		return ret;
 6495
 6496	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
 6497	if (len < 0)
 6498		return (int)len;
 6499
 6500	/* Static metadata is a fixed 26 bytes + 4 byte header. */
 6501	if (len != 30)
 6502		return -EINVAL;
 6503
 6504	/* Prepare the infopacket for DC. */
 6505	switch (state->connector->connector_type) {
 6506	case DRM_MODE_CONNECTOR_HDMIA:
 6507		out->hb0 = 0x87; /* type */
 6508		out->hb1 = 0x01; /* version */
 6509		out->hb2 = 0x1A; /* length */
 6510		out->sb[0] = buf[3]; /* checksum */
 6511		i = 1;
 6512		break;
 6513
 6514	case DRM_MODE_CONNECTOR_DisplayPort:
 6515	case DRM_MODE_CONNECTOR_eDP:
 6516		out->hb0 = 0x00; /* sdp id, zero */
 6517		out->hb1 = 0x87; /* type */
 6518		out->hb2 = 0x1D; /* payload len - 1 */
 6519		out->hb3 = (0x13 << 2); /* sdp version */
 6520		out->sb[0] = 0x01; /* version */
 6521		out->sb[1] = 0x1A; /* length */
 6522		i = 2;
 6523		break;
 6524
 6525	default:
 6526		return -EINVAL;
 6527	}
 6528
 6529	memcpy(&out->sb[i], &buf[4], 26);
 6530	out->valid = true;
 6531
 6532	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
 6533		       sizeof(out->sb), false);
 6534
 6535	return 0;
 6536}
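
/*
 * Layout sketch of the buffer consumed above, as packed by
 * hdmi_drm_infoframe_pack_only(): buf[0..2] hold the infoframe header
 * (type 0x87, version 0x01, length 0x1A), buf[3] holds the checksum,
 * and buf[4..29] hold the 26 bytes of static HDR metadata copied into
 * out->sb at the connector-specific offset.
 */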
 6537
 6538static int
 6539amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
 6540				 struct drm_atomic_state *state)
 6541{
 6542	struct drm_connector_state *new_con_state =
 6543		drm_atomic_get_new_connector_state(state, conn);
 6544	struct drm_connector_state *old_con_state =
 6545		drm_atomic_get_old_connector_state(state, conn);
 6546	struct drm_crtc *crtc = new_con_state->crtc;
 6547	struct drm_crtc_state *new_crtc_state;
 6548	int ret;
 6549
 6550	trace_amdgpu_dm_connector_atomic_check(new_con_state);
 6551
 6552	if (!crtc)
 6553		return 0;
 6554
 6555	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
 6556		struct dc_info_packet hdr_infopacket;
 6557
 6558		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
 6559		if (ret)
 6560			return ret;
 6561
 6562		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
 6563		if (IS_ERR(new_crtc_state))
 6564			return PTR_ERR(new_crtc_state);
 6565
 6566		/*
 6567		 * DC considers the stream backends changed if the
 6568		 * static metadata changes. Forcing the modeset also
 6569		 * gives a simple way for userspace to switch from
 6570		 * 8bpc to 10bpc when setting the metadata to enter
 6571		 * or exit HDR.
 6572		 *
 6573		 * Changing the static metadata after it's been
 6574		 * set is permissible, however. So only force a
 6575		 * modeset if we're entering or exiting HDR.
 6576		 */
 6577		new_crtc_state->mode_changed =
 6578			!old_con_state->hdr_output_metadata ||
 6579			!new_con_state->hdr_output_metadata;
 6580	}
 6581
 6582	return 0;
 6583}
 6584
 6585static const struct drm_connector_helper_funcs
 6586amdgpu_dm_connector_helper_funcs = {
 6587	/*
  6588	 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
  6589	 * modes will be filtered by drm_mode_validate_size(), and those modes
  6590	 * are missing after the user starts lightdm. So we need to renew the modes
  6591	 * list in the get_modes callback, not just return the modes count.
 6592	 */
 6593	.get_modes = get_modes,
 6594	.mode_valid = amdgpu_dm_connector_mode_valid,
 6595	.atomic_check = amdgpu_dm_connector_atomic_check,
 6596};
 6597
 6598static void dm_crtc_helper_disable(struct drm_crtc *crtc)
 6599{
 6600}
 6601
 6602static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
 6603{
 6604	struct drm_atomic_state *state = new_crtc_state->state;
 6605	struct drm_plane *plane;
 6606	int num_active = 0;
 6607
 6608	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
 6609		struct drm_plane_state *new_plane_state;
 6610
 6611		/* Cursor planes are "fake". */
 6612		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 6613			continue;
 6614
 6615		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
 6616
 6617		if (!new_plane_state) {
 6618			/*
  6619			 * The plane is enabled on the CRTC and hasn't changed
 6620			 * state. This means that it previously passed
 6621			 * validation and is therefore enabled.
 6622			 */
 6623			num_active += 1;
 6624			continue;
 6625		}
 6626
 6627		/* We need a framebuffer to be considered enabled. */
 6628		num_active += (new_plane_state->fb != NULL);
 6629	}
 6630
 6631	return num_active;
 6632}
 6633
 6634static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
 6635					 struct drm_crtc_state *new_crtc_state)
 6636{
 6637	struct dm_crtc_state *dm_new_crtc_state =
 6638		to_dm_crtc_state(new_crtc_state);
 6639
 6640	dm_new_crtc_state->active_planes = 0;
 6641
 6642	if (!dm_new_crtc_state->stream)
 6643		return;
 6644
 6645	dm_new_crtc_state->active_planes =
 6646		count_crtc_active_planes(new_crtc_state);
 6647}
 6648
 6649static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 6650				       struct drm_atomic_state *state)
 6651{
 6652	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
 6653									  crtc);
 6654	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 6655	struct dc *dc = adev->dm.dc;
 6656	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 6657	int ret = -EINVAL;
 6658
 6659	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
 6660
 6661	dm_update_crtc_active_planes(crtc, crtc_state);
 6662
 6663	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
 6664		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
 6665		return ret;
 6666	}
 6667
 6668	/*
 6669	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
 6670	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
 6671	 * planes are disabled, which is not supported by the hardware. And there is legacy
 6672	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
 6673	 */
 6674	if (crtc_state->enable &&
 6675	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
 6676		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
 6677		return -EINVAL;
 6678	}
 6679
 6680	/* In some use cases, like reset, no stream is attached */
 6681	if (!dm_crtc_state->stream)
 6682		return 0;
 6683
 6684	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
 6685		return 0;
 6686
 6687	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
 6688	return ret;
 6689}
 6690
 6691static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
 6692				      const struct drm_display_mode *mode,
 6693				      struct drm_display_mode *adjusted_mode)
 6694{
 6695	return true;
 6696}
 6697
 6698static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
 6699	.disable = dm_crtc_helper_disable,
 6700	.atomic_check = dm_crtc_helper_atomic_check,
 6701	.mode_fixup = dm_crtc_helper_mode_fixup,
 6702	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 6703};
 6704
 6705static void dm_encoder_helper_disable(struct drm_encoder *encoder)
 6706{
 6707
 6708}
 6709
  6710	static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
  6711	{
  6712		switch (display_color_depth) {
  6713		case COLOR_DEPTH_666:
  6714			return 6;
  6715		case COLOR_DEPTH_888:
  6716			return 8;
  6717		case COLOR_DEPTH_101010:
  6718			return 10;
  6719		case COLOR_DEPTH_121212:
  6720			return 12;
  6721		case COLOR_DEPTH_141414:
  6722			return 14;
  6723		case COLOR_DEPTH_161616:
  6724			return 16;
  6725		default:
  6726			break;
  6727		}
  6728		return 0;
  6729	}
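
/*
 * Example mapping (for illustration): COLOR_DEPTH_101010 yields 10 bpc,
 * i.e. 30 bpp for a 3-component RGB pixel as used by
 * dm_encoder_helper_atomic_check() below. A return of 0 means the color
 * depth was not recognized.
 */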
 6730
 6731static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 6732					  struct drm_crtc_state *crtc_state,
 6733					  struct drm_connector_state *conn_state)
 6734{
 6735	struct drm_atomic_state *state = crtc_state->state;
 6736	struct drm_connector *connector = conn_state->connector;
 6737	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 6738	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
 6739	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
 6740	struct drm_dp_mst_topology_mgr *mst_mgr;
 6741	struct drm_dp_mst_port *mst_port;
 6742	enum dc_color_depth color_depth;
 6743	int clock, bpp = 0;
 6744	bool is_y420 = false;
 6745
 6746	if (!aconnector->port || !aconnector->dc_sink)
 6747		return 0;
 6748
 6749	mst_port = aconnector->port;
 6750	mst_mgr = &aconnector->mst_port->mst_mgr;
 6751
 6752	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
 6753		return 0;
 6754
 6755	if (!state->duplicated) {
 6756		int max_bpc = conn_state->max_requested_bpc;
 6757		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
 6758				aconnector->force_yuv420_output;
 6759		color_depth = convert_color_depth_from_display_info(connector,
 6760								    is_y420,
 6761								    max_bpc);
 6762		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
 6763		clock = adjusted_mode->clock;
 6764		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
 6765	}
 6766	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
 6767									   mst_mgr,
 6768									   mst_port,
 6769									   dm_new_connector_state->pbn,
 6770									   dm_mst_get_pbn_divider(aconnector->dc_link));
 6771	if (dm_new_connector_state->vcpi_slots < 0) {
 6772		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
 6773		return dm_new_connector_state->vcpi_slots;
 6774	}
 6775	return 0;
 6776}
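
/*
 * Rough PBN example (approximate, illustrative figures): a 3840x2160@60
 * stream with a ~533250 kHz pixel clock at 8 bpc RGB has bpp = 8 * 3 = 24.
 * A PBN unit is 54/64 MBytes/s with a 0.6% margin added, so
 * drm_dp_calc_pbn_mode(533250, 24, false) works out to about
 * 533250 * 24 / 8 / 1000 * 64 / 54 * 1.006 ~= 1908 PBN.
 */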
 6777
 6778const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 6779	.disable = dm_encoder_helper_disable,
 6780	.atomic_check = dm_encoder_helper_atomic_check
 6781};
 6782
 6783#if defined(CONFIG_DRM_AMD_DC_DCN)
 6784static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 6785					    struct dc_state *dc_state,
 6786					    struct dsc_mst_fairness_vars *vars)
 6787{
 6788	struct dc_stream_state *stream = NULL;
 6789	struct drm_connector *connector;
 6790	struct drm_connector_state *new_con_state;
 6791	struct amdgpu_dm_connector *aconnector;
 6792	struct dm_connector_state *dm_conn_state;
 6793	int i, j, clock;
 6794	int vcpi, pbn_div, pbn = 0;
 6795
 6796	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 6797
 6798		aconnector = to_amdgpu_dm_connector(connector);
 6799
 6800		if (!aconnector->port)
 6801			continue;
 6802
 6803		if (!new_con_state || !new_con_state->crtc)
 6804			continue;
 6805
 6806		dm_conn_state = to_dm_connector_state(new_con_state);
 6807
 6808		for (j = 0; j < dc_state->stream_count; j++) {
 6809			stream = dc_state->streams[j];
 6810			if (!stream)
 6811				continue;
 6812
  6813			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
 6814				break;
 6815
 6816			stream = NULL;
 6817		}
 6818
 6819		if (!stream)
 6820			continue;
 6821
 6822		if (stream->timing.flags.DSC != 1) {
 6823			drm_dp_mst_atomic_enable_dsc(state,
 6824						     aconnector->port,
 6825						     dm_conn_state->pbn,
 6826						     0,
 6827						     false);
 6828			continue;
 6829		}
 6830
 6831		pbn_div = dm_mst_get_pbn_divider(stream->link);
 6832		clock = stream->timing.pix_clk_100hz / 10;
  6833		/* PBN is calculated by compute_mst_dsc_configs_for_state() */
 6834		for (j = 0; j < dc_state->stream_count; j++) {
 6835			if (vars[j].aconnector == aconnector) {
 6836				pbn = vars[j].pbn;
 6837				break;
 6838			}
 6839		}
 6840
 6841		vcpi = drm_dp_mst_atomic_enable_dsc(state,
 6842						    aconnector->port,
 6843						    pbn, pbn_div,
 6844						    true);
 6845		if (vcpi < 0)
 6846			return vcpi;
 6847
 6848		dm_conn_state->pbn = pbn;
 6849		dm_conn_state->vcpi_slots = vcpi;
 6850	}
 6851	return 0;
 6852}
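
/*
 * To put numbers on the divider above (illustrative, assuming a 4-lane
 * HBR2 link): usable bandwidth is 4 * 5400000 kbps * 8/10 (8b/10b) =
 * 17280000 kbps, so dm_mst_get_pbn_divider() yields
 * 17280000 / (8 * 54 * 1000) = 40 PBN per MTP time slot, and a ~1908 PBN
 * stream would occupy roughly 48 of the 63 available slots.
 */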
 6853#endif
 6854
 6855static void dm_drm_plane_reset(struct drm_plane *plane)
 6856{
 6857	struct dm_plane_state *amdgpu_state = NULL;
 6858
 6859	if (plane->state)
 6860		plane->funcs->atomic_destroy_state(plane, plane->state);
 6861
 6862	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
 6863	WARN_ON(amdgpu_state == NULL);
 6864
 6865	if (amdgpu_state)
 6866		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
 6867}
 6868
 6869static struct drm_plane_state *
 6870dm_drm_plane_duplicate_state(struct drm_plane *plane)
 6871{
 6872	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
 6873
 6874	old_dm_plane_state = to_dm_plane_state(plane->state);
 6875	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
 6876	if (!dm_plane_state)
 6877		return NULL;
 6878
 6879	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
 6880
 6881	if (old_dm_plane_state->dc_state) {
 6882		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
 6883		dc_plane_state_retain(dm_plane_state->dc_state);
 6884	}
 6885
 6886	return &dm_plane_state->base;
 6887}
 6888
 6889static void dm_drm_plane_destroy_state(struct drm_plane *plane,
 6890				struct drm_plane_state *state)
 6891{
 6892	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
 6893
 6894	if (dm_plane_state->dc_state)
 6895		dc_plane_state_release(dm_plane_state->dc_state);
 6896
 6897	drm_atomic_helper_plane_destroy_state(plane, state);
 6898}
 6899
 6900static const struct drm_plane_funcs dm_plane_funcs = {
 6901	.update_plane	= drm_atomic_helper_update_plane,
 6902	.disable_plane	= drm_atomic_helper_disable_plane,
 6903	.destroy	= drm_primary_helper_destroy,
 6904	.reset = dm_drm_plane_reset,
 6905	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
 6906	.atomic_destroy_state = dm_drm_plane_destroy_state,
 6907	.format_mod_supported = dm_plane_format_mod_supported,
 6908};
 6909
 6910static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 6911				      struct drm_plane_state *new_state)
 6912{
 6913	struct amdgpu_framebuffer *afb;
 6914	struct drm_gem_object *obj;
 6915	struct amdgpu_device *adev;
 6916	struct amdgpu_bo *rbo;
 6917	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
 6918	struct list_head list;
 6919	struct ttm_validate_buffer tv;
 6920	struct ww_acquire_ctx ticket;
 6921	uint32_t domain;
 6922	int r;
 6923
 6924	if (!new_state->fb) {
 6925		DRM_DEBUG_KMS("No FB bound\n");
 6926		return 0;
 6927	}
 6928
 6929	afb = to_amdgpu_framebuffer(new_state->fb);
 6930	obj = new_state->fb->obj[0];
 6931	rbo = gem_to_amdgpu_bo(obj);
 6932	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
 6933	INIT_LIST_HEAD(&list);
 6934
 6935	tv.bo = &rbo->tbo;
 6936	tv.num_shared = 1;
 6937	list_add(&tv.head, &list);
 6938
 6939	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
 6940	if (r) {
  6941		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
 6942		return r;
 6943	}
 6944
 6945	if (plane->type != DRM_PLANE_TYPE_CURSOR)
 6946		domain = amdgpu_display_supported_domains(adev, rbo->flags);
 6947	else
 6948		domain = AMDGPU_GEM_DOMAIN_VRAM;
 6949
 6950	r = amdgpu_bo_pin(rbo, domain);
 6951	if (unlikely(r != 0)) {
 6952		if (r != -ERESTARTSYS)
 6953			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
 6954		ttm_eu_backoff_reservation(&ticket, &list);
 6955		return r;
 6956	}
 6957
 6958	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
 6959	if (unlikely(r != 0)) {
 6960		amdgpu_bo_unpin(rbo);
 6961		ttm_eu_backoff_reservation(&ticket, &list);
 6962		DRM_ERROR("%p bind failed\n", rbo);
 6963		return r;
 6964	}
 6965
 6966	ttm_eu_backoff_reservation(&ticket, &list);
 6967
 6968	afb->address = amdgpu_bo_gpu_offset(rbo);
 6969
 6970	amdgpu_bo_ref(rbo);
 6971
  6972	/*
 6973	 * We don't do surface updates on planes that have been newly created,
 6974	 * but we also don't have the afb->address during atomic check.
 6975	 *
 6976	 * Fill in buffer attributes depending on the address here, but only on
 6977	 * newly created planes since they're not being used by DC yet and this
 6978	 * won't modify global state.
 6979	 */
 6980	dm_plane_state_old = to_dm_plane_state(plane->state);
 6981	dm_plane_state_new = to_dm_plane_state(new_state);
 6982
 6983	if (dm_plane_state_new->dc_state &&
 6984	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
 6985		struct dc_plane_state *plane_state =
 6986			dm_plane_state_new->dc_state;
 6987		bool force_disable_dcc = !plane_state->dcc.enable;
 6988
 6989		fill_plane_buffer_attributes(
 6990			adev, afb, plane_state->format, plane_state->rotation,
 6991			afb->tiling_flags,
 6992			&plane_state->tiling_info, &plane_state->plane_size,
 6993			&plane_state->dcc, &plane_state->address,
 6994			afb->tmz_surface, force_disable_dcc);
 6995	}
 6996
 6997	return 0;
 6998}
 6999
 7000static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
 7001				       struct drm_plane_state *old_state)
 7002{
 7003	struct amdgpu_bo *rbo;
 7004	int r;
 7005
 7006	if (!old_state->fb)
 7007		return;
 7008
 7009	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
 7010	r = amdgpu_bo_reserve(rbo, false);
 7011	if (unlikely(r)) {
 7012		DRM_ERROR("failed to reserve rbo before unpin\n");
 7013		return;
 7014	}
 7015
 7016	amdgpu_bo_unpin(rbo);
 7017	amdgpu_bo_unreserve(rbo);
 7018	amdgpu_bo_unref(&rbo);
 7019}
 7020
 7021static int dm_plane_helper_check_state(struct drm_plane_state *state,
 7022				       struct drm_crtc_state *new_crtc_state)
 7023{
 7024	struct drm_framebuffer *fb = state->fb;
 7025	int min_downscale, max_upscale;
 7026	int min_scale = 0;
 7027	int max_scale = INT_MAX;
 7028
 7029	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
 7030	if (fb && state->crtc) {
 7031		/* Validate viewport to cover the case when only the position changes */
 7032		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
 7033			int viewport_width = state->crtc_w;
 7034			int viewport_height = state->crtc_h;
 7035
 7036			if (state->crtc_x < 0)
 7037				viewport_width += state->crtc_x;
 7038			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
 7039				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
 7040
 7041			if (state->crtc_y < 0)
 7042				viewport_height += state->crtc_y;
 7043			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
 7044				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
 7045
 7046			if (viewport_width < 0 || viewport_height < 0) {
 7047				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
 7048				return -EINVAL;
  7049			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* 2x the minimum width because of pipe split. */
  7050				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
 7051				return -EINVAL;
 7052			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
 7053				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
 7054				return -EINVAL;
 7055			}
 7056
 7057		}
 7058
 7059		/* Get min/max allowed scaling factors from plane caps. */
 7060		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
 7061					     &min_downscale, &max_upscale);
 7062		/*
 7063		 * Convert to drm convention: 16.16 fixed point, instead of dc's
 7064		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
 7065		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
 7066		 */
 7067		min_scale = (1000 << 16) / max_upscale;
 7068		max_scale = (1000 << 16) / min_downscale;
 7069	}
 7070
 7071	return drm_atomic_helper_check_plane_state(
 7072		state, new_crtc_state, min_scale, max_scale, true, true);
 7073}
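
/*
 * Scale limit example (hypothetical caps, for illustration): if DC reports
 * max_upscale = 16000 (16x, since 1.0 == 1000) and min_downscale = 250
 * (4x down), then min_scale = (1000 << 16) / 16000 = 4096 (1/16 in 16.16
 * fixed point) and max_scale = (1000 << 16) / 250 = 262144 (4.0 in 16.16
 * fixed point).
 */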
 7074
 7075static int dm_plane_atomic_check(struct drm_plane *plane,
 7076				 struct drm_atomic_state *state)
 7077{
 7078	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 7079										 plane);
 7080	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 7081	struct dc *dc = adev->dm.dc;
 7082	struct dm_plane_state *dm_plane_state;
 7083	struct dc_scaling_info scaling_info;
 7084	struct drm_crtc_state *new_crtc_state;
 7085	int ret;
 7086
 7087	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
 7088
 7089	dm_plane_state = to_dm_plane_state(new_plane_state);
 7090
 7091	if (!dm_plane_state->dc_state)
 7092		return 0;
 7093
 7094	new_crtc_state =
 7095		drm_atomic_get_new_crtc_state(state,
 7096					      new_plane_state->crtc);
 7097	if (!new_crtc_state)
 7098		return -EINVAL;
 7099
 7100	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
 7101	if (ret)
 7102		return ret;
 7103
 7104	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
 7105	if (ret)
 7106		return ret;
 7107
 7108	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 7109		return 0;
 7110
 7111	return -EINVAL;
 7112}
 7113
 7114static int dm_plane_atomic_async_check(struct drm_plane *plane,
 7115				       struct drm_atomic_state *state)
 7116{
 7117	/* Only support async updates on cursor planes. */
 7118	if (plane->type != DRM_PLANE_TYPE_CURSOR)
 7119		return -EINVAL;
 7120
 7121	return 0;
 7122}
 7123
 7124static void dm_plane_atomic_async_update(struct drm_plane *plane,
 7125					 struct drm_atomic_state *state)
 7126{
 7127	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 7128									   plane);
 7129	struct drm_plane_state *old_state =
 7130		drm_atomic_get_old_plane_state(state, plane);
 7131
 7132	trace_amdgpu_dm_atomic_update_cursor(new_state);
 7133
 7134	swap(plane->state->fb, new_state->fb);
 7135
 7136	plane->state->src_x = new_state->src_x;
 7137	plane->state->src_y = new_state->src_y;
 7138	plane->state->src_w = new_state->src_w;
 7139	plane->state->src_h = new_state->src_h;
 7140	plane->state->crtc_x = new_state->crtc_x;
 7141	plane->state->crtc_y = new_state->crtc_y;
 7142	plane->state->crtc_w = new_state->crtc_w;
 7143	plane->state->crtc_h = new_state->crtc_h;
 7144
 7145	handle_cursor_update(plane, old_state);
 7146}
 7147
 7148static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
 7149	.prepare_fb = dm_plane_helper_prepare_fb,
 7150	.cleanup_fb = dm_plane_helper_cleanup_fb,
 7151	.atomic_check = dm_plane_atomic_check,
 7152	.atomic_async_check = dm_plane_atomic_async_check,
 7153	.atomic_async_update = dm_plane_atomic_async_update
 7154};
 7155
  7156/*
  7157 * TODO: these are currently initialized to RGB formats only.
  7158 * For future use cases we should either initialize them dynamically based on
  7159 * plane capabilities, or initialize this array to all formats, so the
  7160 * internal drm check will succeed, and let DC implement the proper check.
  7161 */
 7162static const uint32_t rgb_formats[] = {
 7163	DRM_FORMAT_XRGB8888,
 7164	DRM_FORMAT_ARGB8888,
 7165	DRM_FORMAT_RGBA8888,
 7166	DRM_FORMAT_XRGB2101010,
 7167	DRM_FORMAT_XBGR2101010,
 7168	DRM_FORMAT_ARGB2101010,
 7169	DRM_FORMAT_ABGR2101010,
 7170	DRM_FORMAT_XRGB16161616,
 7171	DRM_FORMAT_XBGR16161616,
 7172	DRM_FORMAT_ARGB16161616,
 7173	DRM_FORMAT_ABGR16161616,
 7174	DRM_FORMAT_XBGR8888,
 7175	DRM_FORMAT_ABGR8888,
 7176	DRM_FORMAT_RGB565,
 7177};
 7178
 7179static const uint32_t overlay_formats[] = {
 7180	DRM_FORMAT_XRGB8888,
 7181	DRM_FORMAT_ARGB8888,
 7182	DRM_FORMAT_RGBA8888,
 7183	DRM_FORMAT_XBGR8888,
 7184	DRM_FORMAT_ABGR8888,
 7185	DRM_FORMAT_RGB565
 7186};
 7187
 7188static const u32 cursor_formats[] = {
 7189	DRM_FORMAT_ARGB8888
 7190};
 7191
 7192static int get_plane_formats(const struct drm_plane *plane,
 7193			     const struct dc_plane_cap *plane_cap,
 7194			     uint32_t *formats, int max_formats)
 7195{
 7196	int i, num_formats = 0;
 7197
 7198	/*
 7199	 * TODO: Query support for each group of formats directly from
 7200	 * DC plane caps. This will require adding more formats to the
 7201	 * caps list.
 7202	 */
 7203
 7204	switch (plane->type) {
 7205	case DRM_PLANE_TYPE_PRIMARY:
 7206		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
 7207			if (num_formats >= max_formats)
 7208				break;
 7209
 7210			formats[num_formats++] = rgb_formats[i];
 7211		}
 7212
 7213		if (plane_cap && plane_cap->pixel_format_support.nv12)
 7214			formats[num_formats++] = DRM_FORMAT_NV12;
 7215		if (plane_cap && plane_cap->pixel_format_support.p010)
 7216			formats[num_formats++] = DRM_FORMAT_P010;
 7217		if (plane_cap && plane_cap->pixel_format_support.fp16) {
 7218			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
 7219			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
 7220			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
 7221			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
 7222		}
 7223		break;
 7224
 7225	case DRM_PLANE_TYPE_OVERLAY:
 7226		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
 7227			if (num_formats >= max_formats)
 7228				break;
 7229
 7230			formats[num_formats++] = overlay_formats[i];
 7231		}
 7232		break;
 7233
 7234	case DRM_PLANE_TYPE_CURSOR:
 7235		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
 7236			if (num_formats >= max_formats)
 7237				break;
 7238
 7239			formats[num_formats++] = cursor_formats[i];
 7240		}
 7241		break;
 7242	}
 7243
 7244	return num_formats;
 7245}
 7246
 7247static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 7248				struct drm_plane *plane,
 7249				unsigned long possible_crtcs,
 7250				const struct dc_plane_cap *plane_cap)
 7251{
 7252	uint32_t formats[32];
 7253	int num_formats;
 7254	int res = -EPERM;
 7255	unsigned int supported_rotations;
 7256	uint64_t *modifiers = NULL;
 7257
 7258	num_formats = get_plane_formats(plane, plane_cap, formats,
 7259					ARRAY_SIZE(formats));
 7260
 7261	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
 7262	if (res)
 7263		return res;
 7264
 7265	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
 7266				       &dm_plane_funcs, formats, num_formats,
 7267				       modifiers, plane->type, NULL);
 7268	kfree(modifiers);
 7269	if (res)
 7270		return res;
 7271
 7272	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
 7273	    plane_cap && plane_cap->per_pixel_alpha) {
 7274		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
 7275					  BIT(DRM_MODE_BLEND_PREMULTI);
 7276
 7277		drm_plane_create_alpha_property(plane);
 7278		drm_plane_create_blend_mode_property(plane, blend_caps);
 7279	}
 7280
 7281	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
 7282	    plane_cap &&
 7283	    (plane_cap->pixel_format_support.nv12 ||
 7284	     plane_cap->pixel_format_support.p010)) {
 7285		/* This only affects YUV formats. */
 7286		drm_plane_create_color_properties(
 7287			plane,
 7288			BIT(DRM_COLOR_YCBCR_BT601) |
 7289			BIT(DRM_COLOR_YCBCR_BT709) |
 7290			BIT(DRM_COLOR_YCBCR_BT2020),
 7291			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
 7292			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
 7293			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
 7294	}
 7295
 7296	supported_rotations =
 7297		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 7298		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
 7299
 7300	if (dm->adev->asic_type >= CHIP_BONAIRE &&
 7301	    plane->type != DRM_PLANE_TYPE_CURSOR)
 7302		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
 7303						   supported_rotations);
 7304
 7305	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 7306
 7307	/* Create (reset) the plane state */
 7308	if (plane->funcs->reset)
 7309		plane->funcs->reset(plane);
 7310
 7311	return 0;
 7312}
 7313
 7314static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 7315			       struct drm_plane *plane,
 7316			       uint32_t crtc_index)
 7317{
 7318	struct amdgpu_crtc *acrtc = NULL;
 7319	struct drm_plane *cursor_plane;
 7320
 7321	int res = -ENOMEM;
 7322
 7323	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
 7324	if (!cursor_plane)
 7325		goto fail;
 7326
 7327	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
 7328	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
 7329
 7330	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
 7331	if (!acrtc)
 7332		goto fail;
 7333
 7334	res = drm_crtc_init_with_planes(
 7335			dm->ddev,
 7336			&acrtc->base,
 7337			plane,
 7338			cursor_plane,
 7339			&amdgpu_dm_crtc_funcs, NULL);
 7340
 7341	if (res)
 7342		goto fail;
 7343
 7344	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
 7345
  7346	/* Create (reset) the CRTC state */
 7347	if (acrtc->base.funcs->reset)
 7348		acrtc->base.funcs->reset(&acrtc->base);
 7349
 7350	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
 7351	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
 7352
 7353	acrtc->crtc_id = crtc_index;
 7354	acrtc->base.enabled = false;
 7355	acrtc->otg_inst = -1;
 7356
 7357	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
 7358	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
 7359				   true, MAX_COLOR_LUT_ENTRIES);
 7360	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
 7361
 7362	return 0;
 7363
 7364fail:
 7365	kfree(acrtc);
 7366	kfree(cursor_plane);
 7367	return res;
 7368}
 7369
 7370
 7371static int to_drm_connector_type(enum signal_type st)
 7372{
 7373	switch (st) {
 7374	case SIGNAL_TYPE_HDMI_TYPE_A:
 7375		return DRM_MODE_CONNECTOR_HDMIA;
 7376	case SIGNAL_TYPE_EDP:
 7377		return DRM_MODE_CONNECTOR_eDP;
 7378	case SIGNAL_TYPE_LVDS:
 7379		return DRM_MODE_CONNECTOR_LVDS;
 7380	case SIGNAL_TYPE_RGB:
 7381		return DRM_MODE_CONNECTOR_VGA;
 7382	case SIGNAL_TYPE_DISPLAY_PORT:
 7383	case SIGNAL_TYPE_DISPLAY_PORT_MST:
 7384		return DRM_MODE_CONNECTOR_DisplayPort;
 7385	case SIGNAL_TYPE_DVI_DUAL_LINK:
 7386	case SIGNAL_TYPE_DVI_SINGLE_LINK:
 7387		return DRM_MODE_CONNECTOR_DVID;
 7388	case SIGNAL_TYPE_VIRTUAL:
 7389		return DRM_MODE_CONNECTOR_VIRTUAL;
 7390
 7391	default:
 7392		return DRM_MODE_CONNECTOR_Unknown;
 7393	}
 7394}
 7395
 7396static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
 7397{
 7398	struct drm_encoder *encoder;
 7399
 7400	/* There is only one encoder per connector */
 7401	drm_connector_for_each_possible_encoder(connector, encoder)
 7402		return encoder;
 7403
 7404	return NULL;
 7405}
 7406
 7407static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
 7408{
 7409	struct drm_encoder *encoder;
 7410	struct amdgpu_encoder *amdgpu_encoder;
 7411
 7412	encoder = amdgpu_dm_connector_to_encoder(connector);
 7413
 7414	if (encoder == NULL)
 7415		return;
 7416
 7417	amdgpu_encoder = to_amdgpu_encoder(encoder);
 7418
 7419	amdgpu_encoder->native_mode.clock = 0;
 7420
 7421	if (!list_empty(&connector->probed_modes)) {
 7422		struct drm_display_mode *preferred_mode = NULL;
 7423
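		/*
		 * probed_modes was sorted by amdgpu_dm_connector_ddc_get_modes(),
		 * so a preferred mode is expected to sit first in the list; only
		 * that first entry is examined before breaking out of the loop.
		 */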
 7424		list_for_each_entry(preferred_mode,
 7425				    &connector->probed_modes,
 7426				    head) {
 7427			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
 7428				amdgpu_encoder->native_mode = *preferred_mode;
 7429
 7430			break;
 7431		}
 7432
 7433	}
 7434}
 7435
 7436static struct drm_display_mode *
 7437amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
 7438			     char *name,
 7439			     int hdisplay, int vdisplay)
 7440{
 7441	struct drm_device *dev = encoder->dev;
 7442	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 7443	struct drm_display_mode *mode = NULL;
 7444	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
 7445
 7446	mode = drm_mode_duplicate(dev, native_mode);
 7447
 7448	if (mode == NULL)
 7449		return NULL;
 7450
 7451	mode->hdisplay = hdisplay;
 7452	mode->vdisplay = vdisplay;
 7453	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
 7454	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
 7455
 7456	return mode;
 7457
 7458}
 7459
 7460static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
 7461						 struct drm_connector *connector)
 7462{
 7463	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 7464	struct drm_display_mode *mode = NULL;
 7465	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
 7466	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7467				to_amdgpu_dm_connector(connector);
 7468	int i;
 7469	int n;
 7470	struct mode_size {
 7471		char name[DRM_DISPLAY_MODE_LEN];
 7472		int w;
 7473		int h;
 7474	} common_modes[] = {
 7475		{  "640x480",  640,  480},
 7476		{  "800x600",  800,  600},
 7477		{ "1024x768", 1024,  768},
 7478		{ "1280x720", 1280,  720},
 7479		{ "1280x800", 1280,  800},
 7480		{"1280x1024", 1280, 1024},
 7481		{ "1440x900", 1440,  900},
 7482		{"1680x1050", 1680, 1050},
 7483		{"1600x1200", 1600, 1200},
 7484		{"1920x1080", 1920, 1080},
 7485		{"1920x1200", 1920, 1200}
 7486	};
 7487
 7488	n = ARRAY_SIZE(common_modes);
 7489
 7490	for (i = 0; i < n; i++) {
 7491		struct drm_display_mode *curmode = NULL;
 7492		bool mode_existed = false;
 7493
 7494		if (common_modes[i].w > native_mode->hdisplay ||
 7495		    common_modes[i].h > native_mode->vdisplay ||
 7496		   (common_modes[i].w == native_mode->hdisplay &&
 7497		    common_modes[i].h == native_mode->vdisplay))
 7498			continue;
 7499
 7500		list_for_each_entry(curmode, &connector->probed_modes, head) {
 7501			if (common_modes[i].w == curmode->hdisplay &&
 7502			    common_modes[i].h == curmode->vdisplay) {
 7503				mode_existed = true;
 7504				break;
 7505			}
 7506		}
 7507
 7508		if (mode_existed)
 7509			continue;
 7510
 7511		mode = amdgpu_dm_create_common_mode(encoder,
 7512				common_modes[i].name, common_modes[i].w,
 7513				common_modes[i].h);
 7514		drm_mode_probed_add(connector, mode);
 7515		amdgpu_dm_connector->num_modes++;
 7516	}
 7517}
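
/*
 * Example (illustrative): with a 1920x1200 native panel, the loop above
 * adds every table entry that fits within the native resolution and is not
 * already in probed_modes (640x480 up to 1920x1080 and 1600x1200), while
 * 1920x1200 itself is skipped because it exactly matches the native size.
 */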
 7518
 7519static void amdgpu_set_panel_orientation(struct drm_connector *connector)
 7520{
 7521	struct drm_encoder *encoder;
 7522	struct amdgpu_encoder *amdgpu_encoder;
 7523	const struct drm_display_mode *native_mode;
 7524
 7525	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
 7526	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
 7527		return;
 7528
 7529	encoder = amdgpu_dm_connector_to_encoder(connector);
 7530	if (!encoder)
 7531		return;
 7532
 7533	amdgpu_encoder = to_amdgpu_encoder(encoder);
 7534
 7535	native_mode = &amdgpu_encoder->native_mode;
 7536	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
 7537		return;
 7538
 7539	drm_connector_set_panel_orientation_with_quirk(connector,
 7540						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
 7541						       native_mode->hdisplay,
 7542						       native_mode->vdisplay);
 7543}
 7544
 7545static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
 7546					      struct edid *edid)
 7547{
 7548	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7549			to_amdgpu_dm_connector(connector);
 7550
 7551	if (edid) {
 7552		/* empty probed_modes */
 7553		INIT_LIST_HEAD(&connector->probed_modes);
 7554		amdgpu_dm_connector->num_modes =
 7555				drm_add_edid_modes(connector, edid);
 7556
  7557		/* Sort the probed modes before calling
  7558		 * amdgpu_dm_get_native_mode(), since an EDID can have
  7559		 * more than one preferred mode. Modes that appear later
  7560		 * in the probed mode list could be of higher, preferred
  7561		 * resolution. For example, a 3840x2160 mode in the base
  7562		 * EDID preferred timing and a 4096x2160 preferred mode
  7563		 * in a DID extension block later.
  7564		 */
 7565		drm_mode_sort(&connector->probed_modes);
 7566		amdgpu_dm_get_native_mode(connector);
 7567
 7568		/* Freesync capabilities are reset by calling
 7569		 * drm_add_edid_modes() and need to be
 7570		 * restored here.
 7571		 */
 7572		amdgpu_dm_update_freesync_caps(connector, edid);
 7573
 7574		amdgpu_set_panel_orientation(connector);
 7575	} else {
 7576		amdgpu_dm_connector->num_modes = 0;
 7577	}
 7578}
 7579
 7580static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 7581			      struct drm_display_mode *mode)
 7582{
 7583	struct drm_display_mode *m;
 7584
  7585	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
 7586		if (drm_mode_equal(m, mode))
 7587			return true;
 7588	}
 7589
 7590	return false;
 7591}
 7592
 7593static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
 7594{
 7595	const struct drm_display_mode *m;
 7596	struct drm_display_mode *new_mode;
 7597	uint i;
 7598	uint32_t new_modes_count = 0;
 7599
 7600	/* Standard FPS values
 7601	 *
 7602	 * 23.976   - TV/NTSC
 7603	 * 24 	    - Cinema
 7604	 * 25 	    - TV/PAL
 7605	 * 29.97    - TV/NTSC
 7606	 * 30 	    - TV/NTSC
 7607	 * 48 	    - Cinema HFR
 7608	 * 50 	    - TV/PAL
 7609	 * 60 	    - Commonly used
 7610	 * 48,72,96 - Multiples of 24
 7611	 */
 7612	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
 7613					 48000, 50000, 60000, 72000, 96000 };
 7614
 7615	/*
 7616	 * Find mode with highest refresh rate with the same resolution
 7617	 * as the preferred mode. Some monitors report a preferred mode
 7618	 * with lower resolution than the highest refresh rate supported.
 7619	 */
 7620
 7621	m = get_highest_refresh_rate_mode(aconnector, true);
 7622	if (!m)
 7623		return 0;
 7624
 7625	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
 7626		uint64_t target_vtotal, target_vtotal_diff;
 7627		uint64_t num, den;
 7628
 7629		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
 7630			continue;
 7631
 7632		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
 7633		    common_rates[i] > aconnector->max_vfreq * 1000)
 7634			continue;
 7635
 7636		num = (unsigned long long)m->clock * 1000 * 1000;
 7637		den = common_rates[i] * (unsigned long long)m->htotal;
 7638		target_vtotal = div_u64(num, den);
 7639		target_vtotal_diff = target_vtotal - m->vtotal;
 7640
 7641		/* Check for illegal modes */
 7642		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
 7643		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
 7644		    m->vtotal + target_vtotal_diff < m->vsync_end)
 7645			continue;
 7646
 7647		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
 7648		if (!new_mode)
 7649			goto out;
 7650
 7651		new_mode->vtotal += (u16)target_vtotal_diff;
 7652		new_mode->vsync_start += (u16)target_vtotal_diff;
 7653		new_mode->vsync_end += (u16)target_vtotal_diff;
 7654		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
 7655		new_mode->type |= DRM_MODE_TYPE_DRIVER;
 7656
 7657		if (!is_duplicate_mode(aconnector, new_mode)) {
 7658			drm_mode_probed_add(&aconnector->base, new_mode);
 7659			new_modes_count += 1;
 7660		} else
 7661			drm_mode_destroy(aconnector->base.dev, new_mode);
 7662	}
 7663 out:
 7664	return new_modes_count;
 7665}
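
/*
 * Worked example (illustrative numbers): for a 1920x1080 base mode with
 * clock = 148500 kHz, htotal = 2200 and vtotal = 1125 (60 Hz), the 48 Hz
 * entry gives target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406,
 * so target_vtotal_diff = 281. The duplicated mode then refreshes at
 * 148500000 / (2200 * 1406) ~= 48.01 Hz, with the extra 281 lines added to
 * the vertical front porch.
 */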
 7666
 7667static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
 7668						   struct edid *edid)
 7669{
 7670	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7671		to_amdgpu_dm_connector(connector);
 7672
 7673	if (!(amdgpu_freesync_vid_mode && edid))
 7674		return;
 7675
 7676	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
 7677		amdgpu_dm_connector->num_modes +=
 7678			add_fs_modes(amdgpu_dm_connector);
 7679}
 7680
 7681static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 7682{
 7683	struct amdgpu_dm_connector *amdgpu_dm_connector =
 7684			to_amdgpu_dm_connector(connector);
 7685	struct drm_encoder *encoder;
 7686	struct edid *edid = amdgpu_dm_connector->edid;
 7687
 7688	encoder = amdgpu_dm_connector_to_encoder(connector);
 7689
 7690	if (!drm_edid_is_valid(edid)) {
 7691		amdgpu_dm_connector->num_modes =
 7692				drm_add_modes_noedid(connector, 640, 480);
 7693	} else {
 7694		amdgpu_dm_connector_ddc_get_modes(connector, edid);
 7695		amdgpu_dm_connector_add_common_modes(encoder, connector);
 7696		amdgpu_dm_connector_add_freesync_modes(connector, edid);
 7697	}
 7698	amdgpu_dm_fbc_init(connector);
 7699
 7700	return amdgpu_dm_connector->num_modes;
 7701}
 7702
 7703void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 7704				     struct amdgpu_dm_connector *aconnector,
 7705				     int connector_type,
 7706				     struct dc_link *link,
 7707				     int link_index)
 7708{
 7709	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
 7710
 7711	/*
 7712	 * Some of the properties below require access to state, like bpc.
 7713	 * Allocate some default initial connector state with our reset helper.
 7714	 */
 7715	if (aconnector->base.funcs->reset)
 7716		aconnector->base.funcs->reset(&aconnector->base);
 7717
 7718	aconnector->connector_id = link_index;
 7719	aconnector->dc_link = link;
 7720	aconnector->base.interlace_allowed = false;
 7721	aconnector->base.doublescan_allowed = false;
 7722	aconnector->base.stereo_allowed = false;
 7723	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
 7724	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
 7725	aconnector->audio_inst = -1;
 7726	mutex_init(&aconnector->hpd_lock);
 7727
 7728	/*
  7729	 * Configure HPD hot plug support. The connector->polled default value
  7730	 * is 0, which means HPD hot plug is not supported.
 7731	 */
 7732	switch (connector_type) {
 7733	case DRM_MODE_CONNECTOR_HDMIA:
 7734		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 7735		aconnector->base.ycbcr_420_allowed =
  7736			link->link_enc->features.hdmi_ycbcr420_supported;
 7737		break;
 7738	case DRM_MODE_CONNECTOR_DisplayPort:
 7739		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 7740		aconnector->base.ycbcr_420_allowed =
  7741			link->link_enc->features.dp_ycbcr420_supported;
 7742		break;
 7743	case DRM_MODE_CONNECTOR_DVID:
 7744		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 7745		break;
 7746	default:
 7747		break;
 7748	}
 7749
 7750	drm_object_attach_property(&aconnector->base.base,
 7751				dm->ddev->mode_config.scaling_mode_property,
 7752				DRM_MODE_SCALE_NONE);
 7753
 7754	drm_object_attach_property(&aconnector->base.base,
 7755				adev->mode_info.underscan_property,
 7756				UNDERSCAN_OFF);
 7757	drm_object_attach_property(&aconnector->base.base,
 7758				adev->mode_info.underscan_hborder_property,
 7759				0);
 7760	drm_object_attach_property(&aconnector->base.base,
 7761				adev->mode_info.underscan_vborder_property,
 7762				0);
 7763
 7764	if (!aconnector->mst_port)
 7765		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 7766
  7767	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
 7768	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
 7769	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 7770
 7771	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
 7772	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
 7773		drm_object_attach_property(&aconnector->base.base,
 7774				adev->mode_info.abm_level_property, 0);
 7775	}
 7776
 7777	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
 7778	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 7779	    connector_type == DRM_MODE_CONNECTOR_eDP) {
 7780		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
 7781
 7782		if (!aconnector->mst_port)
 7783			drm_connector_attach_vrr_capable_property(&aconnector->base);
 7784
 7785#ifdef CONFIG_DRM_AMD_DC_HDCP
 7786		if (adev->dm.hdcp_workqueue)
 7787			drm_connector_attach_content_protection_property(&aconnector->base, true);
 7788#endif
 7789	}
 7790}
 7791
 7792static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
 7793			      struct i2c_msg *msgs, int num)
 7794{
 7795	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
 7796	struct ddc_service *ddc_service = i2c->ddc_service;
 7797	struct i2c_command cmd;
 7798	int i;
 7799	int result = -EIO;
 7800
 7801	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
 7802
 7803	if (!cmd.payloads)
 7804		return result;
 7805
 7806	cmd.number_of_payloads = num;
 7807	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
 7808	cmd.speed = 100;
 7809
 7810	for (i = 0; i < num; i++) {
 7811		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
 7812		cmd.payloads[i].address = msgs[i].addr;
 7813		cmd.payloads[i].length = msgs[i].len;
 7814		cmd.payloads[i].data = msgs[i].buf;
 7815	}
 7816
 7817	if (dc_submit_i2c(
 7818			ddc_service->ctx->dc,
 7819			ddc_service->ddc_pin->hw_info.ddc_channel,
 7820			&cmd))
 7821		result = num;
 7822
 7823	kfree(cmd.payloads);
 7824	return result;
 7825}
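
/*
 * For illustration, a typical EDID read arrives here as two i2c_msg
 * entries: a one-byte write of offset 0x00 to address 0x50 followed by a
 * 128-byte read from the same address. The loop above maps them to two
 * i2c_payload entries (write = true, then write = false) submitted in a
 * single dc_submit_i2c() call.
 */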
 7826
 7827static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
 7828{
 7829	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 7830}
 7831
 7832static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
 7833	.master_xfer = amdgpu_dm_i2c_xfer,
 7834	.functionality = amdgpu_dm_i2c_func,
 7835};
 7836
 7837static struct amdgpu_i2c_adapter *
 7838create_i2c(struct ddc_service *ddc_service,
 7839	   int link_index,
 7840	   int *res)
 7841{
 7842	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
 7843	struct amdgpu_i2c_adapter *i2c;
 7844
 7845	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
 7846	if (!i2c)
 7847		return NULL;
 7848	i2c->base.owner = THIS_MODULE;
 7849	i2c->base.class = I2C_CLASS_DDC;
 7850	i2c->base.dev.parent = &adev->pdev->dev;
 7851	i2c->base.algo = &amdgpu_dm_i2c_algo;
 7852	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
 7853	i2c_set_adapdata(&i2c->base, i2c);
 7854	i2c->ddc_service = ddc_service;
 7855	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
 7856
 7857	return i2c;
 7858}
 7859
 7860
 7861/*
 7862 * Note: this function assumes that dc_link_detect() was called for the
 7863 * dc_link which will be represented by this aconnector.
 7864 */
 7865static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 7866				    struct amdgpu_dm_connector *aconnector,
 7867				    uint32_t link_index,
 7868				    struct amdgpu_encoder *aencoder)
 7869{
 7870	int res = 0;
 7871	int connector_type;
 7872	struct dc *dc = dm->dc;
 7873	struct dc_link *link = dc_get_link_at_index(dc, link_index);
 7874	struct amdgpu_i2c_adapter *i2c;
 7875
 7876	link->priv = aconnector;
 7877
 7878	DRM_DEBUG_DRIVER("%s()\n", __func__);
 7879
 7880	i2c = create_i2c(link->ddc, link->link_index, &res);
 7881	if (!i2c) {
 7882		DRM_ERROR("Failed to create i2c adapter data\n");
 7883		return -ENOMEM;
 7884	}
 7885
 7886	aconnector->i2c = i2c;
 7887	res = i2c_add_adapter(&i2c->base);
 7888
 7889	if (res) {
 7890		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
 7891		goto out_free;
 7892	}
 7893
 7894	connector_type = to_drm_connector_type(link->connector_signal);
 7895
 7896	res = drm_connector_init_with_ddc(
 7897			dm->ddev,
 7898			&aconnector->base,
 7899			&amdgpu_dm_connector_funcs,
 7900			connector_type,
 7901			&i2c->base);
 7902
 7903	if (res) {
 7904		DRM_ERROR("connector_init failed\n");
 7905		aconnector->connector_id = -1;
 7906		goto out_free;
 7907	}
 7908
 7909	drm_connector_helper_add(
 7910			&aconnector->base,
 7911			&amdgpu_dm_connector_helper_funcs);
 7912
 7913	amdgpu_dm_connector_init_helper(
 7914		dm,
 7915		aconnector,
 7916		connector_type,
 7917		link,
 7918		link_index);
 7919
 7920	drm_connector_attach_encoder(
 7921		&aconnector->base, &aencoder->base);
 7922
 7923	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
 7924		|| connector_type == DRM_MODE_CONNECTOR_eDP)
 7925		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
 7926
 7927out_free:
 7928	if (res) {
 7929		kfree(i2c);
 7930		aconnector->i2c = NULL;
 7931	}
 7932	return res;
 7933}
 7934
 7935int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
 7936{
 7937	switch (adev->mode_info.num_crtc) {
 7938	case 1:
 7939		return 0x1;
 7940	case 2:
 7941		return 0x3;
 7942	case 3:
 7943		return 0x7;
 7944	case 4:
 7945		return 0xf;
 7946	case 5:
 7947		return 0x1f;
 7948	case 6:
 7949	default:
 7950		return 0x3f;
 7951	}
 7952}
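
/*
 * The switch above is a clamped low-bit mask: for num_crtc in the
 * supported 1..6 range it is equivalent to (1 << num_crtc) - 1, e.g.
 * num_crtc = 4 gives 0xf, allowing the encoder to be driven by any of the
 * first four CRTCs.
 */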
 7953
 7954static int amdgpu_dm_encoder_init(struct drm_device *dev,
 7955				  struct amdgpu_encoder *aencoder,
 7956				  uint32_t link_index)
 7957{
 7958	struct amdgpu_device *adev = drm_to_adev(dev);
 7959
 7960	int res = drm_encoder_init(dev,
 7961				   &aencoder->base,
 7962				   &amdgpu_dm_encoder_funcs,
 7963				   DRM_MODE_ENCODER_TMDS,
 7964				   NULL);
 7965
 7966	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
 7967
 7968	if (!res)
 7969		aencoder->encoder_id = link_index;
 7970	else
 7971		aencoder->encoder_id = -1;
 7972
 7973	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
 7974
 7975	return res;
 7976}
 7977
 7978static void manage_dm_interrupts(struct amdgpu_device *adev,
 7979				 struct amdgpu_crtc *acrtc,
 7980				 bool enable)
 7981{
 7982	/*
 7983	 * We have no guarantee that the frontend index maps to the same
 7984	 * backend index - some even map to more than one.
 7985	 *
 7986	 * TODO: Use a different interrupt or check DC itself for the mapping.
 7987	 */
 7988	int irq_type =
 7989		amdgpu_display_crtc_idx_to_irq_type(
 7990			adev,
 7991			acrtc->crtc_id);
 7992
 7993	if (enable) {
 7994		drm_crtc_vblank_on(&acrtc->base);
 7995		amdgpu_irq_get(
 7996			adev,
 7997			&adev->pageflip_irq,
 7998			irq_type);
 7999#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 8000		amdgpu_irq_get(
 8001			adev,
 8002			&adev->vline0_irq,
 8003			irq_type);
 8004#endif
 8005	} else {
 8006#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 8007		amdgpu_irq_put(
 8008			adev,
 8009			&adev->vline0_irq,
 8010			irq_type);
 8011#endif
 8012		amdgpu_irq_put(
 8013			adev,
 8014			&adev->pageflip_irq,
 8015			irq_type);
 8016		drm_crtc_vblank_off(&acrtc->base);
 8017	}
 8018}
 8019
 8020static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
 8021				      struct amdgpu_crtc *acrtc)
 8022{
 8023	int irq_type =
 8024		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
 8025
  8026		/*
  8027		 * This reads the current state for the IRQ and forcibly reapplies
  8028		 * the setting to the hardware.
  8029		 */
 8030	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
 8031}
 8032
 8033static bool
 8034is_scaling_state_different(const struct dm_connector_state *dm_state,
 8035			   const struct dm_connector_state *old_dm_state)
 8036{
 8037	if (dm_state->scaling != old_dm_state->scaling)
 8038		return true;
 8039	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
 8040		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
 8041			return true;
 8042	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
 8043		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
 8044			return true;
 8045	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
 8046		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
 8047		return true;
 8048	return false;
 8049}
 8050
 8051#ifdef CONFIG_DRM_AMD_DC_HDCP
 8052static bool is_content_protection_different(struct drm_connector_state *state,
 8053					    const struct drm_connector_state *old_state,
 8054					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
 8055{
 8056	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 8057	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 8058
 8059	/* Handle: Type0/1 change */
 8060	if (old_state->hdcp_content_type != state->hdcp_content_type &&
 8061	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 8062		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 8063		return true;
 8064	}
 8065
  8066	/* CP is being re-enabled, ignore this
 8067	 *
 8068	 * Handles:	ENABLED -> DESIRED
 8069	 */
 8070	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
 8071	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
 8072		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
 8073		return false;
 8074	}
 8075
  8076	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
 8077	 *
 8078	 * Handles:	UNDESIRED -> ENABLED
 8079	 */
 8080	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
 8081	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
 8082		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 8083
 8084	/* Stream removed and re-enabled
 8085	 *
 8086	 * Can sometimes overlap with the HPD case,
 8087	 * thus set update_hdcp to false to avoid
 8088	 * setting HDCP multiple times.
 8089	 *
 8090	 * Handles:	DESIRED -> DESIRED (Special case)
 8091	 */
 8092	if (!(old_state->crtc && old_state->crtc->enabled) &&
 8093		state->crtc && state->crtc->enabled &&
 8094		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
 8095		dm_con_state->update_hdcp = false;
 8096		return true;
 8097	}
 8098
 8099	/* Hot-plug, headless s3, dpms
 8100	 *
 8101	 * Only start HDCP if the display is connected/enabled.
 8102	 * update_hdcp flag will be set to false until the next
 8103	 * HPD comes in.
 8104	 *
 8105	 * Handles:	DESIRED -> DESIRED (Special case)
 8106	 */
 8107	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
 8108	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
 8109		dm_con_state->update_hdcp = false;
 8110		return true;
 8111	}
 8112
 8113	/*
 8114	 * Handles:	UNDESIRED -> UNDESIRED
 8115	 *		DESIRED -> DESIRED
 8116	 *		ENABLED -> ENABLED
 8117	 */
 8118	if (old_state->content_protection == state->content_protection)
 8119		return false;
 8120
 8121	/*
 8122	 * Handles:	UNDESIRED -> DESIRED
 8123	 *		DESIRED -> UNDESIRED
 8124	 *		ENABLED -> UNDESIRED
 8125	 */
 8126	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
 8127		return true;
 8128
 8129	/*
 8130	 * Handles:	DESIRED -> ENABLED
 8131	 */
 8132	return false;
 8133}
 8134
 8135#endif
 8136static void remove_stream(struct amdgpu_device *adev,
 8137			  struct amdgpu_crtc *acrtc,
 8138			  struct dc_stream_state *stream)
 8139{
 8140	/* this is the update mode case */
 8141
 8142	acrtc->otg_inst = -1;
 8143	acrtc->enabled = false;
 8144}
 8145
 8146static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 8147			       struct dc_cursor_position *position)
 8148{
 8149	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 8150	int x, y;
 8151	int xorigin = 0, yorigin = 0;
 8152
 8153	if (!crtc || !plane->state->fb)
 8154		return 0;
 8155
 8156	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
 8157	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
 8158		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
 8159			  __func__,
 8160			  plane->state->crtc_w,
 8161			  plane->state->crtc_h);
 8162		return -EINVAL;
 8163	}
 8164
 8165	x = plane->state->crtc_x;
 8166	y = plane->state->crtc_y;
 8167
 8168	if (x <= -amdgpu_crtc->max_cursor_width ||
 8169	    y <= -amdgpu_crtc->max_cursor_height)
 8170		return 0;
 8171
 8172	if (x < 0) {
 8173		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
 8174		x = 0;
 8175	}
 8176	if (y < 0) {
 8177		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
 8178		y = 0;
 8179	}
 8180	position->enable = true;
 8181	position->translate_by_source = true;
 8182	position->x = x;
 8183	position->y = y;
 8184	position->x_hotspot = xorigin;
 8185	position->y_hotspot = yorigin;
 8186
 8187	return 0;
 8188}
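
/*
 * Hotspot example (illustrative): a 64x64 cursor at crtc_x = -10 and
 * crtc_y = 0 yields x = 0 with xorigin = 10, so DC places the cursor at
 * the left edge with the image shifted 10 pixels into the plane, which is
 * what userspace expects for a partially off-screen cursor.
 */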
 8189
 8190static void handle_cursor_update(struct drm_plane *plane,
 8191				 struct drm_plane_state *old_plane_state)
 8192{
 8193	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 8194	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
 8195	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
 8196	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
 8197	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 8198	uint64_t address = afb ? afb->address : 0;
 8199	struct dc_cursor_position position = {0};
 8200	struct dc_cursor_attributes attributes;
 8201	int ret;
 8202
 8203	if (!plane->state->fb && !old_plane_state->fb)
 8204		return;
 8205
  8206	DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
 8207		      __func__,
 8208		      amdgpu_crtc->crtc_id,
 8209		      plane->state->crtc_w,
 8210		      plane->state->crtc_h);
 8211
 8212	ret = get_cursor_position(plane, crtc, &position);
 8213	if (ret)
 8214		return;
 8215
 8216	if (!position.enable) {
 8217		/* turn off cursor */
 8218		if (crtc_state && crtc_state->stream) {
 8219			mutex_lock(&adev->dm.dc_lock);
 8220			dc_stream_set_cursor_position(crtc_state->stream,
 8221						      &position);
 8222			mutex_unlock(&adev->dm.dc_lock);
 8223		}
 8224		return;
 8225	}
 8226
 8227	amdgpu_crtc->cursor_width = plane->state->crtc_w;
 8228	amdgpu_crtc->cursor_height = plane->state->crtc_h;
 8229
 8230	memset(&attributes, 0, sizeof(attributes));
 8231	attributes.address.high_part = upper_32_bits(address);
 8232	attributes.address.low_part  = lower_32_bits(address);
 8233	attributes.width             = plane->state->crtc_w;
 8234	attributes.height            = plane->state->crtc_h;
 8235	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
 8236	attributes.rotation_angle    = 0;
 8237	attributes.attribute_flags.value = 0;
 8238
 8239	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
 8240
 8241	if (crtc_state->stream) {
 8242		mutex_lock(&adev->dm.dc_lock);
 8243		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
 8244							 &attributes))
 8245			DRM_ERROR("DC failed to set cursor attributes\n");
 8246
 8247		if (!dc_stream_set_cursor_position(crtc_state->stream,
 8248						   &position))
 8249			DRM_ERROR("DC failed to set cursor position\n");
 8250		mutex_unlock(&adev->dm.dc_lock);
 8251	}
 8252}
 8253
 8254static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
 8255{
 8256
 8257	assert_spin_locked(&acrtc->base.dev->event_lock);
 8258	WARN_ON(acrtc->event);
 8259
 8260	acrtc->event = acrtc->base.state->event;
 8261
 8262	/* Set the flip status */
 8263	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 8264
 8265	/* Mark this event as consumed */
 8266	acrtc->base.state->event = NULL;
 8267
 8268	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
 8269		     acrtc->crtc_id);
 8270}
 8271
 8272static void update_freesync_state_on_stream(
 8273	struct amdgpu_display_manager *dm,
 8274	struct dm_crtc_state *new_crtc_state,
 8275	struct dc_stream_state *new_stream,
 8276	struct dc_plane_state *surface,
 8277	u32 flip_timestamp_in_us)
 8278{
 8279	struct mod_vrr_params vrr_params;
 8280	struct dc_info_packet vrr_infopacket = {0};
 8281	struct amdgpu_device *adev = dm->adev;
 8282	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
 8283	unsigned long flags;
 8284	bool pack_sdp_v1_3 = false;
 8285
 8286	if (!new_stream)
 8287		return;
 8288
 8289	/*
 8290	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
 8291	 * For now it's sufficient to just guard against these conditions.
 8292	 */
 8293
 8294	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
 8295		return;
 8296
 8297	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
  8298	vrr_params = acrtc->dm_irq_params.vrr_params;
 8299
 8300	if (surface) {
 8301		mod_freesync_handle_preflip(
 8302			dm->freesync_module,
 8303			surface,
 8304			new_stream,
 8305			flip_timestamp_in_us,
 8306			&vrr_params);
 8307
 8308		if (adev->family < AMDGPU_FAMILY_AI &&
 8309		    amdgpu_dm_vrr_active(new_crtc_state)) {
 8310			mod_freesync_handle_v_update(dm->freesync_module,
 8311						     new_stream, &vrr_params);
 8312
 8313			/* Need to call this before the frame ends. */
 8314			dc_stream_adjust_vmin_vmax(dm->dc,
 8315						   new_crtc_state->stream,
 8316						   &vrr_params.adjust);
 8317		}
 8318	}
 8319
 8320	mod_freesync_build_vrr_infopacket(
 8321		dm->freesync_module,
 8322		new_stream,
 8323		&vrr_params,
 8324		PACKET_TYPE_VRR,
 8325		TRANSFER_FUNC_UNKNOWN,
 8326		&vrr_infopacket,
 8327		pack_sdp_v1_3);
 8328
 8329	new_crtc_state->freesync_timing_changed |=
 8330		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
 8331			&vrr_params.adjust,
 8332			sizeof(vrr_params.adjust)) != 0);
 8333
 8334	new_crtc_state->freesync_vrr_info_changed |=
 8335		(memcmp(&new_crtc_state->vrr_infopacket,
 8336			&vrr_infopacket,
 8337			sizeof(vrr_infopacket)) != 0);
 8338
 8339	acrtc->dm_irq_params.vrr_params = vrr_params;
 8340	new_crtc_state->vrr_infopacket = vrr_infopacket;
 8341
 8342	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
 8343	new_stream->vrr_infopacket = vrr_infopacket;
 8344
 8345	if (new_crtc_state->freesync_vrr_info_changed)
 8346		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
 8347			      new_crtc_state->base.crtc->base.id,
 8348			      (int)new_crtc_state->base.vrr_enabled,
 8349			      (int)vrr_params.state);
 8350
 8351	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 8352}
 8353
 8354static void update_stream_irq_parameters(
 8355	struct amdgpu_display_manager *dm,
 8356	struct dm_crtc_state *new_crtc_state)
 8357{
 8358	struct dc_stream_state *new_stream = new_crtc_state->stream;
 8359	struct mod_vrr_params vrr_params;
 8360	struct mod_freesync_config config = new_crtc_state->freesync_config;
 8361	struct amdgpu_device *adev = dm->adev;
 8362	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
 8363	unsigned long flags;
 8364
 8365	if (!new_stream)
 8366		return;
 8367
 8368	/*
 8369	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
 8370	 * For now it's sufficient to just guard against these conditions.
 8371	 */
 8372	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
 8373		return;
 8374
 8375	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 8376	vrr_params = acrtc->dm_irq_params.vrr_params;
 8377
 8378	if (new_crtc_state->vrr_supported &&
 8379	    config.min_refresh_in_uhz &&
 8380	    config.max_refresh_in_uhz) {
 8381		/*
 8382		 * if freesync compatible mode was set, config.state will be set
 8383		 * in atomic check
 8384		 */
 8385		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
 8386		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
 8387		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
 8388			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
 8389			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
 8390			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
 8391			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
 8392		} else {
 8393			config.state = new_crtc_state->base.vrr_enabled ?
 8394						     VRR_STATE_ACTIVE_VARIABLE :
 8395						     VRR_STATE_INACTIVE;
 8396		}
 8397	} else {
 8398		config.state = VRR_STATE_UNSUPPORTED;
 8399	}
 8400
 8401	mod_freesync_build_vrr_params(dm->freesync_module,
 8402				      new_stream,
 8403				      &config, &vrr_params);
 8404
 8405	new_crtc_state->freesync_timing_changed |=
 8406		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
 8407			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
 8408
 8409	new_crtc_state->freesync_config = config;
 8410	/* Copy state for access from DM IRQ handler */
 8411	acrtc->dm_irq_params.freesync_config = config;
 8412	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
 8413	acrtc->dm_irq_params.vrr_params = vrr_params;
 8414	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 8415}
 8416
 8417static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
 8418					    struct dm_crtc_state *new_state)
 8419{
 8420	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
 8421	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
 8422
 8423	if (!old_vrr_active && new_vrr_active) {
 8424		/* Transition VRR inactive -> active:
 8425		 * While VRR is active, we must not disable vblank irq, as a
  8426		 * reenable after disable would compute bogus vblank/pflip
  8427		 * timestamps if the reenable happens inside the display front porch.
 8428		 *
 8429		 * We also need vupdate irq for the actual core vblank handling
 8430		 * at end of vblank.
 8431		 */
 8432		dm_set_vupdate_irq(new_state->base.crtc, true);
 8433		drm_crtc_vblank_get(new_state->base.crtc);
 8434		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
 8435				 __func__, new_state->base.crtc->base.id);
 8436	} else if (old_vrr_active && !new_vrr_active) {
 8437		/* Transition VRR active -> inactive:
 8438		 * Allow vblank irq disable again for fixed refresh rate.
 8439		 */
 8440		dm_set_vupdate_irq(new_state->base.crtc, false);
 8441		drm_crtc_vblank_put(new_state->base.crtc);
 8442		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
 8443				 __func__, new_state->base.crtc->base.id);
 8444	}
 8445}
 8446
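/*
 * Issue cursor updates for every cursor plane in the atomic state.
 * Cursor planes are programmed separately from the surface-update
 * bundle built in amdgpu_dm_commit_planes() below.
 */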
 8447static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
 8448{
 8449	struct drm_plane *plane;
 8450	struct drm_plane_state *old_plane_state;
 8451	int i;
 8452
 8453	/*
 8454	 * TODO: Make this per-stream so we don't issue redundant updates for
 8455	 * commits with multiple streams.
 8456	 */
 8457	for_each_old_plane_in_state(state, plane, old_plane_state, i)
 8458		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 8459			handle_cursor_update(plane, old_plane_state);
 8460}
 8461
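/*
 * amdgpu_dm_commit_planes() - program all plane updates for one CRTC.
 *
 * Builds a dc_surface_update bundle for every non-cursor plane on @pcrtc
 * (scaling info, plane info, flip address), waits for the framebuffer
 * fences, throttles page flips against the target vblank, and finally
 * hands the whole bundle to DC via dc_commit_updates_for_stream().
 * PSR and pflip interrupt state are updated as part of the commit.
 */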
 8462static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 8463				    struct dc_state *dc_state,
 8464				    struct drm_device *dev,
 8465				    struct amdgpu_display_manager *dm,
 8466				    struct drm_crtc *pcrtc,
 8467				    bool wait_for_vblank)
 8468{
 8469	uint32_t i;
 8470	uint64_t timestamp_ns;
 8471	struct drm_plane *plane;
 8472	struct drm_plane_state *old_plane_state, *new_plane_state;
 8473	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
 8474	struct drm_crtc_state *new_pcrtc_state =
 8475			drm_atomic_get_new_crtc_state(state, pcrtc);
 8476	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
 8477	struct dm_crtc_state *dm_old_crtc_state =
 8478			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 8479	int planes_count = 0, vpos, hpos;
 8480	long r;
 8481	unsigned long flags;
 8482	struct amdgpu_bo *abo;
 8483	uint32_t target_vblank, last_flip_vblank;
 8484	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
 8485	bool pflip_present = false;
 8486	struct {
 8487		struct dc_surface_update surface_updates[MAX_SURFACES];
 8488		struct dc_plane_info plane_infos[MAX_SURFACES];
 8489		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 8490		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 8491		struct dc_stream_update stream_update;
 8492	} *bundle;
 8493
 8494	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 8495
 8496	if (!bundle) {
 8497		dm_error("Failed to allocate update bundle\n");
 8498		goto cleanup;
 8499	}
 8500
 8501	/*
 8502	 * Disable the cursor first if we're disabling all the planes.
 8503	 * It'll remain on the screen after the planes are re-enabled
 8504	 * if we don't.
 8505	 */
 8506	if (acrtc_state->active_planes == 0)
 8507		amdgpu_dm_commit_cursors(state);
 8508
 8509	/* update planes when needed */
 8510	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
 8511		struct drm_crtc *crtc = new_plane_state->crtc;
 8512		struct drm_crtc_state *new_crtc_state;
 8513		struct drm_framebuffer *fb = new_plane_state->fb;
 8514		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
 8515		bool plane_needs_flip;
 8516		struct dc_plane_state *dc_plane;
 8517		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 8518
 8519		/* Cursor plane is handled after stream updates */
 8520		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 8521			continue;
 8522
 8523		if (!fb || !crtc || pcrtc != crtc)
 8524			continue;
 8525
 8526		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 8527		if (!new_crtc_state->active)
 8528			continue;
 8529
 8530		dc_plane = dm_new_plane_state->dc_state;
 8531
 8532		bundle->surface_updates[planes_count].surface = dc_plane;
 8533		if (new_pcrtc_state->color_mgmt_changed) {
 8534			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
 8535			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
 8536			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
 8537		}
 8538
 8539		fill_dc_scaling_info(new_plane_state,
 8540				     &bundle->scaling_infos[planes_count]);
 8541
 8542		bundle->surface_updates[planes_count].scaling_info =
 8543			&bundle->scaling_infos[planes_count];
 8544
 8545		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
 8546
 8547		pflip_present = pflip_present || plane_needs_flip;
 8548
 8549		if (!plane_needs_flip) {
 8550			planes_count += 1;
 8551			continue;
 8552		}
 8553
 8554		abo = gem_to_amdgpu_bo(fb->obj[0]);
 8555
 8556		/*
  8557		 * Wait for all fences on this FB. Do a limited wait to avoid
  8558		 * deadlock during GPU reset, when this fence would never signal
  8559		 * while we hold the reservation lock for the BO.
 8560		 */
 8561		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
 8562					  msecs_to_jiffies(5000));
 8563		if (unlikely(r <= 0))
  8564			DRM_ERROR("Waiting for fences timed out!\n");
 8565
 8566		fill_dc_plane_info_and_addr(
 8567			dm->adev, new_plane_state,
 8568			afb->tiling_flags,
 8569			&bundle->plane_infos[planes_count],
 8570			&bundle->flip_addrs[planes_count].address,
 8571			afb->tmz_surface, false);
 8572
 8573		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
 8574				 new_plane_state->plane->index,
 8575				 bundle->plane_infos[planes_count].dcc.enable);
 8576
 8577		bundle->surface_updates[planes_count].plane_info =
 8578			&bundle->plane_infos[planes_count];
 8579
 8580		/*
 8581		 * Only allow immediate flips for fast updates that don't
  8582		 * change FB pitch, DCC state, rotation or mirroring.
 8583		 */
 8584		bundle->flip_addrs[planes_count].flip_immediate =
 8585			crtc->state->async_flip &&
 8586			acrtc_state->update_type == UPDATE_TYPE_FAST;
 8587
 8588		timestamp_ns = ktime_get_ns();
 8589		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
 8590		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
 8591		bundle->surface_updates[planes_count].surface = dc_plane;
 8592
 8593		if (!bundle->surface_updates[planes_count].surface) {
 8594			DRM_ERROR("No surface for CRTC: id=%d\n",
 8595					acrtc_attach->crtc_id);
 8596			continue;
 8597		}
 8598
 8599		if (plane == pcrtc->primary)
 8600			update_freesync_state_on_stream(
 8601				dm,
 8602				acrtc_state,
 8603				acrtc_state->stream,
 8604				dc_plane,
 8605				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
 8606
 8607		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
 8608				 __func__,
 8609				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
 8610				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
 8611
 8612		planes_count += 1;
 8613
 8614	}
 8615
 8616	if (pflip_present) {
 8617		if (!vrr_active) {
 8618			/* Use old throttling in non-vrr fixed refresh rate mode
 8619			 * to keep flip scheduling based on target vblank counts
 8620			 * working in a backwards compatible way, e.g., for
 8621			 * clients using the GLX_OML_sync_control extension or
 8622			 * DRI3/Present extension with defined target_msc.
 8623			 */
 8624			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
 8625		}
 8626		else {
 8627			/* For variable refresh rate mode only:
 8628			 * Get vblank of last completed flip to avoid > 1 vrr
 8629			 * flips per video frame by use of throttling, but allow
 8630			 * flip programming anywhere in the possibly large
 8631			 * variable vrr vblank interval for fine-grained flip
 8632			 * timing control and more opportunity to avoid stutter
 8633			 * on late submission of flips.
 8634			 */
 8635			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 8636			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
 8637			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 8638		}
 8639
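		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank count derived above (flip as soon as possible) or
		 * the one right after it (wait one full refresh cycle).
		 */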
 8640		target_vblank = last_flip_vblank + wait_for_vblank;
 8641
 8642		/*
 8643		 * Wait until we're out of the vertical blank period before the one
 8644		 * targeted by the flip
 8645		 */
 8646		while ((acrtc_attach->enabled &&
 8647			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
 8648							    0, &vpos, &hpos, NULL,
 8649							    NULL, &pcrtc->hwmode)
 8650			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 8651			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 8652			(int)(target_vblank -
 8653			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
 8654			usleep_range(1000, 1100);
 8655		}
 8656
  8657		/*
 8658		 * Prepare the flip event for the pageflip interrupt to handle.
 8659		 *
 8660		 * This only works in the case where we've already turned on the
  8661		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
 8662		 * from 0 -> n planes we have to skip a hardware generated event
 8663		 * and rely on sending it from software.
 8664		 */
 8665		if (acrtc_attach->base.state->event &&
 8666		    acrtc_state->active_planes > 0) {
 8667			drm_crtc_vblank_get(pcrtc);
 8668
 8669			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 8670
 8671			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
 8672			prepare_flip_isr(acrtc_attach);
 8673
 8674			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 8675		}
 8676
 8677		if (acrtc_state->stream) {
 8678			if (acrtc_state->freesync_vrr_info_changed)
 8679				bundle->stream_update.vrr_infopacket =
 8680					&acrtc_state->stream->vrr_infopacket;
 8681		}
 8682	}
 8683
 8684	/* Update the planes if changed or disable if we don't have any. */
 8685	if ((planes_count || acrtc_state->active_planes == 0) &&
 8686		acrtc_state->stream) {
 8687		bundle->stream_update.stream = acrtc_state->stream;
 8688		if (new_pcrtc_state->mode_changed) {
 8689			bundle->stream_update.src = acrtc_state->stream->src;
 8690			bundle->stream_update.dst = acrtc_state->stream->dst;
 8691		}
 8692
 8693		if (new_pcrtc_state->color_mgmt_changed) {
 8694			/*
 8695			 * TODO: This isn't fully correct since we've actually
 8696			 * already modified the stream in place.
 8697			 */
 8698			bundle->stream_update.gamut_remap =
 8699				&acrtc_state->stream->gamut_remap_matrix;
 8700			bundle->stream_update.output_csc_transform =
 8701				&acrtc_state->stream->csc_color_matrix;
 8702			bundle->stream_update.out_transfer_func =
 8703				acrtc_state->stream->out_transfer_func;
 8704		}
 8705
 8706		acrtc_state->stream->abm_level = acrtc_state->abm_level;
 8707		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
 8708			bundle->stream_update.abm_level = &acrtc_state->abm_level;
 8709
 8710		/*
 8711		 * If FreeSync state on the stream has changed then we need to
 8712		 * re-adjust the min/max bounds now that DC doesn't handle this
 8713		 * as part of commit.
 8714		 */
 8715		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
 8716			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 8717			dc_stream_adjust_vmin_vmax(
 8718				dm->dc, acrtc_state->stream,
 8719				&acrtc_attach->dm_irq_params.vrr_params.adjust);
 8720			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 8721		}
 8722		mutex_lock(&dm->dc_lock);
 8723		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
 8724				acrtc_state->stream->link->psr_settings.psr_allow_active)
 8725			amdgpu_dm_psr_disable(acrtc_state->stream);
 8726
 8727		dc_commit_updates_for_stream(dm->dc,
 8728						     bundle->surface_updates,
 8729						     planes_count,
 8730						     acrtc_state->stream,
 8731						     &bundle->stream_update,
 8732						     dc_state);
 8733
  8734		/*
 8735		 * Enable or disable the interrupts on the backend.
 8736		 *
 8737		 * Most pipes are put into power gating when unused.
 8738		 *
  8739		 * When power gating is enabled on a pipe, we lose the
  8740		 * interrupt enablement state once the pipe is ungated.
 8741		 *
 8742		 * So we need to update the IRQ control state in hardware
 8743		 * whenever the pipe turns on (since it could be previously
 8744		 * power gated) or off (since some pipes can't be power gated
 8745		 * on some ASICs).
 8746		 */
 8747		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
 8748			dm_update_pflip_irq_state(drm_to_adev(dev),
 8749						  acrtc_attach);
 8750
 8751		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
 8752				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 8753				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
 8754			amdgpu_dm_link_setup_psr(acrtc_state->stream);
 8755		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
 8756				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
 8757				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
 8758			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
 8759					acrtc_state->stream->dm_stream_context;
 8760
 8761			if (aconn->psr_skip_count > 0)
 8762				aconn->psr_skip_count--;
 8763			else
 8764				amdgpu_dm_psr_enable(acrtc_state->stream);
 8765		}
 8766
 8767		mutex_unlock(&dm->dc_lock);
 8768	}
 8769
 8770	/*
 8771	 * Update cursor state *after* programming all the planes.
  8772	 * This avoids redundant programming in the case where we're going
  8773	 * to be disabling a single plane, since those pipes are being disabled anyway.
 8774	 */
 8775	if (acrtc_state->active_planes)
 8776		amdgpu_dm_commit_cursors(state);
 8777
 8778cleanup:
 8779	kfree(bundle);
 8780}
 8781
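/*
 * Walk the connectors in two passes: first notify the audio component
 * about instances that went away (audio_inst reset to -1), then about
 * streams that came up, so the ELD is read from the right instance.
 */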
 8782static void amdgpu_dm_commit_audio(struct drm_device *dev,
 8783				   struct drm_atomic_state *state)
 8784{
 8785	struct amdgpu_device *adev = drm_to_adev(dev);
 8786	struct amdgpu_dm_connector *aconnector;
 8787	struct drm_connector *connector;
 8788	struct drm_connector_state *old_con_state, *new_con_state;
 8789	struct drm_crtc_state *new_crtc_state;
 8790	struct dm_crtc_state *new_dm_crtc_state;
 8791	const struct dc_stream_status *status;
 8792	int i, inst;
 8793
 8794	/* Notify device removals. */
 8795	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 8796		if (old_con_state->crtc != new_con_state->crtc) {
 8797			/* CRTC changes require notification. */
 8798			goto notify;
 8799		}
 8800
 8801		if (!new_con_state->crtc)
 8802			continue;
 8803
 8804		new_crtc_state = drm_atomic_get_new_crtc_state(
 8805			state, new_con_state->crtc);
 8806
 8807		if (!new_crtc_state)
 8808			continue;
 8809
 8810		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 8811			continue;
 8812
 8813	notify:
 8814		aconnector = to_amdgpu_dm_connector(connector);
 8815
 8816		mutex_lock(&adev->dm.audio_lock);
 8817		inst = aconnector->audio_inst;
 8818		aconnector->audio_inst = -1;
 8819		mutex_unlock(&adev->dm.audio_lock);
 8820
 8821		amdgpu_dm_audio_eld_notify(adev, inst);
 8822	}
 8823
 8824	/* Notify audio device additions. */
 8825	for_each_new_connector_in_state(state, connector, new_con_state, i) {
 8826		if (!new_con_state->crtc)
 8827			continue;
 8828
 8829		new_crtc_state = drm_atomic_get_new_crtc_state(
 8830			state, new_con_state->crtc);
 8831
 8832		if (!new_crtc_state)
 8833			continue;
 8834
 8835		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 8836			continue;
 8837
 8838		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
 8839		if (!new_dm_crtc_state->stream)
 8840			continue;
 8841
 8842		status = dc_stream_get_status(new_dm_crtc_state->stream);
 8843		if (!status)
 8844			continue;
 8845
 8846		aconnector = to_amdgpu_dm_connector(connector);
 8847
 8848		mutex_lock(&adev->dm.audio_lock);
 8849		inst = status->audio_inst;
 8850		aconnector->audio_inst = inst;
 8851		mutex_unlock(&adev->dm.audio_lock);
 8852
 8853		amdgpu_dm_audio_eld_notify(adev, inst);
 8854	}
 8855}
 8856
 8857/*
 8858 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 8859 * @crtc_state: the DRM CRTC state
 8860 * @stream_state: the DC stream state.
 8861 *
  8862 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 8863 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 8864 */
 8865static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
 8866						struct dc_stream_state *stream_state)
 8867{
 8868	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 8869}
 8870
 8871/**
  8872 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
 8873 * @state: The atomic state to commit
 8874 *
 8875 * This will tell DC to commit the constructed DC state from atomic_check,
  8876 * programming the hardware. Any failure here implies a hardware failure, since
  8877 * atomic check should have filtered out anything non-kosher.
 8878 */
 8879static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 8880{
 8881	struct drm_device *dev = state->dev;
 8882	struct amdgpu_device *adev = drm_to_adev(dev);
 8883	struct amdgpu_display_manager *dm = &adev->dm;
 8884	struct dm_atomic_state *dm_state;
 8885	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
 8886	uint32_t i, j;
 8887	struct drm_crtc *crtc;
 8888	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 8889	unsigned long flags;
 8890	bool wait_for_vblank = true;
 8891	struct drm_connector *connector;
 8892	struct drm_connector_state *old_con_state, *new_con_state;
 8893	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 8894	int crtc_disable_count = 0;
 8895	bool mode_set_reset_required = false;
 8896
 8897	trace_amdgpu_dm_atomic_commit_tail_begin(state);
 8898
 8899	drm_atomic_helper_update_legacy_modeset_state(dev, state);
 8900
 8901	dm_state = dm_atomic_get_new_state(state);
 8902	if (dm_state && dm_state->context) {
 8903		dc_state = dm_state->context;
 8904	} else {
 8905		/* No state changes, retain current state. */
 8906		dc_state_temp = dc_create_state(dm->dc);
 8907		ASSERT(dc_state_temp);
 8908		dc_state = dc_state_temp;
 8909		dc_resource_state_copy_construct_current(dm->dc, dc_state);
 8910	}
 8911
 8912	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
 8913				       new_crtc_state, i) {
 8914		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 8915
 8916		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 8917
 8918		if (old_crtc_state->active &&
 8919		    (!new_crtc_state->active ||
 8920		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 8921			manage_dm_interrupts(adev, acrtc, false);
 8922			dc_stream_release(dm_old_crtc_state->stream);
 8923		}
 8924	}
 8925
 8926	drm_atomic_helper_calc_timestamping_constants(state);
 8927
 8928	/* update changed items */
 8929	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 8930		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 8931
 8932		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 8933		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 8934
 8935		DRM_DEBUG_ATOMIC(
 8936			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
  8937			"planes_changed:%d, mode_changed:%d, active_changed:%d,"
 8938			"connectors_changed:%d\n",
 8939			acrtc->crtc_id,
 8940			new_crtc_state->enable,
 8941			new_crtc_state->active,
 8942			new_crtc_state->planes_changed,
 8943			new_crtc_state->mode_changed,
 8944			new_crtc_state->active_changed,
 8945			new_crtc_state->connectors_changed);
 8946
 8947		/* Disable cursor if disabling crtc */
 8948		if (old_crtc_state->active && !new_crtc_state->active) {
 8949			struct dc_cursor_position position;
 8950
 8951			memset(&position, 0, sizeof(position));
 8952			mutex_lock(&dm->dc_lock);
 8953			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
 8954			mutex_unlock(&dm->dc_lock);
 8955		}
 8956
 8957		/* Copy all transient state flags into dc state */
 8958		if (dm_new_crtc_state->stream) {
 8959			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
 8960							    dm_new_crtc_state->stream);
 8961		}
 8962
 8963		/* handles headless hotplug case, updating new_state and
 8964		 * aconnector as needed
 8965		 */
 8966
 8967		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
 8968
 8969			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
 8970
 8971			if (!dm_new_crtc_state->stream) {
 8972				/*
  8973				 * This can happen because of issues with the
  8974				 * userspace notification delivery: userspace
  8975				 * tries to set a mode on a display that is in
  8976				 * fact disconnected, so dc_sink is NULL on the
  8977				 * aconnector. We expect a mode reset to come
  8978				 * soon.
  8979				 *
  8980				 * This can also happen when an unplug occurs
  8981				 * during the resume sequence.
  8982				 *
  8983				 * In either case, we want to pretend we still
  8984				 * have a sink to keep the pipe running so that
  8985				 * hw state stays consistent with the sw state.
 8986				 */
 8987				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
 8988						__func__, acrtc->base.base.id);
 8989				continue;
 8990			}
 8991
 8992			if (dm_old_crtc_state->stream)
 8993				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 8994
 8995			pm_runtime_get_noresume(dev->dev);
 8996
 8997			acrtc->enabled = true;
 8998			acrtc->hw_mode = new_crtc_state->mode;
 8999			crtc->hwmode = new_crtc_state->mode;
 9000			mode_set_reset_required = true;
 9001		} else if (modereset_required(new_crtc_state)) {
 9002			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
 9003			/* i.e. reset mode */
 9004			if (dm_old_crtc_state->stream)
 9005				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 9006
 9007			mode_set_reset_required = true;
 9008		}
 9009	} /* for_each_crtc_in_state() */
 9010
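	/*
	 * Commit the constructed DC state in one go: PSR is disabled first
	 * whenever a mode set or reset happened, and per-frame CRTC master
	 * sync is applied before dc_commit_state() programs the hardware.
	 */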
 9011	if (dc_state) {
  9012		/* if there is a mode set or reset, disable eDP PSR */
 9013		if (mode_set_reset_required)
 9014			amdgpu_dm_psr_disable_all(dm);
 9015
 9016		dm_enable_per_frame_crtc_master_sync(dc_state);
 9017		mutex_lock(&dm->dc_lock);
 9018		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 9019#if defined(CONFIG_DRM_AMD_DC_DCN)
  9020		/* Allow idle optimization when vblank count is 0 for display off */
  9021		if (dm->active_vblank_irq_count == 0)
  9022			dc_allow_idle_optimizations(dm->dc, true);
 9023#endif
 9024		mutex_unlock(&dm->dc_lock);
 9025	}
 9026
 9027	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 9028		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 9029
 9030		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9031
 9032		if (dm_new_crtc_state->stream != NULL) {
 9033			const struct dc_stream_status *status =
 9034					dc_stream_get_status(dm_new_crtc_state->stream);
 9035
 9036			if (!status)
 9037				status = dc_stream_get_status_from_state(dc_state,
 9038									 dm_new_crtc_state->stream);
 9039			if (!status)
 9040				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
 9041			else
 9042				acrtc->otg_inst = status->primary_otg_inst;
 9043		}
 9044	}
 9045#ifdef CONFIG_DRM_AMD_DC_HDCP
 9046	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9047		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 9048		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 9049		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 9050
 9051		new_crtc_state = NULL;
 9052
 9053		if (acrtc)
 9054			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 9055
 9056		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9057
 9058		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
 9059		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
 9060			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
 9061			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 9062			dm_new_con_state->update_hdcp = true;
 9063			continue;
 9064		}
 9065
 9066		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
 9067			hdcp_update_display(
 9068				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
 9069				new_con_state->hdcp_content_type,
 9070				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
 9071	}
 9072#endif
 9073
 9074	/* Handle connector state changes */
 9075	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 9076		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 9077		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
 9078		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 9079		struct dc_surface_update dummy_updates[MAX_SURFACES];
 9080		struct dc_stream_update stream_update;
 9081		struct dc_info_packet hdr_packet;
 9082		struct dc_stream_status *status = NULL;
 9083		bool abm_changed, hdr_changed, scaling_changed;
 9084
 9085		memset(&dummy_updates, 0, sizeof(dummy_updates));
 9086		memset(&stream_update, 0, sizeof(stream_update));
 9087
 9088		if (acrtc) {
 9089			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 9090			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
 9091		}
 9092
 9093		/* Skip any modesets/resets */
 9094		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
 9095			continue;
 9096
 9097		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9098		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9099
 9100		scaling_changed = is_scaling_state_different(dm_new_con_state,
 9101							     dm_old_con_state);
 9102
 9103		abm_changed = dm_new_crtc_state->abm_level !=
 9104			      dm_old_crtc_state->abm_level;
 9105
 9106		hdr_changed =
 9107			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
 9108
 9109		if (!scaling_changed && !abm_changed && !hdr_changed)
 9110			continue;
 9111
 9112		stream_update.stream = dm_new_crtc_state->stream;
 9113		if (scaling_changed) {
 9114			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
 9115					dm_new_con_state, dm_new_crtc_state->stream);
 9116
 9117			stream_update.src = dm_new_crtc_state->stream->src;
 9118			stream_update.dst = dm_new_crtc_state->stream->dst;
 9119		}
 9120
 9121		if (abm_changed) {
 9122			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
 9123
 9124			stream_update.abm_level = &dm_new_crtc_state->abm_level;
 9125		}
 9126
 9127		if (hdr_changed) {
 9128			fill_hdr_info_packet(new_con_state, &hdr_packet);
 9129			stream_update.hdr_static_metadata = &hdr_packet;
 9130		}
 9131
 9132		status = dc_stream_get_status(dm_new_crtc_state->stream);
 9133
 9134		if (WARN_ON(!status))
 9135			continue;
 9136
 9137		WARN_ON(!status->plane_count);
 9138
 9139		/*
 9140		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
 9141		 * Here we create an empty update on each plane.
 9142		 * To fix this, DC should permit updating only stream properties.
 9143		 */
 9144		for (j = 0; j < status->plane_count; j++)
 9145			dummy_updates[j].surface = status->plane_states[0];
  9146
 9148		mutex_lock(&dm->dc_lock);
 9149		dc_commit_updates_for_stream(dm->dc,
 9150						     dummy_updates,
 9151						     status->plane_count,
 9152						     dm_new_crtc_state->stream,
 9153						     &stream_update,
 9154						     dc_state);
 9155		mutex_unlock(&dm->dc_lock);
 9156	}
 9157
 9158	/* Count number of newly disabled CRTCs for dropping PM refs later. */
 9159	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
 9160				      new_crtc_state, i) {
 9161		if (old_crtc_state->active && !new_crtc_state->active)
 9162			crtc_disable_count++;
 9163
 9164		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9165		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9166
 9167		/* For freesync config update on crtc state and params for irq */
 9168		update_stream_irq_parameters(dm, dm_new_crtc_state);
 9169
 9170		/* Handle vrr on->off / off->on transitions */
 9171		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
 9172						dm_new_crtc_state);
 9173	}
 9174
  9175	/*
 9176	 * Enable interrupts for CRTCs that are newly enabled or went through
 9177	 * a modeset. It was intentionally deferred until after the front end
 9178	 * state was modified to wait until the OTG was on and so the IRQ
 9179	 * handlers didn't access stale or invalid state.
 9180	 */
 9181	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 9182		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 9183#ifdef CONFIG_DEBUG_FS
 9184		bool configure_crc = false;
 9185		enum amdgpu_dm_pipe_crc_source cur_crc_src;
 9186#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 9187		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
 9188#endif
 9189		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 9190		cur_crc_src = acrtc->dm_irq_params.crc_src;
 9191		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 9192#endif
 9193		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9194
 9195		if (new_crtc_state->active &&
 9196		    (!old_crtc_state->active ||
 9197		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 9198			dc_stream_retain(dm_new_crtc_state->stream);
 9199			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
 9200			manage_dm_interrupts(adev, acrtc, true);
 9201
 9202#ifdef CONFIG_DEBUG_FS
  9203			/*
 9204			 * Frontend may have changed so reapply the CRC capture
 9205			 * settings for the stream.
 9206			 */
 9207			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9208
 9209			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
 9210				configure_crc = true;
 9211#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 9212				if (amdgpu_dm_crc_window_is_activated(crtc)) {
 9213					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 9214					acrtc->dm_irq_params.crc_window.update_win = true;
 9215					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
 9216					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
 9217					crc_rd_wrk->crtc = crtc;
 9218					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
 9219					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 9220				}
 9221#endif
 9222			}
 9223
 9224			if (configure_crc)
 9225				if (amdgpu_dm_crtc_configure_crc_source(
 9226					crtc, dm_new_crtc_state, cur_crc_src))
  9227					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
 9228#endif
 9229		}
 9230	}
 9231
 9232	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
 9233		if (new_crtc_state->async_flip)
 9234			wait_for_vblank = false;
 9235
 9236	/* update planes when needed per crtc*/
 9237	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
 9238		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9239
 9240		if (dm_new_crtc_state->stream)
 9241			amdgpu_dm_commit_planes(state, dc_state, dev,
 9242						dm, crtc, wait_for_vblank);
 9243	}
 9244
 9245	/* Update audio instances for each connector. */
 9246	amdgpu_dm_commit_audio(dev, state);
 9247
 9248#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
 9249	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 9250	/* restore the backlight level */
 9251	if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
 9252		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
 9253#endif
 9254	/*
 9255	 * send vblank event on all events not handled in flip and
 9256	 * mark consumed event for drm_atomic_helper_commit_hw_done
 9257	 */
 9258	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 9259	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 9260
 9261		if (new_crtc_state->event)
 9262			drm_send_event_locked(dev, &new_crtc_state->event->base);
 9263
 9264		new_crtc_state->event = NULL;
 9265	}
 9266	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 9267
 9268	/* Signal HW programming completion */
 9269	drm_atomic_helper_commit_hw_done(state);
 9270
 9271	if (wait_for_vblank)
 9272		drm_atomic_helper_wait_for_flip_done(dev, state);
 9273
 9274	drm_atomic_helper_cleanup_planes(dev, state);
 9275
 9276	/* return the stolen vga memory back to VRAM */
 9277	if (!adev->mman.keep_stolen_vga_memory)
 9278		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
 9279	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
 9280
 9281	/*
 9282	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
 9283	 * so we can put the GPU into runtime suspend if we're not driving any
 9284	 * displays anymore
 9285	 */
 9286	for (i = 0; i < crtc_disable_count; i++)
 9287		pm_runtime_put_autosuspend(dev->dev);
 9288	pm_runtime_mark_last_busy(dev->dev);
 9289
 9290	if (dc_state_temp)
 9291		dc_release_state(dc_state_temp);
 9292}
 9293
 9294
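/*
 * Build and commit a minimal atomic state - the connector, its CRTC with
 * mode_changed forced, and the primary plane - to restore the previous
 * display configuration without any help from userspace.
 */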
 9295static int dm_force_atomic_commit(struct drm_connector *connector)
 9296{
 9297	int ret = 0;
 9298	struct drm_device *ddev = connector->dev;
 9299	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
 9300	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
 9301	struct drm_plane *plane = disconnected_acrtc->base.primary;
 9302	struct drm_connector_state *conn_state;
 9303	struct drm_crtc_state *crtc_state;
 9304	struct drm_plane_state *plane_state;
 9305
 9306	if (!state)
 9307		return -ENOMEM;
 9308
 9309	state->acquire_ctx = ddev->mode_config.acquire_ctx;
 9310
 9311	/* Construct an atomic state to restore previous display setting */
 9312
 9313	/*
 9314	 * Attach connectors to drm_atomic_state
 9315	 */
 9316	conn_state = drm_atomic_get_connector_state(state, connector);
 9317
 9318	ret = PTR_ERR_OR_ZERO(conn_state);
 9319	if (ret)
 9320		goto out;
 9321
 9322	/* Attach crtc to drm_atomic_state*/
 9323	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
 9324
 9325	ret = PTR_ERR_OR_ZERO(crtc_state);
 9326	if (ret)
 9327		goto out;
 9328
 9329	/* force a restore */
 9330	crtc_state->mode_changed = true;
 9331
 9332	/* Attach plane to drm_atomic_state */
 9333	plane_state = drm_atomic_get_plane_state(state, plane);
 9334
 9335	ret = PTR_ERR_OR_ZERO(plane_state);
 9336	if (ret)
 9337		goto out;
 9338
 9339	/* Call commit internally with the state we just constructed */
 9340	ret = drm_atomic_commit(state);
 9341
 9342out:
 9343	drm_atomic_state_put(state);
 9344	if (ret)
 9345		DRM_ERROR("Restoring old state failed with %i\n", ret);
 9346
 9347	return ret;
 9348}
 9349
 9350/*
  9351 * This function handles all cases where a mode set does not come after a hotplug.
  9352 * This includes when a display is unplugged and then plugged back into the
  9353 * same port, and when running without usermode desktop manager support.
 9354 */
 9355void dm_restore_drm_connector_state(struct drm_device *dev,
 9356				    struct drm_connector *connector)
 9357{
 9358	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 9359	struct amdgpu_crtc *disconnected_acrtc;
 9360	struct dm_crtc_state *acrtc_state;
 9361
 9362	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
 9363		return;
 9364
 9365	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
 9366	if (!disconnected_acrtc)
 9367		return;
 9368
 9369	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
 9370	if (!acrtc_state->stream)
 9371		return;
 9372
 9373	/*
  9374	 * If the previous sink is not released and differs from the current one,
  9375	 * we deduce we are in a state where we cannot rely on a usermode call
  9376	 * to turn on the display, so we do it here.
 9377	 */
 9378	if (acrtc_state->stream->sink != aconnector->dc_sink)
 9379		dm_force_atomic_commit(&aconnector->base);
 9380}
 9381
 9382/*
  9383 * Grabs all modesetting locks to serialize against any blocking commits,
  9384 * and waits for completion of all non-blocking commits.
 9385 */
 9386static int do_aquire_global_lock(struct drm_device *dev,
 9387				 struct drm_atomic_state *state)
 9388{
 9389	struct drm_crtc *crtc;
 9390	struct drm_crtc_commit *commit;
 9391	long ret;
 9392
 9393	/*
  9394	 * Adding all modeset locks to acquire_ctx ensures
  9395	 * that when the framework releases it, the extra
  9396	 * locks we are taking here will get released too.
 9397	 */
 9398	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
 9399	if (ret)
 9400		return ret;
 9401
 9402	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 9403		spin_lock(&crtc->commit_lock);
 9404		commit = list_first_entry_or_null(&crtc->commit_list,
 9405				struct drm_crtc_commit, commit_entry);
 9406		if (commit)
 9407			drm_crtc_commit_get(commit);
 9408		spin_unlock(&crtc->commit_lock);
 9409
 9410		if (!commit)
 9411			continue;
 9412
 9413		/*
 9414		 * Make sure all pending HW programming completed and
 9415		 * page flips done
 9416		 */
 9417		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
 9418
 9419		if (ret > 0)
 9420			ret = wait_for_completion_interruptible_timeout(
 9421					&commit->flip_done, 10*HZ);
 9422
 9423		if (ret == 0)
  9424			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
  9425				  crtc->base.id, crtc->name);
 9426
 9427		drm_crtc_commit_put(commit);
 9428	}
 9429
 9430	return ret < 0 ? ret : 0;
 9431}
 9432
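/*
 * Derive the mod_freesync_config for a CRTC from the connector state:
 * VRR is supported when the mode's vrefresh falls inside the connector's
 * [min_vfreq, max_vfreq] window, and the resulting state is fixed,
 * variable or inactive depending on the current configuration.
 */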
 9433static void get_freesync_config_for_crtc(
 9434	struct dm_crtc_state *new_crtc_state,
 9435	struct dm_connector_state *new_con_state)
 9436{
 9437	struct mod_freesync_config config = {0};
 9438	struct amdgpu_dm_connector *aconnector =
 9439			to_amdgpu_dm_connector(new_con_state->base.connector);
 9440	struct drm_display_mode *mode = &new_crtc_state->base.mode;
 9441	int vrefresh = drm_mode_vrefresh(mode);
 9442	bool fs_vid_mode = false;
 9443
 9444	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
 9445					vrefresh >= aconnector->min_vfreq &&
 9446					vrefresh <= aconnector->max_vfreq;
 9447
 9448	if (new_crtc_state->vrr_supported) {
 9449		new_crtc_state->stream->ignore_msa_timing_param = true;
 9450		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
 9451
 9452		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
 9453		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
 9454		config.vsif_supported = true;
 9455		config.btr = true;
 9456
 9457		if (fs_vid_mode) {
 9458			config.state = VRR_STATE_ACTIVE_FIXED;
 9459			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
 9460			goto out;
 9461		} else if (new_crtc_state->base.vrr_enabled) {
 9462			config.state = VRR_STATE_ACTIVE_VARIABLE;
 9463		} else {
 9464			config.state = VRR_STATE_INACTIVE;
 9465		}
 9466	}
 9467out:
 9468	new_crtc_state->freesync_config = config;
 9469}
 9470
 9471static void reset_freesync_config_for_crtc(
 9472	struct dm_crtc_state *new_crtc_state)
 9473{
 9474	new_crtc_state->vrr_supported = false;
 9475
 9476	memset(&new_crtc_state->vrr_infopacket, 0,
 9477	       sizeof(new_crtc_state->vrr_infopacket));
 9478}
 9479
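/*
 * Returns true when only the vertical blanking placement differs between
 * the two modes: the pixel clock and all horizontal parameters must match,
 * vdisplay and the vsync pulse width (vsync_end - vsync_start) must match,
 * while vtotal and the vsync position must differ. This is intended to
 * catch a front-porch-only timing change, which freesync can absorb
 * without a full modeset.
 */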
 9480static bool
 9481is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 9482				 struct drm_crtc_state *new_crtc_state)
 9483{
 9484	struct drm_display_mode old_mode, new_mode;
 9485
 9486	if (!old_crtc_state || !new_crtc_state)
 9487		return false;
 9488
 9489	old_mode = old_crtc_state->mode;
 9490	new_mode = new_crtc_state->mode;
 9491
 9492	if (old_mode.clock       == new_mode.clock &&
 9493	    old_mode.hdisplay    == new_mode.hdisplay &&
 9494	    old_mode.vdisplay    == new_mode.vdisplay &&
 9495	    old_mode.htotal      == new_mode.htotal &&
 9496	    old_mode.vtotal      != new_mode.vtotal &&
 9497	    old_mode.hsync_start == new_mode.hsync_start &&
 9498	    old_mode.vsync_start != new_mode.vsync_start &&
 9499	    old_mode.hsync_end   == new_mode.hsync_end &&
 9500	    old_mode.vsync_end   != new_mode.vsync_end &&
 9501	    old_mode.hskew       == new_mode.hskew &&
 9502	    old_mode.vscan       == new_mode.vscan &&
 9503	    (old_mode.vsync_end - old_mode.vsync_start) ==
 9504	    (new_mode.vsync_end - new_mode.vsync_start))
 9505		return true;
 9506
 9507	return false;
 9508}
 9509
  9510	static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
 9511	uint64_t num, den, res;
 9512	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 9513
 9514	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
 9515
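	/*
	 * Fixed refresh in uHz: pixel clock (kHz, scaled to Hz and then to
	 * uHz) divided by the pixels per frame. E.g. a 148500 kHz clock with
	 * htotal 2200 and vtotal 1125 gives
	 * 148500000 * 1000000 / 2475000 = 60000000 uHz (60 Hz).
	 */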
 9516	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
 9517	den = (unsigned long long)new_crtc_state->mode.htotal *
 9518	      (unsigned long long)new_crtc_state->mode.vtotal;
 9519
 9520	res = div_u64(num, den);
 9521	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
 9522}
 9523
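/*
 * Handle one CRTC for atomic check. With enable == false the stream of a
 * changed/disabled CRTC is removed from the DC context; with enable ==
 * true a stream is (re)created and added, and scaling, ABM, color
 * management and freesync settings are refreshed, flagging when full
 * state validation is needed.
 */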
 9524static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 9525				struct drm_atomic_state *state,
 9526				struct drm_crtc *crtc,
 9527				struct drm_crtc_state *old_crtc_state,
 9528				struct drm_crtc_state *new_crtc_state,
 9529				bool enable,
 9530				bool *lock_and_validation_needed)
 9531{
 9532	struct dm_atomic_state *dm_state = NULL;
 9533	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 9534	struct dc_stream_state *new_stream;
 9535	int ret = 0;
 9536
 9537	/*
 9538	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
 9539	 * update changed items
 9540	 */
 9541	struct amdgpu_crtc *acrtc = NULL;
 9542	struct amdgpu_dm_connector *aconnector = NULL;
 9543	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
 9544	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
 9545
 9546	new_stream = NULL;
 9547
 9548	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 9549	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 9550	acrtc = to_amdgpu_crtc(crtc);
 9551	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 9552
 9553	/* TODO This hack should go away */
 9554	if (aconnector && enable) {
 9555		/* Make sure fake sink is created in plug-in scenario */
 9556		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
 9557							    &aconnector->base);
 9558		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
 9559							    &aconnector->base);
 9560
 9561		if (IS_ERR(drm_new_conn_state)) {
 9562			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
 9563			goto fail;
 9564		}
 9565
 9566		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
 9567		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
 9568
 9569		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 9570			goto skip_modeset;
 9571
 9572		new_stream = create_validate_stream_for_sink(aconnector,
 9573							     &new_crtc_state->mode,
 9574							     dm_new_conn_state,
 9575							     dm_old_crtc_state->stream);
 9576
 9577		/*
  9578		 * We can have no stream on ACTION_SET if a display
  9579		 * was disconnected during S3. In this case it is not an
  9580		 * error: the OS will be updated after detection and
  9581		 * will do the right thing on the next atomic commit.
 9582		 */
 9583
 9584		if (!new_stream) {
 9585			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
 9586					__func__, acrtc->base.base.id);
 9587			ret = -ENOMEM;
 9588			goto fail;
 9589		}
 9590
 9591		/*
 9592		 * TODO: Check VSDB bits to decide whether this should
 9593		 * be enabled or not.
 9594		 */
 9595		new_stream->triggered_crtc_reset.enabled =
 9596			dm->force_timing_sync;
 9597
 9598		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
 9599
 9600		ret = fill_hdr_info_packet(drm_new_conn_state,
 9601					   &new_stream->hdr_static_metadata);
 9602		if (ret)
 9603			goto fail;
 9604
 9605		/*
 9606		 * If we already removed the old stream from the context
 9607		 * (and set the new stream to NULL) then we can't reuse
 9608		 * the old stream even if the stream and scaling are unchanged.
 9609		 * We'll hit the BUG_ON and black screen.
 9610		 *
 9611		 * TODO: Refactor this function to allow this check to work
 9612		 * in all conditions.
 9613		 */
 9614		if (amdgpu_freesync_vid_mode &&
 9615		    dm_new_crtc_state->stream &&
 9616		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
 9617			goto skip_modeset;
 9618
 9619		if (dm_new_crtc_state->stream &&
 9620		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 9621		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 9622			new_crtc_state->mode_changed = false;
  9623			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
 9624					 new_crtc_state->mode_changed);
 9625		}
 9626	}
 9627
 9628	/* mode_changed flag may get updated above, need to check again */
 9629	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 9630		goto skip_modeset;
 9631
 9632	DRM_DEBUG_ATOMIC(
 9633		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
  9634		"planes_changed:%d, mode_changed:%d, active_changed:%d,"
 9635		"connectors_changed:%d\n",
 9636		acrtc->crtc_id,
 9637		new_crtc_state->enable,
 9638		new_crtc_state->active,
 9639		new_crtc_state->planes_changed,
 9640		new_crtc_state->mode_changed,
 9641		new_crtc_state->active_changed,
 9642		new_crtc_state->connectors_changed);
 9643
 9644	/* Remove stream for any changed/disabled CRTC */
 9645	if (!enable) {
 9646
 9647		if (!dm_old_crtc_state->stream)
 9648			goto skip_modeset;
 9649
 9650		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
 9651		    is_timing_unchanged_for_freesync(new_crtc_state,
 9652						     old_crtc_state)) {
 9653			new_crtc_state->mode_changed = false;
  9654			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
  9655					 new_crtc_state->mode_changed);
 9658
 9659			set_freesync_fixed_config(dm_new_crtc_state);
 9660
 9661			goto skip_modeset;
 9662		} else if (amdgpu_freesync_vid_mode && aconnector &&
 9663			   is_freesync_video_mode(&new_crtc_state->mode,
 9664						  aconnector)) {
 9665			struct drm_display_mode *high_mode;
 9666
 9667			high_mode = get_highest_refresh_rate_mode(aconnector, false);
 9668			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
 9669				set_freesync_fixed_config(dm_new_crtc_state);
 9670			}
 9671		}
 9672
 9673		ret = dm_atomic_get_state(state, &dm_state);
 9674		if (ret)
 9675			goto fail;
 9676
 9677		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
 9678				crtc->base.id);
 9679
 9680		/* i.e. reset mode */
 9681		if (dc_remove_stream_from_ctx(
 9682				dm->dc,
 9683				dm_state->context,
 9684				dm_old_crtc_state->stream) != DC_OK) {
 9685			ret = -EINVAL;
 9686			goto fail;
 9687		}
 9688
 9689		dc_stream_release(dm_old_crtc_state->stream);
 9690		dm_new_crtc_state->stream = NULL;
 9691
 9692		reset_freesync_config_for_crtc(dm_new_crtc_state);
 9693
 9694		*lock_and_validation_needed = true;
 9695
 9696	} else {/* Add stream for any updated/enabled CRTC */
 9697		/*
  9698		 * Quick fix to prevent a NULL pointer dereference on new_stream
  9699		 * when added MST connectors are not found in the existing
  9700		 * crtc_state in chained mode. TODO: dig out the root cause.
 9701		 */
 9702		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
 9703			goto skip_modeset;
 9704
 9705		if (modereset_required(new_crtc_state))
 9706			goto skip_modeset;
 9707
 9708		if (modeset_required(new_crtc_state, new_stream,
 9709				     dm_old_crtc_state->stream)) {
 9710
 9711			WARN_ON(dm_new_crtc_state->stream);
 9712
 9713			ret = dm_atomic_get_state(state, &dm_state);
 9714			if (ret)
 9715				goto fail;
 9716
 9717			dm_new_crtc_state->stream = new_stream;
 9718
 9719			dc_stream_retain(new_stream);
 9720
 9721			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
 9722					 crtc->base.id);
 9723
 9724			if (dc_add_stream_to_ctx(
 9725					dm->dc,
 9726					dm_state->context,
 9727					dm_new_crtc_state->stream) != DC_OK) {
 9728				ret = -EINVAL;
 9729				goto fail;
 9730			}
 9731
 9732			*lock_and_validation_needed = true;
 9733		}
 9734	}
 9735
 9736skip_modeset:
 9737	/* Release extra reference */
 9738	if (new_stream)
  9739		dc_stream_release(new_stream);
 9740
 9741	/*
 9742	 * We want to do dc stream updates that do not require a
 9743	 * full modeset below.
 9744	 */
 9745	if (!(enable && aconnector && new_crtc_state->active))
 9746		return 0;
 9747	/*
  9748	 * Given the above conditions, the dc state cannot be NULL because:
  9749	 * 1. we're in the process of enabling CRTCs (the stream has just
  9750	 *    been added to the dc context, or is already on the context),
  9751	 * 2. the CRTC has a valid connector attached, and
  9752	 * 3. it is currently active and enabled.
  9753	 * => The dc stream state currently exists.
 9754	 */
 9755	BUG_ON(dm_new_crtc_state->stream == NULL);
 9756
 9757	/* Scaling or underscan settings */
 9758	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
 9759				drm_atomic_crtc_needs_modeset(new_crtc_state))
 9760		update_stream_scaling_settings(
 9761			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
 9762
 9763	/* ABM settings */
 9764	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
 9765
 9766	/*
 9767	 * Color management settings. We also update color properties
 9768	 * when a modeset is needed, to ensure it gets reprogrammed.
 9769	 */
 9770	if (dm_new_crtc_state->base.color_mgmt_changed ||
 9771	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 9772		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
 9773		if (ret)
 9774			goto fail;
 9775	}
 9776
 9777	/* Update Freesync settings. */
 9778	get_freesync_config_for_crtc(dm_new_crtc_state,
 9779				     dm_new_conn_state);
 9780
 9781	return ret;
 9782
 9783fail:
 9784	if (new_stream)
 9785		dc_stream_release(new_stream);
 9786	return ret;
 9787}
 9788
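/*
 * Decide whether a plane must be removed from and re-added to the DC
 * context. Any change that can affect z-order, pipe acquisition or
 * bandwidth - plane added/removed, modeset, color management, scaling,
 * rotation, blending, alpha, colorspace, pixel format, tiling or DCC -
 * forces a reset of the plane.
 */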
 9789static bool should_reset_plane(struct drm_atomic_state *state,
 9790			       struct drm_plane *plane,
 9791			       struct drm_plane_state *old_plane_state,
 9792			       struct drm_plane_state *new_plane_state)
 9793{
 9794	struct drm_plane *other;
 9795	struct drm_plane_state *old_other_state, *new_other_state;
 9796	struct drm_crtc_state *new_crtc_state;
 9797	int i;
 9798
 9799	/*
  9800	 * TODO: Remove this hack once the checks below are sufficient
  9801	 * to determine when we need to reset all the planes on
 9802	 * the stream.
 9803	 */
 9804	if (state->allow_modeset)
 9805		return true;
 9806
 9807	/* Exit early if we know that we're adding or removing the plane. */
 9808	if (old_plane_state->crtc != new_plane_state->crtc)
 9809		return true;
 9810
 9811	/* old crtc == new_crtc == NULL, plane not in context. */
 9812	if (!new_plane_state->crtc)
 9813		return false;
 9814
 9815	new_crtc_state =
 9816		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
 9817
 9818	if (!new_crtc_state)
 9819		return true;
 9820
 9821	/* CRTC Degamma changes currently require us to recreate planes. */
 9822	if (new_crtc_state->color_mgmt_changed)
 9823		return true;
 9824
 9825	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
 9826		return true;
 9827
 9828	/*
 9829	 * If there are any new primary or overlay planes being added or
 9830	 * removed then the z-order can potentially change. To ensure
 9831	 * correct z-order and pipe acquisition the current DC architecture
 9832	 * requires us to remove and recreate all existing planes.
 9833	 *
 9834	 * TODO: Come up with a more elegant solution for this.
 9835	 */
 9836	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
 9837		struct amdgpu_framebuffer *old_afb, *new_afb;
 9838		if (other->type == DRM_PLANE_TYPE_CURSOR)
 9839			continue;
 9840
 9841		if (old_other_state->crtc != new_plane_state->crtc &&
 9842		    new_other_state->crtc != new_plane_state->crtc)
 9843			continue;
 9844
 9845		if (old_other_state->crtc != new_other_state->crtc)
 9846			return true;
 9847
 9848		/* Src/dst size and scaling updates. */
 9849		if (old_other_state->src_w != new_other_state->src_w ||
 9850		    old_other_state->src_h != new_other_state->src_h ||
 9851		    old_other_state->crtc_w != new_other_state->crtc_w ||
 9852		    old_other_state->crtc_h != new_other_state->crtc_h)
 9853			return true;
 9854
 9855		/* Rotation / mirroring updates. */
 9856		if (old_other_state->rotation != new_other_state->rotation)
 9857			return true;
 9858
 9859		/* Blending updates. */
 9860		if (old_other_state->pixel_blend_mode !=
 9861		    new_other_state->pixel_blend_mode)
 9862			return true;
 9863
 9864		/* Alpha updates. */
 9865		if (old_other_state->alpha != new_other_state->alpha)
 9866			return true;
 9867
 9868		/* Colorspace changes. */
 9869		if (old_other_state->color_range != new_other_state->color_range ||
 9870		    old_other_state->color_encoding != new_other_state->color_encoding)
 9871			return true;
 9872
 9873		/* Framebuffer checks fall at the end. */
 9874		if (!old_other_state->fb || !new_other_state->fb)
 9875			continue;
 9876
 9877		/* Pixel format changes can require bandwidth updates. */
 9878		if (old_other_state->fb->format != new_other_state->fb->format)
 9879			return true;
 9880
 9881		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
 9882		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
 9883
 9884		/* Tiling and DCC changes also require bandwidth updates. */
 9885		if (old_afb->tiling_flags != new_afb->tiling_flags ||
 9886		    old_afb->base.modifier != new_afb->base.modifier)
 9887			return true;
 9888	}
 9889
 9890	return false;
 9891}
 9892
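/*
 * Validate a framebuffer bound to the cursor plane: it must fit within
 * the hardware cursor limits, be displayed unscaled and uncropped, have
 * a pitch of exactly 64, 128 or 256 pixels that matches its width, and
 * (when no format modifier is supplied) use a linear layout.
 */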
 9893static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
 9894			      struct drm_plane_state *new_plane_state,
 9895			      struct drm_framebuffer *fb)
 9896{
 9897	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
 9898	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
 9899	unsigned int pitch;
 9900	bool linear;
 9901
 9902	if (fb->width > new_acrtc->max_cursor_width ||
 9903	    fb->height > new_acrtc->max_cursor_height) {
 9904		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
 9905				 new_plane_state->fb->width,
 9906				 new_plane_state->fb->height);
 9907		return -EINVAL;
 9908	}
 9909	if (new_plane_state->src_w != fb->width << 16 ||
 9910	    new_plane_state->src_h != fb->height << 16) {
 9911		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
 9912		return -EINVAL;
 9913	}
 9914
 9915	/* Pitch in pixels */
 9916	pitch = fb->pitches[0] / fb->format->cpp[0];
 9917
 9918	if (fb->width != pitch) {
  9919		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
 9920				 fb->width, pitch);
 9921		return -EINVAL;
 9922	}
 9923
 9924	switch (pitch) {
 9925	case 64:
 9926	case 128:
 9927	case 256:
 9928		/* FB pitch is supported by cursor plane */
 9929		break;
 9930	default:
 9931		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
 9932		return -EINVAL;
 9933	}
 9934
 9935	/* Core DRM takes care of checking FB modifiers, so we only need to
 9936	 * check tiling flags when the FB doesn't have a modifier. */
 9937	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
 9938		if (adev->family < AMDGPU_FAMILY_AI) {
 9939			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
 9940			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
 9941				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
 9942		} else {
 9943			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
 9944		}
 9945		if (!linear) {
  9946			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
 9947			return -EINVAL;
 9948		}
 9949	}
 9950
 9951	return 0;
 9952}
 9953
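/*
 * Plane counterpart of dm_update_crtc_state(): with enable == false the
 * plane's dc_plane_state is removed from the DC context; with enable ==
 * true a new dc_plane_state is created, filled from the DRM plane state
 * and added to the stream. Cursor planes are only sanity-checked here
 * since they are programmed outside of the DC context by
 * handle_cursor_update().
 */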
 9954static int dm_update_plane_state(struct dc *dc,
 9955				 struct drm_atomic_state *state,
 9956				 struct drm_plane *plane,
 9957				 struct drm_plane_state *old_plane_state,
 9958				 struct drm_plane_state *new_plane_state,
 9959				 bool enable,
 9960				 bool *lock_and_validation_needed)
 9961{
 9962
 9963	struct dm_atomic_state *dm_state = NULL;
 9964	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
 9965	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 9966	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
 9967	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
 9968	struct amdgpu_crtc *new_acrtc;
 9969	bool needs_reset;
 9970	int ret = 0;
 9971
 9972
 9973	new_plane_crtc = new_plane_state->crtc;
 9974	old_plane_crtc = old_plane_state->crtc;
 9975	dm_new_plane_state = to_dm_plane_state(new_plane_state);
 9976	dm_old_plane_state = to_dm_plane_state(old_plane_state);
 9977
 9978	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
 9979		if (!enable || !new_plane_crtc ||
 9980			drm_atomic_plane_disabling(plane->state, new_plane_state))
 9981			return 0;
 9982
 9983		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
 9984
 9985		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
 9986			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
 9987			return -EINVAL;
 9988		}
 9989
 9990		if (new_plane_state->fb) {
 9991			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
 9992						 new_plane_state->fb);
 9993			if (ret)
 9994				return ret;
 9995		}
 9996
 9997		return 0;
 9998	}
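	/*
	 * Cursor planes return early above: they are not backed by a
	 * dc_plane_state, and their scaling is validated separately in
	 * dm_check_crtc_cursor().
	 */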
 9999
10000	needs_reset = should_reset_plane(state, plane, old_plane_state,
10001					 new_plane_state);
10002
10003	/* Remove any changed/removed planes */
10004	if (!enable) {
10005		if (!needs_reset)
10006			return 0;
10007
10008		if (!old_plane_crtc)
10009			return 0;
10010
10011		old_crtc_state = drm_atomic_get_old_crtc_state(
10012				state, old_plane_crtc);
10013		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10014
10015		if (!dm_old_crtc_state->stream)
10016			return 0;
10017
10018		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10019				plane->base.id, old_plane_crtc->base.id);
10020
10021		ret = dm_atomic_get_state(state, &dm_state);
10022		if (ret)
10023			return ret;
10024
10025		if (!dc_remove_plane_from_context(
10026				dc,
10027				dm_old_crtc_state->stream,
10028				dm_old_plane_state->dc_state,
10029				dm_state->context)) {
10030
10031			return -EINVAL;
10032		}
10033
10034
10035		dc_plane_state_release(dm_old_plane_state->dc_state);
10036		dm_new_plane_state->dc_state = NULL;
10037
10038		*lock_and_validation_needed = true;
10039
10040	} else { /* Add new planes */
10041		struct dc_plane_state *dc_new_plane_state;
10042
10043		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10044			return 0;
10045
10046		if (!new_plane_crtc)
10047			return 0;
10048
10049		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10050		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10051
10052		if (!dm_new_crtc_state->stream)
10053			return 0;
10054
10055		if (!needs_reset)
10056			return 0;
10057
10058		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10059		if (ret)
10060			return ret;
10061
10062		WARN_ON(dm_new_plane_state->dc_state);
10063
10064		dc_new_plane_state = dc_create_plane_state(dc);
10065		if (!dc_new_plane_state)
10066			return -ENOMEM;
10067
10068		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10069				 plane->base.id, new_plane_crtc->base.id);
10070
10071		ret = fill_dc_plane_attributes(
10072			drm_to_adev(new_plane_crtc->dev),
10073			dc_new_plane_state,
10074			new_plane_state,
10075			new_crtc_state);
10076		if (ret) {
10077			dc_plane_state_release(dc_new_plane_state);
10078			return ret;
10079		}
10080
10081		ret = dm_atomic_get_state(state, &dm_state);
10082		if (ret) {
10083			dc_plane_state_release(dc_new_plane_state);
10084			return ret;
10085		}
10086
10087		/*
10088		 * Any atomic check errors that occur after this will
10089		 * not need a release. The plane state will be attached
10090		 * to the stream, and therefore part of the atomic
10091		 * state. It'll be released when the atomic state is
10092		 * cleaned.
10093		 */
10094		if (!dc_add_plane_to_context(
10095				dc,
10096				dm_new_crtc_state->stream,
10097				dc_new_plane_state,
10098				dm_state->context)) {
10099
10100			dc_plane_state_release(dc_new_plane_state);
10101			return -EINVAL;
10102		}
10103
10104		dm_new_plane_state->dc_state = dc_new_plane_state;
10105
10106		/* Tell DC to do a full surface update every time there
10107		 * is a plane change. Inefficient, but works for now.
10108		 */
10109		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10110
10111		*lock_and_validation_needed = true;
10112	}
10113
10114
10115	return ret;
10116}
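/*
 * Note: amdgpu_dm_atomic_check() calls dm_update_plane_state() in two passes,
 * first with enable == false to remove planes from the outgoing configuration
 * and then with enable == true to add planes to the new one, mirroring the
 * CRTC disable/enable passes below.
 */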
10117
10118static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10119				struct drm_crtc *crtc,
10120				struct drm_crtc_state *new_crtc_state)
10121{
10122	struct drm_plane_state *new_cursor_state, *new_primary_state;
10123	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10124
10125	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10126	 * cursor per pipe, but it's going to inherit the scaling and
10127	 * positioning from the underlying pipe. Check that the cursor plane's
10128	 * scaling matches the primary plane's. */
10129
10130	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10131	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10132	if (!new_cursor_state || !new_primary_state ||
10133	    !new_cursor_state->fb || !new_primary_state->fb) {
10134		return 0;
10135	}
10136
10137	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10138			 (new_cursor_state->src_w >> 16);
10139	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10140			 (new_cursor_state->src_h >> 16);
10141
10142	primary_scale_w = new_primary_state->crtc_w * 1000 /
10143			 (new_primary_state->src_w >> 16);
10144	primary_scale_h = new_primary_state->crtc_h * 1000 /
10145			 (new_primary_state->src_h >> 16);
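	/*
	 * Scales are in permille of the source size: e.g. a 64x64 cursor
	 * source (src_w = 64 << 16) shown in a 128x128 CRTC rectangle gives
	 * cursor_scale_w = 128 * 1000 / 64 = 2000, i.e. a 2x upscale.
	 */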
10146
10147	if (cursor_scale_w != primary_scale_w ||
10148	    cursor_scale_h != primary_scale_h) {
10149		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10150		return -EINVAL;
10151	}
10152
10153	return 0;
10154}
10155
10156#if defined(CONFIG_DRM_AMD_DC_DCN)
10157static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10158{
10159	struct drm_connector *connector;
10160	struct drm_connector_state *conn_state;
10161	struct amdgpu_dm_connector *aconnector = NULL;
10162	int i;
10163	for_each_new_connector_in_state(state, connector, conn_state, i) {
10164		if (conn_state->crtc != crtc)
10165			continue;
10166
10167		aconnector = to_amdgpu_dm_connector(connector);
10168		if (!aconnector->port || !aconnector->mst_port)
10169			aconnector = NULL;
10170		else
10171			break;
10172	}
10173
10174	if (!aconnector)
10175		return 0;
10176
10177	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10178}
10179#endif
10180
10181static int validate_overlay(struct drm_atomic_state *state)
10182{
10183	int i;
10184	struct drm_plane *plane;
10185	struct drm_plane_state *new_plane_state;
10186	struct drm_plane_state *primary_state, *overlay_state = NULL;
10187
10188	/* Check if primary plane is contained inside overlay */
10189	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10190		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10191			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10192				return 0;
10193
10194			overlay_state = new_plane_state;
10195			continue;
10196		}
10197	}
10198
10199	/* check if we're making changes to the overlay plane */
10200	if (!overlay_state)
10201		return 0;
10202
10203	/* check if overlay plane is enabled */
10204	if (!overlay_state->crtc)
10205		return 0;
10206
10207	/* find the primary plane for the CRTC that the overlay is enabled on */
10208	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10209	if (IS_ERR(primary_state))
10210		return PTR_ERR(primary_state);
10211
10212	/* check if primary plane is enabled */
10213	if (!primary_state->crtc)
10214		return 0;
10215
10216	/* Perform the bounds check to ensure the overlay plane covers the primary */
10217	if (primary_state->crtc_x < overlay_state->crtc_x ||
10218	    primary_state->crtc_y < overlay_state->crtc_y ||
10219	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10220	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10221		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10222		return -EINVAL;
10223	}
10224
10225	return 0;
10226}
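/*
 * Rationale for the coverage check above: there is no dedicated hardware
 * cursor plane (see the DCE/DCN note in dm_check_crtc_cursor()), so an
 * overlay that only partially covers the primary would leave regions of the
 * primary where the cursor could not be displayed correctly.
 */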
10227
10228/**
10229 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10230 * @dev: The DRM device
10231 * @state: The atomic state to commit
10232 *
10233 * Validate that the given atomic state is programmable by DC into hardware.
10234 * This involves constructing a &struct dc_state reflecting the new hardware
10235 * state we wish to commit, then querying DC to see if it is programmable. It's
10236 * important not to modify the existing DC state. Otherwise, atomic_check
10237 * may unexpectedly commit hardware changes.
10238 *
10239 * When validating the DC state, it's important that the right locks are
10240 * acquired. In the full update case, which removes/adds/updates streams on
10241 * one CRTC while flipping on another CRTC, acquiring the global lock
10242 * guarantees that any such full update commit will wait for completion of
10243 * any outstanding flips using DRM's synchronization events.
10244 *
10245 * Note that DM adds the affected connectors for all CRTCs in state, even when
10246 * that might not seem necessary. This is because DC stream creation requires
10247 * the DC sink, which is tied to the DRM connector state. Cleaning this up
10248 * should be possible but non-trivial - a possible TODO item.
10249 *
10250 * Return: 0 on success, or a negative error code if validation failed.
10251 */
10252static int amdgpu_dm_atomic_check(struct drm_device *dev,
10253				  struct drm_atomic_state *state)
10254{
10255	struct amdgpu_device *adev = drm_to_adev(dev);
10256	struct dm_atomic_state *dm_state = NULL;
10257	struct dc *dc = adev->dm.dc;
10258	struct drm_connector *connector;
10259	struct drm_connector_state *old_con_state, *new_con_state;
10260	struct drm_crtc *crtc;
10261	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10262	struct drm_plane *plane;
10263	struct drm_plane_state *old_plane_state, *new_plane_state;
10264	enum dc_status status;
10265	int ret, i;
10266	bool lock_and_validation_needed = false;
10267	struct dm_crtc_state *dm_old_crtc_state;
10268#if defined(CONFIG_DRM_AMD_DC_DCN)
10269	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10270#endif
10271
10272	trace_amdgpu_dm_atomic_check_begin(state);
10273
10274	ret = drm_atomic_helper_check_modeset(dev, state);
10275	if (ret)
10276		goto fail;
10277
10278	/* Check connector changes */
10279	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10280		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10281		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10282
10283		/* Skip connectors that are disabled or part of a modeset already. */
10284		if (!old_con_state->crtc && !new_con_state->crtc)
10285			continue;
10286
10287		if (!new_con_state->crtc)
10288			continue;
10289
10290		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10291		if (IS_ERR(new_crtc_state)) {
10292			ret = PTR_ERR(new_crtc_state);
10293			goto fail;
10294		}
10295
10296		if (dm_old_con_state->abm_level !=
10297		    dm_new_con_state->abm_level)
10298			new_crtc_state->connectors_changed = true;
10299	}
10300
10301#if defined(CONFIG_DRM_AMD_DC_DCN)
10302	if (dc_resource_is_dsc_encoding_supported(dc)) {
10303		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10304			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10305				ret = add_affected_mst_dsc_crtcs(state, crtc);
10306				if (ret)
10307					goto fail;
10308			}
10309		}
10310	}
10311#endif
10312	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10313		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10314
10315		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10316		    !new_crtc_state->color_mgmt_changed &&
10317		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10318		    !dm_old_crtc_state->dsc_force_changed)
10319			continue;
10320
10321		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10322		if (ret)
10323			goto fail;
10324
10325		if (!new_crtc_state->enable)
10326			continue;
10327
10328		ret = drm_atomic_add_affected_connectors(state, crtc);
10329		if (ret)
10330			goto fail;
10331
10332		ret = drm_atomic_add_affected_planes(state, crtc);
10333		if (ret)
10334			goto fail;
10335
10336		if (dm_old_crtc_state->dsc_force_changed)
10337			new_crtc_state->mode_changed = true;
10338	}
10339
10340	/*
10341	 * Add all primary and overlay planes on the CRTC to the state
10342	 * whenever a plane is enabled to maintain correct z-ordering
10343	 * and to enable fast surface updates.
10344	 */
10345	drm_for_each_crtc(crtc, dev) {
10346		bool modified = false;
10347
10348		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10349			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10350				continue;
10351
10352			if (new_plane_state->crtc == crtc ||
10353			    old_plane_state->crtc == crtc) {
10354				modified = true;
10355				break;
10356			}
10357		}
10358
10359		if (!modified)
10360			continue;
10361
10362		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10363			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10364				continue;
10365
10366			new_plane_state =
10367				drm_atomic_get_plane_state(state, plane);
10368
10369			if (IS_ERR(new_plane_state)) {
10370				ret = PTR_ERR(new_plane_state);
10371				goto fail;
10372			}
10373		}
10374	}
10375
10376	/* Remove existing planes if they are modified */
10377	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10378		ret = dm_update_plane_state(dc, state, plane,
10379					    old_plane_state,
10380					    new_plane_state,
10381					    false,
10382					    &lock_and_validation_needed);
10383		if (ret)
10384			goto fail;
10385	}
10386
10387	/* Disable all crtcs which require disable */
10388	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10389		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10390					   old_crtc_state,
10391					   new_crtc_state,
10392					   false,
10393					   &lock_and_validation_needed);
10394		if (ret)
10395			goto fail;
10396	}
10397
10398	/* Enable all crtcs which require enable */
10399	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10400		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10401					   old_crtc_state,
10402					   new_crtc_state,
10403					   true,
10404					   &lock_and_validation_needed);
10405		if (ret)
10406			goto fail;
10407	}
10408
10409	ret = validate_overlay(state);
10410	if (ret)
10411		goto fail;
10412
10413	/* Add new/modified planes */
10414	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10415		ret = dm_update_plane_state(dc, state, plane,
10416					    old_plane_state,
10417					    new_plane_state,
10418					    true,
10419					    &lock_and_validation_needed);
10420		if (ret)
10421			goto fail;
10422	}
10423
10424	/* Run this here since we want to validate the streams we created */
10425	ret = drm_atomic_helper_check_planes(dev, state);
10426	if (ret)
10427		goto fail;
10428
10429	/* Check cursor planes scaling */
10430	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10431		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10432		if (ret)
10433			goto fail;
10434	}
10435
10436	if (state->legacy_cursor_update) {
10437		/*
10438		 * This is a fast cursor update coming from the plane update
10439		 * helper, check if it can be done asynchronously for better
10440		 * performance.
10441		 */
10442		state->async_update =
10443			!drm_atomic_helper_async_check(dev, state);
10444
10445		/*
10446		 * Skip the remaining global validation if this is an async
10447		 * update. Cursor updates can be done without affecting
10448		 * state or bandwidth calcs and this avoids the performance
10449		 * penalty of locking the private state object and
10450		 * allocating a new dc_state.
10451		 */
10452		if (state->async_update)
10453			return 0;
10454	}
10455
10456	/* Check scaling and underscan changes */
10457	/* TODO: Scaling-change validation was removed due to the inability to
10458	 * commit a new stream into the context w/o causing a full reset. Need
10459	 * to decide how to handle this.
10460	 */
10461	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10462		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10463		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10464		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10465
10466		/* Skip any modesets/resets */
10467		if (!acrtc || drm_atomic_crtc_needs_modeset(
10468				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10469			continue;
10470
10471		/* Skip anything that is not a scaling or underscan change */
10472		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10473			continue;
10474
10475		lock_and_validation_needed = true;
10476	}
10477
10478	/*
10479	 * Streams and planes are reset when there are changes that affect
10480	 * bandwidth. Anything that affects bandwidth needs to go through
10481	 * DC global validation to ensure that the configuration can be applied
10482	 * to hardware.
10483	 *
10484	 * We currently have to stall out here in atomic_check for outstanding
10485	 * commits to finish in this case because our IRQ handlers reference
10486	 * DRM state directly - we can end up disabling interrupts too early
10487	 * if we don't.
10488	 *
10489	 * TODO: Remove this stall and drop DM state private objects.
10490	 */
10491	if (lock_and_validation_needed) {
10492		ret = dm_atomic_get_state(state, &dm_state);
10493		if (ret)
10494			goto fail;
10495
10496		ret = do_aquire_global_lock(dev, state);
10497		if (ret)
10498			goto fail;
10499
10500#if defined(CONFIG_DRM_AMD_DC_DCN)
10501		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10502			goto fail;
10503
10504		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10505		if (ret)
10506			goto fail;
10507#endif
10508
10509		/*
10510		 * Perform validation of the MST topology in the state:
10511		 * we need to perform the MST atomic check before calling
10512		 * dc_validate_global_state(), or we may get stuck in an
10513		 * infinite loop and eventually hang.
10514		 */
10515		ret = drm_dp_mst_atomic_check(state);
10516		if (ret)
10517			goto fail;
10518		status = dc_validate_global_state(dc, dm_state->context, false);
10519		if (status != DC_OK) {
10520			drm_dbg_atomic(dev,
10521			       "DC global validation failure: %s (%d)\n",
10522				       dc_status_to_str(status), status);
10523			ret = -EINVAL;
10524			goto fail;
10525		}
10526	} else {
10527		/*
10528		 * The commit is a fast update. Fast updates shouldn't change
10529		 * the DC context or affect global validation, and they can
10530		 * have their commit work done in parallel with other commits
10531		 * not touching the same resource. If we have a new DC context
10532		 * as part of the DM atomic state from validation, we need to
10533		 * free it and retain the existing one instead.
10534		 *
10535		 * Furthermore, since the DM atomic state only contains the DC
10536		 * context and can safely be annulled, we can free the state
10537		 * and clear the associated private object now to free
10538		 * some memory and avoid a possible use-after-free later.
10539		 */
10540
10541		for (i = 0; i < state->num_private_objs; i++) {
10542			struct drm_private_obj *obj = state->private_objs[i].ptr;
10543
10544			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10545				int j = state->num_private_objs - 1;
10546
10547				dm_atomic_destroy_state(obj,
10548						state->private_objs[i].state);
10549
10550				/* If i is not at the end of the array then the
10551				 * last element needs to be moved to where i was
10552				 * before the array can safely be truncated.
10553				 */
10554				if (i != j)
10555					state->private_objs[i] =
10556						state->private_objs[j];
10557
10558				state->private_objs[j].ptr = NULL;
10559				state->private_objs[j].state = NULL;
10560				state->private_objs[j].old_state = NULL;
10561				state->private_objs[j].new_state = NULL;
10562
10563				state->num_private_objs = j;
10564				break;
10565			}
10566		}
10567	}
10568
10569	/* Store the overall update type for use later in atomic check. */
10570	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10571		struct dm_crtc_state *dm_new_crtc_state =
10572			to_dm_crtc_state(new_crtc_state);
10573
10574		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10575							 UPDATE_TYPE_FULL :
10576							 UPDATE_TYPE_FAST;
10577	}
10578
10579	/* Must be success (ret == 0) at this point */
10580	WARN_ON(ret);
10581
10582	trace_amdgpu_dm_atomic_check_finish(state, ret);
10583
10584	return ret;
10585
10586fail:
10587	if (ret == -EDEADLK)
10588		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10589	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10590		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10591	else
10592		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10593
10594	trace_amdgpu_dm_atomic_check_finish(state, ret);
10595
10596	return ret;
10597}
10598
10599static bool is_dp_capable_without_timing_msa(struct dc *dc,
10600					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10601{
10602	uint8_t dpcd_data;
10603	bool capable = false;
10604
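	/*
	 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of the DOWN_STREAM_PORT_COUNT
	 * DPCD register (0x0007); a sink that sets it can ignore the MSA
	 * timing parameters, which is a prerequisite for variable refresh.
	 */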
10605	if (amdgpu_dm_connector->dc_link &&
10606		dm_helpers_dp_read_dpcd(
10607				NULL,
10608				amdgpu_dm_connector->dc_link,
10609				DP_DOWN_STREAM_PORT_COUNT,
10610				&dpcd_data,
10611				sizeof(dpcd_data))) {
10612		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10613	}
10614
10615	return capable;
10616}
10617
10618static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10619		uint8_t *edid_ext, int len,
10620		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10621{
10622	int i;
10623	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10624	struct dc *dc = adev->dm.dc;
10625
10626	/* send extension block to DMCU for parsing */
10627	for (i = 0; i < len; i += 8) {
10628		bool res;
10629		int offset;
10630
10631		/* send 8 bytes at a time */
10632		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10633			return false;
10634
10635		if (i + 8 == len) {
10636			/* EDID block send completed, expect result */
10637			int version, min_rate, max_rate;
10638
10639			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10640			if (res) {
10641				/* amd vsdb found */
10642				vsdb_info->freesync_supported = 1;
10643				vsdb_info->amd_vsdb_version = version;
10644				vsdb_info->min_refresh_rate_hz = min_rate;
10645				vsdb_info->max_refresh_rate_hz = max_rate;
10646				return true;
10647			}
10648			/* not amd vsdb */
10649			return false;
10650		}
10651
10652		/* check for ack */
10653		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10654		if (!res)
10655			return false;
10656	}
10657
10658	return false;
10659}
10660
10661static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10662		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10663{
10664	uint8_t *edid_ext = NULL;
10665	int i;
10666	bool valid_vsdb_found = false;
10667
10668	/*----- drm_find_cea_extension() -----*/
10669	/* No EDID or EDID extensions */
10670	if (edid == NULL || edid->extensions == 0)
10671		return -ENODEV;
10672
10673	/* Find CEA extension */
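	/* Each EDID block is EDID_LENGTH (128) bytes, so extension block i
	 * starts at byte offset EDID_LENGTH * (i + 1) from the base block.
	 */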
10674	for (i = 0; i < edid->extensions; i++) {
10675		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10676		if (edid_ext[0] == CEA_EXT)
10677			break;
10678	}
10679
10680	if (i == edid->extensions)
10681		return -ENODEV;
10682
10683	/*----- cea_db_offsets() -----*/
10684	if (edid_ext[0] != CEA_EXT)
10685		return -ENODEV;
10686
10687	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10688
10689	return valid_vsdb_found ? i : -ENODEV;
10690}
10691
10692void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10693					struct edid *edid)
10694{
10695	int i = 0;
10696	struct detailed_timing *timing;
10697	struct detailed_non_pixel *data;
10698	struct detailed_data_monitor_range *range;
10699	struct amdgpu_dm_connector *amdgpu_dm_connector =
10700			to_amdgpu_dm_connector(connector);
10701	struct dm_connector_state *dm_con_state = NULL;
10702
10703	struct drm_device *dev = connector->dev;
10704	struct amdgpu_device *adev = drm_to_adev(dev);
10705	bool freesync_capable = false;
10706	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10707
10708	if (!connector->state) {
10709		DRM_ERROR("%s - Connector has no state\n", __func__);
10710		goto update;
10711	}
10712
10713	if (!edid) {
10714		dm_con_state = to_dm_connector_state(connector->state);
10715
10716		amdgpu_dm_connector->min_vfreq = 0;
10717		amdgpu_dm_connector->max_vfreq = 0;
10718		amdgpu_dm_connector->pixel_clock_mhz = 0;
10719
10720		goto update;
10721	}
10722
10723	dm_con_state = to_dm_connector_state(connector->state);
10724
10725	if (!amdgpu_dm_connector->dc_sink) {
10726		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
10727		goto update;
10728	}
10729	if (!adev->dm.freesync_module)
10730		goto update;
10731
10732
10733	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10734	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10735		bool edid_check_required = false;
10736
10737		if (edid) {
10738			edid_check_required = is_dp_capable_without_timing_msa(
10739						adev->dm.dc,
10740						amdgpu_dm_connector);
10741		}
10742
10743		if (edid_check_required && (edid->version > 1 ||
10744		    (edid->version == 1 && edid->revision > 1))) {
10745			for (i = 0; i < 4; i++) {
10746
10747				timing	= &edid->detailed_timings[i];
10748				data	= &timing->data.other_data;
10749				range	= &data->data.range;
10750				/*
10751				 * Check if monitor has continuous frequency mode
10752				 */
10753				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10754					continue;
10755				/*
10756				 * Check for flag range limits only. If flag == 1 then
10757				 * no additional timing information provided.
10758				 * Default GTF, GTF Secondary curve and CVT are not
10759				 * supported
10760				 */
10761				if (range->flags != 1)
10762					continue;
10763
10764				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10765				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
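				/* The EDID range descriptor stores the max
				 * pixel clock in 10 MHz units; convert to MHz.
				 */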
10766				amdgpu_dm_connector->pixel_clock_mhz =
10767					range->pixel_clock_mhz * 10;
10768
10769				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10770				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10771
10772				break;
10773			}
10774
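			/*
			 * Only advertise FreeSync when the monitor exposes a
			 * usable VRR window: the range must span more than
			 * 10 Hz.
			 */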
10775			if (amdgpu_dm_connector->max_vfreq -
10776			    amdgpu_dm_connector->min_vfreq > 10) {
10777
10778				freesync_capable = true;
10779			}
10780		}
10781	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10782		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10783		if (i >= 0 && vsdb_info.freesync_supported) {
10784			timing  = &edid->detailed_timings[i];
10785			data    = &timing->data.other_data;
10786
10787			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10788			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10789			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10790				freesync_capable = true;
10791
10792			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10793			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10794		}
10795	}
10796
10797update:
10798	if (dm_con_state)
10799		dm_con_state->freesync_capable = freesync_capable;
10800
10801	if (connector->vrr_capable_property)
10802		drm_connector_set_vrr_capable_property(connector,
10803						       freesync_capable);
10804}
10805
10806void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10807{
10808	struct amdgpu_device *adev = drm_to_adev(dev);
10809	struct dc *dc = adev->dm.dc;
10810	int i;
10811
10812	mutex_lock(&adev->dm.dc_lock);
10813	if (dc->current_state) {
10814		for (i = 0; i < dc->current_state->stream_count; ++i)
10815			dc->current_state->streams[i]
10816				->triggered_crtc_reset.enabled =
10817				adev->dm.force_timing_sync;
10818
10819		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10820		dc_trigger_sync(dc, dc->current_state);
10821	}
10822	mutex_unlock(&adev->dm.dc_lock);
10823}
10824
10825void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10826		       uint32_t value, const char *func_name)
10827{
10828#ifdef DM_CHECK_ADDR_0
10829	if (address == 0) {
10830		DC_ERR("invalid register write. address = 0\n");
10831		return;
10832	}
10833#endif
10834	cgs_write_register(ctx->cgs_device, address, value);
10835	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10836}
10837
10838uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10839			  const char *func_name)
10840{
10841	uint32_t value;
10842#ifdef DM_CHECK_ADDR_0
10843	if (address == 0) {
10844		DC_ERR("invalid register read; address = 0\n");
10845		return 0;
10846	}
10847#endif
10848
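	/*
	 * Reads are not offloaded while a DMUB register-write gather is in
	 * progress; reaching this point mid-gather indicates a programming
	 * error, hence the ASSERT.
	 */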
10849	if (ctx->dmub_srv &&
10850	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10851	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10852		ASSERT(false);
10853		return 0;
10854	}
10855
10856	value = cgs_read_register(ctx->cgs_device, address);
10857
10858	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10859
10860	return value;
10861}
10862
10863int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10864				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10865{
10866	struct amdgpu_device *adev = ctx->driver_context;
10867	int ret = 0;
10868
10869	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10870	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
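	/*
	 * wait_for_completion_interruptible_timeout() returns 0 when the
	 * 10 second timeout expires without the DMUB reply completing.
	 */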
10871	if (ret == 0) {
10872		*operation_result = AUX_RET_ERROR_TIMEOUT;
10873		return -1;
10874	}
10875	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10876
10877	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10878		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10879
10880		/* For the read case, copy the reply data into the payload */
10881		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10882		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10883			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10884			       adev->dm.dmub_notify->aux_reply.length);
10885	}
10886
10887	return adev->dm.dmub_notify->aux_reply.length;
10888}