/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
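
/*
 * The two helpers above implement the usual index/data access pattern:
 * the endpoint register number is written to ..._ENDPOINT_INDEX and the
 * payload is then read from or written to ..._ENDPOINT_DATA, with
 * audio_endpt_idx_lock keeping the two-step sequence atomic.  Callers in
 * this file go through the RREG32_AUDIO_ENDPT()/WREG32_AUDIO_ENDPT()
 * wrappers, e.g. (illustrative only):
 *
 *	tmp = RREG32_AUDIO_ENDPT(pin->offset,
 *			ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 */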

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to flip
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: flip at hsync (asynchronously) rather than waiting for vblank
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+).
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
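
/*
 * Note on the polarity dance above: the HPD interrupt triggers on a
 * sense/polarity match, so after sampling the current sense state the
 * polarity bit is programmed to the opposite state.  Each connect or
 * disconnect transition then raises one interrupt, and the detect path
 * re-arms the pin by calling dce_v6_0_hpd_set_polarity() again.
 */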

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; doing so
			 * breaks the aux dp channel on imac.  Keeping it off
			 * helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
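
/*
 * In plain math the 20.12 fixed-point sequence above computes:
 *	bandwidth [MB/s] = (yclk / 1000) * (dram_channels * 4 bytes) * 0.7
 * e.g. (illustrative values, not from any particular board) yclk =
 * 1000000 kHz on 2 channels gives 1000 * 8 * 0.7 = 5600 MB/s of raw
 * DRAM bandwidth.
 */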

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
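
/*
 * Same formula as the raw-bandwidth helper, with the 0.7 efficiency
 * factor replaced by the worst-case 0.3 display allocation; the
 * illustrative configuration above would leave 1000 * 8 * 0.3 =
 * 2400 MB/s for the display controllers.
 */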

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
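
/*
 * I.e. bandwidth [MB/s] = (sclk / 1000) * 32 bytes * 0.8; an 800 MHz
 * engine clock (illustrative) would return 800 * 32 * 0.8 = 20480 MB/s.
 */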

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
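
/*
 * Identical shape again: bandwidth [MB/s] = (disp_clk / 1000) * 32 * 0.8,
 * i.e. 32 bytes per DMIF request at 80% request efficiency.
 */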

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
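
/*
 * I.e. average bandwidth = src_width * bytes_per_pixel * vsc / line_time.
 * Illustrative 1080p case: 1920 pixels * 4 bytes with vsc = 1 and a line
 * time of (active + blank) = 14815 ns gives 7680 B / 14.815 us, roughly
 * 518 MB/s of sustained fetch for that head.
 */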

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
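
/*
 * Illustrative single-head walk-through of the sums above: with
 * available_bandwidth = 5000 MB/s, worst_chunk_return_time =
 * 512*8*1000/5000 = 819 ns and cursor_line_pair_return_time =
 * 128*4*1000/5000 = 102 ns, so other_heads_data_return_time =
 * 2*819 + 1*102 = 1740 ns; at a 148500 kHz display clock, dc_latency =
 * 40000000/148500 is about 269 ns, giving a base latency of roughly
 * 4009 ns before any line-fill-time correction is added.
 */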

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

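		/*
		 * The two blocks below convert each latency watermark from
		 * ns into pixels of scanout progress:
		 *	mark = latency_ns * (pclk_kHz / 1000) * hsc / 1000 / 16
		 * The trailing /16 suggests the PRIORITY_*_CNT mark fields
		 * count in 16-pixel units; e.g. (illustrative) 4009 ns at
		 * 148.5 MHz with hsc = 1 is ~595 pixels, i.e. a mark of ~37.
		 */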
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				   struct amdgpu_crtc *amdgpu_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
1033
1034/**
1035 *
1036 * dce_v6_0_bandwidth_update - program display watermarks
1037 *
1038 * @adev: amdgpu_device pointer
1039 *
1040 * Calculate and program the display watermarks and line
1041 * buffer allocation (CIK).
1042 */
1043static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1044{
1045	struct drm_display_mode *mode0 = NULL;
1046	struct drm_display_mode *mode1 = NULL;
1047	u32 num_heads = 0, lb_size;
1048	int i;
1049
1050	if (!adev->mode_info.mode_config_initialized)
1051		return;
1052
1053	amdgpu_display_update_priority(adev);
1054
1055	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1056		if (adev->mode_info.crtcs[i]->base.enabled)
1057			num_heads++;
1058	}
1059	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1060		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1061		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1062		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1063		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1064		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1065		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1066	}
1067}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
					PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
		             dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
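
/*
 * The N/CTS pairs written above come from amdgpu_afmt_acr() and follow
 * the HDMI audio clock regeneration relation
 *	f_audio = f_pixel * N / (128 * CTS)
 * e.g. the spec-recommended N = 6144, CTS = 148500 at a 148.5 MHz pixel
 * clock regenerates 148500000 * 6144 / (128 * 148500) = 48000 Hz.
 */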

static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 tmp;

	/*
	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
	 * Express [24MHz / target pixel clock] as an exact rational
	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is
	 * the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				DCCG_AUDIO_DTO_SEL, 0);
	} else if (ENCODER_MODE_IS_DP(em)) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				DCCG_AUDIO_DTO_SEL, 1);
	}
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}
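
/*
 * Worked example (illustrative): for a 148500 kHz pixel clock the code
 * above programs PHASE = 24000 and MODULE = 148500, so the DTO divides
 * the pixel clock by 148500/24000 to recover the fixed 24 MHz audio
 * reference (24000 kHz / 148500 kHz as an exact ratio).
 */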
1493
1494static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1495{
1496	struct drm_device *dev = encoder->dev;
1497	struct amdgpu_device *adev = dev->dev_private;
1498	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1499	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1500	u32 tmp;
1501
1502	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1503	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1504	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1505
1506	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1507	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1508	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1509
1510	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1511	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1512	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1513
1514	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1515	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1516	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1517	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1518	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1519	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1520	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1521	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1522
1523	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1524	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1525	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1526
1527	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1528	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1529	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1530	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1531
1532	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1533	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1534	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1535	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1536}
1537
1538static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1539{
1540	struct drm_device *dev = encoder->dev;
1541	struct amdgpu_device *adev = dev->dev_private;
1542	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1543	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1544	u32 tmp;
1545
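	/* AVMUTE is carried in the HDMI General Control (GC) packet */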
1546	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1547	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1548	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1549}
1550
1551static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1552{
1553	struct drm_device *dev = encoder->dev;
1554	struct amdgpu_device *adev = dev->dev_private;
1555	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1556	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1557	u32 tmp;
1558
1559	if (enable) {
1560		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1561		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1562		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1563		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1564		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1565		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1566
1567		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1568		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1569		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1570
1571		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1572		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1573		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1574	} else {
1575		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1576		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1577		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1578		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1579		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1580		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1581
1582		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1583		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1584		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1585	}
1586}
1587
1588static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1589{
1590	struct drm_device *dev = encoder->dev;
1591	struct amdgpu_device *adev = dev->dev_private;
1592	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1593	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1594	u32 tmp;
1595
1596	if (enable) {
1597		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1598		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1599		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1600
1601		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1602		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1603		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1604
1605		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1606		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1607		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1608		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1609		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1610		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1611	} else {
1612		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1613	}
1614}
1615
1616static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1617				  struct drm_display_mode *mode)
1618{
1619	struct drm_device *dev = encoder->dev;
1620	struct amdgpu_device *adev = dev->dev_private;
1621	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1622	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1623	struct drm_connector *connector;
1624	struct amdgpu_connector *amdgpu_connector = NULL;
1625	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1626	int bpc = 8;
1627
1628	if (!dig || !dig->afmt)
1629		return;
1630
1631	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1632		if (connector->encoder == encoder) {
1633			amdgpu_connector = to_amdgpu_connector(connector);
1634			break;
1635		}
1636	}
1637
1638	if (!amdgpu_connector) {
1639		DRM_ERROR("Couldn't find encoder's connector\n");
1640		return;
1641	}
1642
1643	if (!dig->afmt->enabled)
1644		return;
1645
1646	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1647	if (!dig->afmt->pin)
1648		return;
1649
1650	if (encoder->crtc) {
1651		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1652		bpc = amdgpu_crtc->bpc;
1653	}
1654
1655	/* disable audio before setting up hw */
1656	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1657
1658	dce_v6_0_audio_set_mute(encoder, true);
1659	dce_v6_0_audio_write_speaker_allocation(encoder);
1660	dce_v6_0_audio_write_sad_regs(encoder);
1661	dce_v6_0_audio_write_latency_fields(encoder, mode);
1662	if (em == ATOM_ENCODER_MODE_HDMI) {
1663		dce_v6_0_audio_set_dto(encoder, mode->clock);
1664		dce_v6_0_audio_set_vbi_packet(encoder);
1665		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1666	} else if (ENCODER_MODE_IS_DP(em)) {
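		/* For DP the DTO reference is the display engine clock;
		 * default_dispclk appears to be in 10 kHz units (an assumption
		 * from the surrounding unit conventions), hence the *10 to
		 * bring it to kHz like mode->clock.
		 */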
1667		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1668	}
1669	dce_v6_0_audio_set_packet(encoder);
1670	dce_v6_0_audio_select_pin(encoder);
1671	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1672	dce_v6_0_audio_set_mute(encoder, false);
1673	if (em == ATOM_ENCODER_MODE_HDMI) {
1674		dce_v6_0_audio_hdmi_enable(encoder, 1);
1675	} else if (ENCODER_MODE_IS_DP(em)) {
1676		dce_v6_0_audio_dp_enable(encoder, 1);
1677	}
1678
1679	/* enable audio after setting up hw */
1680	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1681}
1682
1683static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1684{
1685	struct drm_device *dev = encoder->dev;
1686	struct amdgpu_device *adev = dev->dev_private;
1687	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1688	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1689
1690	if (!dig || !dig->afmt)
1691		return;
1692
1693	/* Silent, r600_hdmi_enable will raise WARN for us */
1694	if (enable && dig->afmt->enabled)
1695		return;
1696
1697	if (!enable && !dig->afmt->enabled)
1698		return;
1699
1700	if (!enable && dig->afmt->pin) {
1701		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1702		dig->afmt->pin = NULL;
1703	}
1704
1705	dig->afmt->enabled = enable;
1706
1707	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1708		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1709}
1710
1711static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1712{
1713	int i, j;
1714
1715	for (i = 0; i < adev->mode_info.num_dig; i++)
1716		adev->mode_info.afmt[i] = NULL;
1717
1718	/* DCE6 has audio blocks tied to DIG encoders */
1719	for (i = 0; i < adev->mode_info.num_dig; i++) {
1720		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1721		if (adev->mode_info.afmt[i]) {
1722			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1723			adev->mode_info.afmt[i]->id = i;
1724		} else {
1725			for (j = 0; j < i; j++) {
1726				kfree(adev->mode_info.afmt[j]);
1727				adev->mode_info.afmt[j] = NULL;
1728			}
1729			DRM_ERROR("Out of memory allocating afmt table\n");
1730			return -ENOMEM;
1731		}
1732	}
1733	return 0;
1734}
1735
1736static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1737{
1738	int i;
1739
1740	for (i = 0; i < adev->mode_info.num_dig; i++) {
1741		kfree(adev->mode_info.afmt[i]);
1742		adev->mode_info.afmt[i] = NULL;
1743	}
1744}
1745
1746static const u32 vga_control_regs[6] =
1747{
1748	mmD1VGA_CONTROL,
1749	mmD2VGA_CONTROL,
1750	mmD3VGA_CONTROL,
1751	mmD4VGA_CONTROL,
1752	mmD5VGA_CONTROL,
1753	mmD6VGA_CONTROL,
1754};
1755
1756static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1757{
1758	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1759	struct drm_device *dev = crtc->dev;
1760	struct amdgpu_device *adev = dev->dev_private;
1761	u32 vga_control;
1762
1763	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1764	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1765}
1766
1767static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1768{
1769	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1770	struct drm_device *dev = crtc->dev;
1771	struct amdgpu_device *adev = dev->dev_private;
1772
1773	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1774}
1775
1776static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1777				     struct drm_framebuffer *fb,
1778				     int x, int y, int atomic)
1779{
1780	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1781	struct drm_device *dev = crtc->dev;
1782	struct amdgpu_device *adev = dev->dev_private;
1783	struct amdgpu_framebuffer *amdgpu_fb;
1784	struct drm_framebuffer *target_fb;
1785	struct drm_gem_object *obj;
1786	struct amdgpu_bo *abo;
1787	uint64_t fb_location, tiling_flags;
1788	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1789	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1790	u32 viewport_w, viewport_h;
1791	int r;
1792	bool bypass_lut = false;
1793	struct drm_format_name_buf format_name;
1794
1795	/* no fb bound */
1796	if (!atomic && !crtc->primary->fb) {
1797		DRM_DEBUG_KMS("No FB bound\n");
1798		return 0;
1799	}
1800
1801	if (atomic) {
1802		amdgpu_fb = to_amdgpu_framebuffer(fb);
1803		target_fb = fb;
1804	} else {
1805		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1806		target_fb = crtc->primary->fb;
1807	}
1808
1809	/* If atomic, assume fb object is pinned & idle & fenced and
1810	 * just update base pointers
1811	 */
1812	obj = amdgpu_fb->obj;
1813	abo = gem_to_amdgpu_bo(obj);
1814	r = amdgpu_bo_reserve(abo, false);
1815	if (unlikely(r != 0))
1816		return r;
1817
1818	if (atomic) {
1819		fb_location = amdgpu_bo_gpu_offset(abo);
1820	} else {
1821		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1822		if (unlikely(r != 0)) {
1823			amdgpu_bo_unreserve(abo);
1824			return -EINVAL;
1825		}
1826	}
1827
1828	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1829	amdgpu_bo_unreserve(abo);
1830
1831	switch (target_fb->format->format) {
1832	case DRM_FORMAT_C8:
1833		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1834			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1835		break;
1836	case DRM_FORMAT_XRGB4444:
1837	case DRM_FORMAT_ARGB4444:
1838		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1839			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1840#ifdef __BIG_ENDIAN
1841		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1842#endif
1843		break;
1844	case DRM_FORMAT_XRGB1555:
1845	case DRM_FORMAT_ARGB1555:
1846		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1847			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1848#ifdef __BIG_ENDIAN
1849		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1850#endif
1851		break;
1852	case DRM_FORMAT_BGRX5551:
1853	case DRM_FORMAT_BGRA5551:
1854		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1855			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1856#ifdef __BIG_ENDIAN
1857		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1858#endif
1859		break;
1860	case DRM_FORMAT_RGB565:
1861		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1862			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1863#ifdef __BIG_ENDIAN
1864		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1865#endif
1866		break;
1867	case DRM_FORMAT_XRGB8888:
1868	case DRM_FORMAT_ARGB8888:
1869		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1870			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1871#ifdef __BIG_ENDIAN
1872		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1873#endif
1874		break;
1875	case DRM_FORMAT_XRGB2101010:
1876	case DRM_FORMAT_ARGB2101010:
1877		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1878			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1879#ifdef __BIG_ENDIAN
1880		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1881#endif
1882		/* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
1883		bypass_lut = true;
1884		break;
1885	case DRM_FORMAT_BGRX1010102:
1886	case DRM_FORMAT_BGRA1010102:
1887		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1888			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1889#ifdef __BIG_ENDIAN
1890		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1891#endif
1892		/* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
1893		bypass_lut = true;
1894		break;
1895	default:
1896		DRM_ERROR("Unsupported screen format %s\n",
1897		          drm_get_format_name(target_fb->format->format, &format_name));
1898		return -EINVAL;
1899	}
1900
1901	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1902		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1903
1904		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1905		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1906		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1907		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1908		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1909
1910		fb_format |= GRPH_NUM_BANKS(num_banks);
1911		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1912		fb_format |= GRPH_TILE_SPLIT(tile_split);
1913		fb_format |= GRPH_BANK_WIDTH(bankw);
1914		fb_format |= GRPH_BANK_HEIGHT(bankh);
1915		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1916	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1917		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1918	}
1919
1920	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1921	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1922
1923	dce_v6_0_vga_enable(crtc, false);
1924
1925	/* Make sure surface address is updated at vertical blank rather than
1926	 * horizontal blank
1927	 */
1928	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1929
1930	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1931	       upper_32_bits(fb_location));
1932	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1933	       upper_32_bits(fb_location));
1934	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1935	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1936	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1937	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1938	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1939	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1940
1941	/*
1942	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1943	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1944	 * retain the full precision throughout the pipeline.
1945	 */
1946	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1947		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1948		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1949
1950	if (bypass_lut)
1951		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1952
1953	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1954	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1955	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1956	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1957	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1958	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1959
1960	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1961	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1962
1963	dce_v6_0_grph_enable(crtc, true);
1964
1965	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1966		       target_fb->height);
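	/* the viewport start apparently must be 4-pixel/2-line aligned */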
1967	x &= ~3;
1968	y &= ~1;
1969	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1970	       (x << 16) | y);
1971	viewport_w = crtc->mode.hdisplay;
1972	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1973
1974	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1975	       (viewport_w << 16) | viewport_h);
1976
1977	/* set pageflip to happen anywhere in vblank interval */
1978	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1979
1980	if (!atomic && fb && fb != crtc->primary->fb) {
1981		amdgpu_fb = to_amdgpu_framebuffer(fb);
1982		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1983		r = amdgpu_bo_reserve(abo, true);
1984		if (unlikely(r != 0))
1985			return r;
1986		amdgpu_bo_unpin(abo);
1987		amdgpu_bo_unreserve(abo);
1988	}
1989
1990	/* Bytes per pixel may have changed */
1991	dce_v6_0_bandwidth_update(adev);
1992
1993	return 0;
1995}
1996
1997static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1998				    struct drm_display_mode *mode)
1999{
2000	struct drm_device *dev = crtc->dev;
2001	struct amdgpu_device *adev = dev->dev_private;
2002	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2003
2004	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2005		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2006		       INTERLEAVE_EN);
2007	else
2008		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2009}
2010
2011static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2012{
2014	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2015	struct drm_device *dev = crtc->dev;
2016	struct amdgpu_device *adev = dev->dev_private;
2017	u16 *r, *g, *b;
2018	int i;
2019
2020	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2021
2022	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2023	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2024		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2025	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2026	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2027	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2028	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2029	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2030	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2031		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2032
2033	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2034
2035	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2036	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2037	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2038
2039	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2040	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2041	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2042
2043	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2044	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2045
2046	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2047	r = crtc->gamma_store;
2048	g = r + crtc->gamma_size;
2049	b = g + crtc->gamma_size;
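	/* pack each 16-bit gamma entry down to its top 10 bits and write one
	 * 30-bit word per slot: R in bits 29:20, G in 19:10, B in 9:0
	 */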
2050	for (i = 0; i < 256; i++) {
2051		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2052		       ((*r++ & 0xffc0) << 14) |
2053		       ((*g++ & 0xffc0) << 4) |
2054		       (*b++ >> 6));
2055	}
2056
2057	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2058	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2059		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2060		ICON_DEGAMMA_MODE(0) |
2061		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2062	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2063	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2064		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2065	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2066	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2067		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2068	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2069	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2070		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2071	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2072	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2075}
2076
2077static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2078{
2079	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2080	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2081
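	/* each UNIPHY drives two links (A/B), each with its own DIG block;
	 * UNIPHY3 only exposes a single DIG (6) in this mapping
	 */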
2082	switch (amdgpu_encoder->encoder_id) {
2083	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2084		return dig->linkb ? 1 : 0;
2085	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2086		return dig->linkb ? 3 : 2;
2087	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2088		return dig->linkb ? 5 : 4;
2089	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2090		return 6;
2091	default:
2092		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2093		return 0;
2094	}
2095}
2096
2097/**
2098 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2099 *
2100 * @crtc: drm crtc
2101 *
2102 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2103 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2104 * monitors a dedicated PPLL must be used.  If a particular board has
2105 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2106 * as there is no need to program the PLL itself.  If we are not able to
2107 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2108 * avoid messing up an existing monitor.
2111 */
2112static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2113{
2114	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2115	struct drm_device *dev = crtc->dev;
2116	struct amdgpu_device *adev = dev->dev_private;
2117	u32 pll_in_use;
2118	int pll;
2119
2120	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2121		if (adev->clock.dp_extclk)
2122			/* skip PPLL programming if using ext clock */
2123			return ATOM_PPLL_INVALID;
2124		else
2125			return ATOM_PPLL0;
2126	} else {
2127		/* use the same PPLL for all monitors with the same clock */
2128		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2129		if (pll != ATOM_PPLL_INVALID)
2130			return pll;
2131	}
2132
2133	/* otherwise pick whichever of PPLL1 and PPLL2 is free */
2134	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2135	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2136		return ATOM_PPLL2;
2137	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2138		return ATOM_PPLL1;
2139	DRM_ERROR("unable to allocate a PPLL\n");
2140	return ATOM_PPLL_INVALID;
2141}
2142
2143static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2144{
2145	struct amdgpu_device *adev = crtc->dev->dev_private;
2146	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2147	uint32_t cur_lock;
2148
2149	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2150	if (lock)
2151		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2152	else
2153		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2154	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2155}
2156
2157static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2158{
2159	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2160	struct amdgpu_device *adev = crtc->dev->dev_private;
2161
2162	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2163		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2164		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2167}
2168
2169static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2170{
2171	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2172	struct amdgpu_device *adev = crtc->dev->dev_private;
2173
2174	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2175	       upper_32_bits(amdgpu_crtc->cursor_addr));
2176	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2177	       lower_32_bits(amdgpu_crtc->cursor_addr));
2178
2179	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2180		   CUR_CONTROL__CURSOR_EN_MASK |
2181		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2182		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2184}
2185
2186static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2187				       int x, int y)
2188{
2189	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2190	struct amdgpu_device *adev = crtc->dev->dev_private;
2191	int xorigin = 0, yorigin = 0;
2192
2193	int w = amdgpu_crtc->cursor_width;
2194
2195	amdgpu_crtc->cursor_x = x;
2196	amdgpu_crtc->cursor_y = y;
2197
2198	/* avivo cursors are offset into the total surface */
2199	x += crtc->x;
2200	y += crtc->y;
2201	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2202
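	/* clamp negative coords to 0 and shift the cursor hot spot instead,
	 * so the visible part of the cursor image stays where the user put it
	 */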
2203	if (x < 0) {
2204		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2205		x = 0;
2206	}
2207	if (y < 0) {
2208		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2209		y = 0;
2210	}
2211
2212	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2213	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2214	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2215	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2216
2217	return 0;
2218}
2219
2220static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2221				     int x, int y)
2222{
2223	int ret;
2224
2225	dce_v6_0_lock_cursor(crtc, true);
2226	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2227	dce_v6_0_lock_cursor(crtc, false);
2228
2229	return ret;
2230}
2231
2232static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2233				     struct drm_file *file_priv,
2234				     uint32_t handle,
2235				     uint32_t width,
2236				     uint32_t height,
2237				     int32_t hot_x,
2238				     int32_t hot_y)
2239{
2240	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2241	struct drm_gem_object *obj;
2242	struct amdgpu_bo *aobj;
2243	int ret;
2244
2245	if (!handle) {
2246		/* turn off cursor */
2247		dce_v6_0_hide_cursor(crtc);
2248		obj = NULL;
2249		goto unpin;
2250	}
2251
2252	if ((width > amdgpu_crtc->max_cursor_width) ||
2253	    (height > amdgpu_crtc->max_cursor_height)) {
2254		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2255		return -EINVAL;
2256	}
2257
2258	obj = drm_gem_object_lookup(file_priv, handle);
2259	if (!obj) {
2260		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2261		return -ENOENT;
2262	}
2263
2264	aobj = gem_to_amdgpu_bo(obj);
2265	ret = amdgpu_bo_reserve(aobj, false);
2266	if (ret != 0) {
2267		drm_gem_object_put_unlocked(obj);
2268		return ret;
2269	}
2270
2271	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2272	amdgpu_bo_unreserve(aobj);
2273	if (ret) {
2274		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2275		drm_gem_object_put_unlocked(obj);
2276		return ret;
2277	}
2278
2279	dce_v6_0_lock_cursor(crtc, true);
2280
2281	if (width != amdgpu_crtc->cursor_width ||
2282	    height != amdgpu_crtc->cursor_height ||
2283	    hot_x != amdgpu_crtc->cursor_hot_x ||
2284	    hot_y != amdgpu_crtc->cursor_hot_y) {
2285		int x, y;
2286
2287		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2288		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2289
2290		dce_v6_0_cursor_move_locked(crtc, x, y);
2291
2292		amdgpu_crtc->cursor_width = width;
2293		amdgpu_crtc->cursor_height = height;
2294		amdgpu_crtc->cursor_hot_x = hot_x;
2295		amdgpu_crtc->cursor_hot_y = hot_y;
2296	}
2297
2298	dce_v6_0_show_cursor(crtc);
2299	dce_v6_0_lock_cursor(crtc, false);
2300
2301unpin:
2302	if (amdgpu_crtc->cursor_bo) {
2303		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2304		ret = amdgpu_bo_reserve(aobj, true);
2305		if (likely(ret == 0)) {
2306			amdgpu_bo_unpin(aobj);
2307			amdgpu_bo_unreserve(aobj);
2308		}
2309		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2310	}
2311
2312	amdgpu_crtc->cursor_bo = obj;
2313	return 0;
2314}
2315
2316static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2317{
2318	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2319
2320	if (amdgpu_crtc->cursor_bo) {
2321		dce_v6_0_lock_cursor(crtc, true);
2322
2323		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2324					    amdgpu_crtc->cursor_y);
2325
2326		dce_v6_0_show_cursor(crtc);
2327		dce_v6_0_lock_cursor(crtc, false);
2328	}
2329}
2330
2331static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2332				   u16 *blue, uint32_t size,
2333				   struct drm_modeset_acquire_ctx *ctx)
2334{
2335	dce_v6_0_crtc_load_lut(crtc);
2336
2337	return 0;
2338}
2339
2340static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2341{
2342	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2343
2344	drm_crtc_cleanup(crtc);
2345	kfree(amdgpu_crtc);
2346}
2347
2348static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2349	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2350	.cursor_move = dce_v6_0_crtc_cursor_move,
2351	.gamma_set = dce_v6_0_crtc_gamma_set,
2352	.set_config = amdgpu_display_crtc_set_config,
2353	.destroy = dce_v6_0_crtc_destroy,
2354	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2355};
2356
2357static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2358{
2359	struct drm_device *dev = crtc->dev;
2360	struct amdgpu_device *adev = dev->dev_private;
2361	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2362	unsigned type;
2363
2364	switch (mode) {
2365	case DRM_MODE_DPMS_ON:
2366		amdgpu_crtc->enabled = true;
2367		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2368		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2369		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2370		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2371						amdgpu_crtc->crtc_id);
2372		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2373		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2374		drm_crtc_vblank_on(crtc);
2375		dce_v6_0_crtc_load_lut(crtc);
2376		break;
2377	case DRM_MODE_DPMS_STANDBY:
2378	case DRM_MODE_DPMS_SUSPEND:
2379	case DRM_MODE_DPMS_OFF:
2380		drm_crtc_vblank_off(crtc);
2381		if (amdgpu_crtc->enabled)
2382			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2383		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2384		amdgpu_crtc->enabled = false;
2385		break;
2386	}
2387	/* adjust pm to dpms */
2388	amdgpu_pm_compute_clocks(adev);
2389}
2390
2391static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2392{
2393	/* disable crtc pair power gating before programming */
2394	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2395	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2396	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2397}
2398
2399static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2400{
2401	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2402	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2403}
2404
2405static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2406{
2408	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2409	struct drm_device *dev = crtc->dev;
2410	struct amdgpu_device *adev = dev->dev_private;
2411	struct amdgpu_atom_ss ss;
2412	int i;
2413
2414	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2415	if (crtc->primary->fb) {
2416		int r;
2417		struct amdgpu_framebuffer *amdgpu_fb;
2418		struct amdgpu_bo *abo;
2419
2420		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2421		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2422		r = amdgpu_bo_reserve(abo, true);
2423		if (unlikely(r))
2424			DRM_ERROR("failed to reserve abo before unpin\n");
2425		else {
2426			amdgpu_bo_unpin(abo);
2427			amdgpu_bo_unreserve(abo);
2428		}
2429	}
2430	/* disable the GRPH */
2431	dce_v6_0_grph_enable(crtc, false);
2432
2433	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2434
2435	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2436		if (adev->mode_info.crtcs[i] &&
2437		    adev->mode_info.crtcs[i]->enabled &&
2438		    i != amdgpu_crtc->crtc_id &&
2439		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2440			/* another crtc is still using this pll;
2441			 * don't turn it off
2442			 */
2443			goto done;
2444		}
2445	}
2446
2447	switch (amdgpu_crtc->pll_id) {
2448	case ATOM_PPLL1:
2449	case ATOM_PPLL2:
2450		/* disable the ppll */
2451		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2452						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2453		break;
2454	default:
2455		break;
2456	}
2457done:
2458	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2459	amdgpu_crtc->adjusted_clock = 0;
2460	amdgpu_crtc->encoder = NULL;
2461	amdgpu_crtc->connector = NULL;
2462}
2463
2464static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2465				  struct drm_display_mode *mode,
2466				  struct drm_display_mode *adjusted_mode,
2467				  int x, int y, struct drm_framebuffer *old_fb)
2468{
2469	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2470
2471	if (!amdgpu_crtc->adjusted_clock)
2472		return -EINVAL;
2473
2474	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2475	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2476	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2477	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2478	amdgpu_atombios_crtc_scaler_setup(crtc);
2479	dce_v6_0_cursor_reset(crtc);
2480	/* update the hw version fpr dpm */
2481	amdgpu_crtc->hw_mode = *adjusted_mode;
2482
2483	return 0;
2484}
2485
2486static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2487				     const struct drm_display_mode *mode,
2488				     struct drm_display_mode *adjusted_mode)
2489{
2491	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2492	struct drm_device *dev = crtc->dev;
2493	struct drm_encoder *encoder;
2494
2495	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2496	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2497		if (encoder->crtc == crtc) {
2498			amdgpu_crtc->encoder = encoder;
2499			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2500			break;
2501		}
2502	}
2503	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2504		amdgpu_crtc->encoder = NULL;
2505		amdgpu_crtc->connector = NULL;
2506		return false;
2507	}
2508	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2509		return false;
2510	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2511		return false;
2512	/* pick pll */
2513	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2514	/* if we can't get a PPLL for a non-DP encoder, fail */
2515	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2516	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2517		return false;
2518
2519	return true;
2520}
2521
2522static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2523				  struct drm_framebuffer *old_fb)
2524{
2525	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2526}
2527
2528static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2529					 struct drm_framebuffer *fb,
2530					 int x, int y, enum mode_set_atomic state)
2531{
2532	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2533}
2534
2535static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2536	.dpms = dce_v6_0_crtc_dpms,
2537	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2538	.mode_set = dce_v6_0_crtc_mode_set,
2539	.mode_set_base = dce_v6_0_crtc_set_base,
2540	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2541	.prepare = dce_v6_0_crtc_prepare,
2542	.commit = dce_v6_0_crtc_commit,
2543	.disable = dce_v6_0_crtc_disable,
2544};
2545
2546static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2547{
2548	struct amdgpu_crtc *amdgpu_crtc;
2549
2550	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2551			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2552	if (amdgpu_crtc == NULL)
2553		return -ENOMEM;
2554
2555	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2556
2557	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2558	amdgpu_crtc->crtc_id = index;
2559	adev->mode_info.crtcs[index] = amdgpu_crtc;
2560
2561	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2562	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2563	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2564	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2565
2566	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2567
2568	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2569	amdgpu_crtc->adjusted_clock = 0;
2570	amdgpu_crtc->encoder = NULL;
2571	amdgpu_crtc->connector = NULL;
2572	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2573
2574	return 0;
2575}
2576
2577static int dce_v6_0_early_init(void *handle)
2578{
2579	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2580
2581	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2582	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2583
2584	dce_v6_0_set_display_funcs(adev);
2585
2586	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2587
2588	switch (adev->asic_type) {
2589	case CHIP_TAHITI:
2590	case CHIP_PITCAIRN:
2591	case CHIP_VERDE:
2592		adev->mode_info.num_hpd = 6;
2593		adev->mode_info.num_dig = 6;
2594		break;
2595	case CHIP_OLAND:
2596		adev->mode_info.num_hpd = 2;
2597		adev->mode_info.num_dig = 2;
2598		break;
2599	default:
2600		return -EINVAL;
2601	}
2602
2603	dce_v6_0_set_irq_funcs(adev);
2604
2605	return 0;
2606}
2607
2608static int dce_v6_0_sw_init(void *handle)
2609{
2610	int r, i;
2611	bool ret;
2612	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2613
2614	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2615		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2616		if (r)
2617			return r;
2618	}
2619
2620	for (i = 8; i < 20; i += 2) {
2621		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2622		if (r)
2623			return r;
2624	}
2625
2626	/* HPD hotplug */
2627	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2628	if (r)
2629		return r;
2630
2631	adev->mode_info.mode_config_initialized = true;
2632
2633	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2634	adev->ddev->mode_config.async_page_flip = true;
2635	adev->ddev->mode_config.max_width = 16384;
2636	adev->ddev->mode_config.max_height = 16384;
2637	adev->ddev->mode_config.preferred_depth = 24;
2638	adev->ddev->mode_config.prefer_shadow = 1;
2639	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2640
2641	r = amdgpu_display_modeset_create_props(adev);
2642	if (r)
2643		return r;
2644
2648	/* allocate crtcs */
2649	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2650		r = dce_v6_0_crtc_init(adev, i);
2651		if (r)
2652			return r;
2653	}
2654
2655	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2656	if (ret)
2657		amdgpu_display_print_display_setup(adev->ddev);
2658	else
2659		return -EINVAL;
2660
2661	/* setup afmt */
2662	r = dce_v6_0_afmt_init(adev);
2663	if (r)
2664		return r;
2665
2666	r = dce_v6_0_audio_init(adev);
2667	if (r)
2668		return r;
2669
2670	drm_kms_helper_poll_init(adev->ddev);
2671
2672	return r;
2673}
2674
2675static int dce_v6_0_sw_fini(void *handle)
2676{
2677	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2678
2679	kfree(adev->mode_info.bios_hardcoded_edid);
2680
2681	drm_kms_helper_poll_fini(adev->ddev);
2682
2683	dce_v6_0_audio_fini(adev);
2684	dce_v6_0_afmt_fini(adev);
2685
2686	drm_mode_config_cleanup(adev->ddev);
2687	adev->mode_info.mode_config_initialized = false;
2688
2689	return 0;
2690}
2691
2692static int dce_v6_0_hw_init(void *handle)
2693{
2694	int i;
2695	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2696
2697	/* disable vga render */
2698	dce_v6_0_set_vga_render_state(adev, false);
2699	/* init dig PHYs, disp eng pll */
2700	amdgpu_atombios_encoder_init_dig(adev);
2701	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2702
2703	/* initialize hpd */
2704	dce_v6_0_hpd_init(adev);
2705
2706	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2707		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2708	}
2709
2710	dce_v6_0_pageflip_interrupt_init(adev);
2711
2712	return 0;
2713}
2714
2715static int dce_v6_0_hw_fini(void *handle)
2716{
2717	int i;
2718	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2719
2720	dce_v6_0_hpd_fini(adev);
2721
2722	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2723		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2724	}
2725
2726	dce_v6_0_pageflip_interrupt_fini(adev);
2727
2728	return 0;
2729}
2730
2731static int dce_v6_0_suspend(void *handle)
2732{
2733	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2734
2735	adev->mode_info.bl_level =
2736		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2737
2738	return dce_v6_0_hw_fini(handle);
2739}
2740
2741static int dce_v6_0_resume(void *handle)
2742{
2743	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2744	int ret;
2745
2746	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2747							   adev->mode_info.bl_level);
2748
2749	ret = dce_v6_0_hw_init(handle);
2750
2751	/* turn on the BL */
2752	if (adev->mode_info.bl_encoder) {
2753		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2754								  adev->mode_info.bl_encoder);
2755		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2756						    bl_level);
2757	}
2758
2759	return ret;
2760}
2761
2762static bool dce_v6_0_is_idle(void *handle)
2763{
2764	return true;
2765}
2766
2767static int dce_v6_0_wait_for_idle(void *handle)
2768{
2769	return 0;
2770}
2771
2772static int dce_v6_0_soft_reset(void *handle)
2773{
2774	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2775	return 0;
2776}
2777
2778static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2779						     int crtc,
2780						     enum amdgpu_interrupt_state state)
2781{
2782	u32 reg_block, interrupt_mask;
2783
2784	if (crtc >= adev->mode_info.num_crtc) {
2785		DRM_DEBUG("invalid crtc %d\n", crtc);
2786		return;
2787	}
2788
2789	switch (crtc) {
2790	case 0:
2791		reg_block = SI_CRTC0_REGISTER_OFFSET;
2792		break;
2793	case 1:
2794		reg_block = SI_CRTC1_REGISTER_OFFSET;
2795		break;
2796	case 2:
2797		reg_block = SI_CRTC2_REGISTER_OFFSET;
2798		break;
2799	case 3:
2800		reg_block = SI_CRTC3_REGISTER_OFFSET;
2801		break;
2802	case 4:
2803		reg_block = SI_CRTC4_REGISTER_OFFSET;
2804		break;
2805	case 5:
2806		reg_block = SI_CRTC5_REGISTER_OFFSET;
2807		break;
2808	default:
2809		DRM_DEBUG("invalid crtc %d\n", crtc);
2810		return;
2811	}
2812
2813	switch (state) {
2814	case AMDGPU_IRQ_STATE_DISABLE:
2815		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2816		interrupt_mask &= ~VBLANK_INT_MASK;
2817		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2818		break;
2819	case AMDGPU_IRQ_STATE_ENABLE:
2820		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2821		interrupt_mask |= VBLANK_INT_MASK;
2822		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2823		break;
2824	default:
2825		break;
2826	}
2827}
2828
2829static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2830						    int crtc,
2831						    enum amdgpu_interrupt_state state)
2832{
2833
2834}
2835
2836static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2837					    struct amdgpu_irq_src *src,
2838					    unsigned type,
2839					    enum amdgpu_interrupt_state state)
2840{
2841	u32 dc_hpd_int_cntl;
2842
2843	if (type >= adev->mode_info.num_hpd) {
2844		DRM_DEBUG("invalid hpd %d\n", type);
2845		return 0;
2846	}
2847
2848	switch (state) {
2849	case AMDGPU_IRQ_STATE_DISABLE:
2850		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2851		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2852		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2853		break;
2854	case AMDGPU_IRQ_STATE_ENABLE:
2855		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2856		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2857		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2858		break;
2859	default:
2860		break;
2861	}
2862
2863	return 0;
2864}
2865
2866static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2867					     struct amdgpu_irq_src *src,
2868					     unsigned type,
2869					     enum amdgpu_interrupt_state state)
2870{
2871	switch (type) {
2872	case AMDGPU_CRTC_IRQ_VBLANK1:
2873		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2874		break;
2875	case AMDGPU_CRTC_IRQ_VBLANK2:
2876		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2877		break;
2878	case AMDGPU_CRTC_IRQ_VBLANK3:
2879		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2880		break;
2881	case AMDGPU_CRTC_IRQ_VBLANK4:
2882		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2883		break;
2884	case AMDGPU_CRTC_IRQ_VBLANK5:
2885		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2886		break;
2887	case AMDGPU_CRTC_IRQ_VBLANK6:
2888		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2889		break;
2890	case AMDGPU_CRTC_IRQ_VLINE1:
2891		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2892		break;
2893	case AMDGPU_CRTC_IRQ_VLINE2:
2894		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2895		break;
2896	case AMDGPU_CRTC_IRQ_VLINE3:
2897		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2898		break;
2899	case AMDGPU_CRTC_IRQ_VLINE4:
2900		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2901		break;
2902	case AMDGPU_CRTC_IRQ_VLINE5:
2903		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2904		break;
2905	case AMDGPU_CRTC_IRQ_VLINE6:
2906		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2907		break;
2908	default:
2909		break;
2910	}
2911	return 0;
2912}
2913
2914static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2915			     struct amdgpu_irq_src *source,
2916			     struct amdgpu_iv_entry *entry)
2917{
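	/* crtc interrupt src_ids are 1-based (registered as i + 1 in sw_init) */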
2918	unsigned crtc = entry->src_id - 1;
2919	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2920	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2921								    crtc);
2922
2923	switch (entry->src_data[0]) {
2924	case 0: /* vblank */
2925		if (disp_int & interrupt_status_offsets[crtc].vblank)
2926			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2927		else
2928			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2929
2930		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2931			drm_handle_vblank(adev->ddev, crtc);
2932		}
2933		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2934		break;
2935	case 1: /* vline */
2936		if (disp_int & interrupt_status_offsets[crtc].vline)
2937			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2938		else
2939			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2940
2941		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2942		break;
2943	default:
2944		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2945		break;
2946	}
2947
2948	return 0;
2949}
2950
2951static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2952						 struct amdgpu_irq_src *src,
2953						 unsigned type,
2954						 enum amdgpu_interrupt_state state)
2955{
2956	u32 reg;
2957
2958	if (type >= adev->mode_info.num_crtc) {
2959		DRM_ERROR("invalid pageflip crtc %d\n", type);
2960		return -EINVAL;
2961	}
2962
2963	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2964	if (state == AMDGPU_IRQ_STATE_DISABLE)
2965		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2966		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2967	else
2968		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2969		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2970
2971	return 0;
2972}
2973
2974static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2975				 struct amdgpu_irq_src *source,
2976				 struct amdgpu_iv_entry *entry)
2977{
2978	unsigned long flags;
2979	unsigned crtc_id;
2980	struct amdgpu_crtc *amdgpu_crtc;
2981	struct amdgpu_flip_work *works;
2982
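	/* pageflip src_ids run 8, 10, ..., 18: two per crtc, matching the
	 * registration loop in sw_init, so crtc n uses src_id 8 + 2n
	 */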
2983	crtc_id = (entry->src_id - 8) >> 1;
2984
2985	if (crtc_id >= adev->mode_info.num_crtc) {
2986		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2987		return -EINVAL;
2988	}
2989	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2990
2991	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
2992	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2993		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
2994		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2995
2996	/* the IRQ can fire before the crtc is fully initialized */
2997	if (amdgpu_crtc == NULL)
2998		return 0;
2999
3000	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3001	works = amdgpu_crtc->pflip_works;
3002	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3003		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3004						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3005						amdgpu_crtc->pflip_status,
3006						AMDGPU_FLIP_SUBMITTED);
3007		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3008		return 0;
3009	}
3010
3011	/* page flip completed. clean up */
3012	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3013	amdgpu_crtc->pflip_works = NULL;
3014
3015	/* wake up userspace */
3016	if (works->event)
3017		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3018
3019	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3020
3021	drm_crtc_vblank_put(&amdgpu_crtc->base);
3022	schedule_work(&works->unpin_work);
3023
3024	return 0;
3025}
3026
3027static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3028			    struct amdgpu_irq_src *source,
3029			    struct amdgpu_iv_entry *entry)
3030{
3031	uint32_t disp_int, mask, tmp;
3032	unsigned hpd;
3033
3034	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3035		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3036		return 0;
3037	}
3038
3039	hpd = entry->src_data[0];
3040	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3041	mask = interrupt_status_offsets[hpd].hpd;
3042
3043	if (disp_int & mask) {
3044		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3045		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3046		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3047		schedule_work(&adev->hotplug_work);
3048		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3049	}
3050
3051	return 0;
3053}
3054
3055static int dce_v6_0_set_clockgating_state(void *handle,
3056					  enum amd_clockgating_state state)
3057{
3058	return 0;
3059}
3060
3061static int dce_v6_0_set_powergating_state(void *handle,
3062					  enum amd_powergating_state state)
3063{
3064	return 0;
3065}
3066
3067static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3068	.name = "dce_v6_0",
3069	.early_init = dce_v6_0_early_init,
3070	.late_init = NULL,
3071	.sw_init = dce_v6_0_sw_init,
3072	.sw_fini = dce_v6_0_sw_fini,
3073	.hw_init = dce_v6_0_hw_init,
3074	.hw_fini = dce_v6_0_hw_fini,
3075	.suspend = dce_v6_0_suspend,
3076	.resume = dce_v6_0_resume,
3077	.is_idle = dce_v6_0_is_idle,
3078	.wait_for_idle = dce_v6_0_wait_for_idle,
3079	.soft_reset = dce_v6_0_soft_reset,
3080	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3081	.set_powergating_state = dce_v6_0_set_powergating_state,
3082};
3083
3084static void
3085dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3086			  struct drm_display_mode *mode,
3087			  struct drm_display_mode *adjusted_mode)
3088{
3090	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3091	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3092
3093	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3094
3095	/* need to call this here rather than in prepare() since we need some crtc info */
3096	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3097
3098	/* the scaler setup clears this on some chips */
3099	dce_v6_0_set_interleave(encoder->crtc, mode);
3100
3101	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3102		dce_v6_0_afmt_enable(encoder, true);
3103		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3104	}
3105}
3106
3107static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3108{
3110	struct amdgpu_device *adev = encoder->dev->dev_private;
3111	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3112	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3113
3114	if ((amdgpu_encoder->active_device &
3115	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3116	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3117	     ENCODER_OBJECT_ID_NONE)) {
3118		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3119		if (dig) {
3120			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3121			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3122				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3123		}
3124	}
3125
3126	amdgpu_atombios_scratch_regs_lock(adev, true);
3127
3128	if (connector) {
3129		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3130
3131		/* select the clock/data port if it uses a router */
3132		if (amdgpu_connector->router.cd_valid)
3133			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3134
3135		/* turn eDP panel on for mode set */
3136		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3137			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3138							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3139	}
3140
3141	/* this is needed for the pll/ss setup to work correctly in some cases */
3142	amdgpu_atombios_encoder_set_crtc_source(encoder);
3143	/* set up the FMT blocks */
3144	dce_v6_0_program_fmt(encoder);
3145}
3146
3147static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3148{
3150	struct drm_device *dev = encoder->dev;
3151	struct amdgpu_device *adev = dev->dev_private;
3152
3153	/* need to call this here as we need the crtc set up */
3154	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3155	amdgpu_atombios_scratch_regs_lock(adev, false);
3156}
3157
3158static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3159{
3161	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3162	struct amdgpu_encoder_atom_dig *dig;
3163	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3164
3165	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3166
3167	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3168		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3169			dce_v6_0_afmt_enable(encoder, false);
3170		dig = amdgpu_encoder->enc_priv;
3171		dig->dig_encoder = -1;
3172	}
3173	amdgpu_encoder->active_device = 0;
3174}
3175
3176/* these are handled by the primary encoders */
3177static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3178{
3179
3180}
3181
3182static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3183{
3184
3185}
3186
3187static void
3188dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3189		      struct drm_display_mode *mode,
3190		      struct drm_display_mode *adjusted_mode)
3191{
3192
3193}
3194
3195static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3196{
3197
3198}
3199
3200static void
3201dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3202{
3203
3204}
3205
3206static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3207				    const struct drm_display_mode *mode,
3208				    struct drm_display_mode *adjusted_mode)
3209{
3210	return true;
3211}
3212
3213static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3214	.dpms = dce_v6_0_ext_dpms,
3215	.mode_fixup = dce_v6_0_ext_mode_fixup,
3216	.prepare = dce_v6_0_ext_prepare,
3217	.mode_set = dce_v6_0_ext_mode_set,
3218	.commit = dce_v6_0_ext_commit,
3219	.disable = dce_v6_0_ext_disable,
3220	/* no detect for TMDS/LVDS yet */
3221};
3222
3223static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3224	.dpms = amdgpu_atombios_encoder_dpms,
3225	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3226	.prepare = dce_v6_0_encoder_prepare,
3227	.mode_set = dce_v6_0_encoder_mode_set,
3228	.commit = dce_v6_0_encoder_commit,
3229	.disable = dce_v6_0_encoder_disable,
3230	.detect = amdgpu_atombios_encoder_dig_detect,
3231};
3232
3233static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3234	.dpms = amdgpu_atombios_encoder_dpms,
3235	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3236	.prepare = dce_v6_0_encoder_prepare,
3237	.mode_set = dce_v6_0_encoder_mode_set,
3238	.commit = dce_v6_0_encoder_commit,
3239	.detect = amdgpu_atombios_encoder_dac_detect,
3240};
3241
3242static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3243{
3244	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3245	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3246		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3247	kfree(amdgpu_encoder->enc_priv);
3248	drm_encoder_cleanup(encoder);
3249	kfree(amdgpu_encoder);
3250}
3251
3252static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3253	.destroy = dce_v6_0_encoder_destroy,
3254};
3255
3256static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3257				 uint32_t encoder_enum,
3258				 uint32_t supported_device,
3259				 u16 caps)
3260{
3261	struct drm_device *dev = adev->ddev;
3262	struct drm_encoder *encoder;
3263	struct amdgpu_encoder *amdgpu_encoder;
3264
3265	/* see if we already added it */
3266	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3267		amdgpu_encoder = to_amdgpu_encoder(encoder);
3268		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3269			amdgpu_encoder->devices |= supported_device;
3270			return;
3271		}
3272
3273	}
3274
3275	/* add a new one */
3276	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3277	if (!amdgpu_encoder)
3278		return;
3279
3280	encoder = &amdgpu_encoder->base;
3281	switch (adev->mode_info.num_crtc) {
3282	case 1:
3283		encoder->possible_crtcs = 0x1;
3284		break;
3285	case 2:
3286	default:
3287		encoder->possible_crtcs = 0x3;
3288		break;
3289	case 4:
3290		encoder->possible_crtcs = 0xf;
3291		break;
3292	case 6:
3293		encoder->possible_crtcs = 0x3f;
3294		break;
3295	}
3296
3297	amdgpu_encoder->enc_priv = NULL;
3298	amdgpu_encoder->encoder_enum = encoder_enum;
3299	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3300	amdgpu_encoder->devices = supported_device;
3301	amdgpu_encoder->rmx_type = RMX_OFF;
3302	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3303	amdgpu_encoder->is_ext_encoder = false;
3304	amdgpu_encoder->caps = caps;
3305
3306	switch (amdgpu_encoder->encoder_id) {
3307	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3308	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3309		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3310				 DRM_MODE_ENCODER_DAC, NULL);
3311		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3312		break;
3313	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3314	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3315	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3316	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3317	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3318		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3319			amdgpu_encoder->rmx_type = RMX_FULL;
3320			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3321					 DRM_MODE_ENCODER_LVDS, NULL);
3322			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3323		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3324			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3325					 DRM_MODE_ENCODER_DAC, NULL);
3326			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3327		} else {
3328			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3329					 DRM_MODE_ENCODER_TMDS, NULL);
3330			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3331		}
3332		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3333		break;
3334	case ENCODER_OBJECT_ID_SI170B:
3335	case ENCODER_OBJECT_ID_CH7303:
3336	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3337	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3338	case ENCODER_OBJECT_ID_TITFP513:
3339	case ENCODER_OBJECT_ID_VT1623:
3340	case ENCODER_OBJECT_ID_HDMI_SI1930:
3341	case ENCODER_OBJECT_ID_TRAVIS:
3342	case ENCODER_OBJECT_ID_NUTMEG:
3343		/* these are handled by the primary encoders */
3344		amdgpu_encoder->is_ext_encoder = true;
3345		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3346			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3347					 DRM_MODE_ENCODER_LVDS, NULL);
3348		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3349			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3350					 DRM_MODE_ENCODER_DAC, NULL);
3351		else
3352			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3353					 DRM_MODE_ENCODER_TMDS, NULL);
3354		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3355		break;
3356	}
3357}
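
/* Editor's sketch (not part of the original file): for the CRTC counts
 * handled above (1, 2, 4 and 6), the possible_crtcs switch just builds a
 * contiguous one-bit-per-CRTC mask, i.e. it is equivalent to:
 */
static inline u32 dce_v6_0_possible_crtcs_sketch(int num_crtc)
{
	/* e.g. num_crtc = 6 -> (1 << 6) - 1 = 0x3f */
	return (1u << num_crtc) - 1;
}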
3358
3359static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3360	.bandwidth_update = &dce_v6_0_bandwidth_update,
3361	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3362	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3363	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3364	.hpd_sense = &dce_v6_0_hpd_sense,
3365	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3366	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3367	.page_flip = &dce_v6_0_page_flip,
3368	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3369	.add_encoder = &dce_v6_0_encoder_add,
3370	.add_connector = &amdgpu_connector_add,
3371};
3372
3373static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3374{
3375	if (adev->mode_info.funcs == NULL)
3376		adev->mode_info.funcs = &dce_v6_0_display_funcs;
3377}
3378
3379static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3380	.set = dce_v6_0_set_crtc_interrupt_state,
3381	.process = dce_v6_0_crtc_irq,
3382};
3383
3384static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3385	.set = dce_v6_0_set_pageflip_interrupt_state,
3386	.process = dce_v6_0_pageflip_irq,
3387};
3388
3389static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3390	.set = dce_v6_0_set_hpd_interrupt_state,
3391	.process = dce_v6_0_hpd_irq,
3392};
3393
3394static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3395{
3396	if (adev->mode_info.num_crtc > 0)
3397		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3398	else
3399		adev->crtc_irq.num_types = 0;
3400	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3401
3402	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3403	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3404
3405	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3406	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3407}
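
/* Editor's note: amdgpu's crtc irq type ids enumerate VBLANK1..VBLANK6
 * followed by VLINE1..VLINE6, so "AMDGPU_CRTC_IRQ_VLINE1 + num_crtc" above
 * covers every VBLANK id plus one VLINE id per active CRTC.
 */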
3408
3409const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3410{
3411	.type = AMD_IP_BLOCK_TYPE_DCE,
3412	.major = 6,
3413	.minor = 0,
3414	.rev = 0,
3415	.funcs = &dce_v6_0_ip_funcs,
3416};
3417
3418const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3419{
3420	.type = AMD_IP_BLOCK_TYPE_DCE,
3421	.major = 6,
3422	.minor = 4,
3423	.rev = 0,
3424	.funcs = &dce_v6_0_ip_funcs,
3425};
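
/* Editor's sketch (the exact call site is an assumption, modeled on the SI
 * setup code of this era): the two ip_block versions exported above are
 * picked per ASIC during early init, roughly like this:
 */
static int __maybe_unused dce_v6_0_add_ip_block_sketch(struct amdgpu_device *adev)
{
	/* Oland carries DCE 6.4, the other SI parts DCE 6.0 */
	if (adev->asic_type == CHIP_OLAND)
		return amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);

	return amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
}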
v4.10.11
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "drmP.h"
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "atom.h"
  28#include "amdgpu_atombios.h"
  29#include "atombios_crtc.h"
  30#include "atombios_encoders.h"
  31#include "amdgpu_pll.h"
  32#include "amdgpu_connectors.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gca/gfx_6_0_d.h"
  39#include "gca/gfx_6_0_sh_mask.h"
  40#include "gmc/gmc_6_0_d.h"
  41#include "gmc/gmc_6_0_sh_mask.h"
  42#include "dce/dce_6_0_d.h"
  43#include "dce/dce_6_0_sh_mask.h"
  44#include "gca/gfx_7_2_enum.h"
  45#include "si_enums.h"
  46
  47static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  49
  50static const u32 crtc_offsets[6] =
  51{
  52	SI_CRTC0_REGISTER_OFFSET,
  53	SI_CRTC1_REGISTER_OFFSET,
  54	SI_CRTC2_REGISTER_OFFSET,
  55	SI_CRTC3_REGISTER_OFFSET,
  56	SI_CRTC4_REGISTER_OFFSET,
  57	SI_CRTC5_REGISTER_OFFSET
  58};
  59
  60static const u32 hpd_offsets[] =
  61{
  62	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
  63	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
  64	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
  65	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
  66	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
  67	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
  68};
  69
  70static const uint32_t dig_offsets[] = {
  71	SI_CRTC0_REGISTER_OFFSET,
  72	SI_CRTC1_REGISTER_OFFSET,
  73	SI_CRTC2_REGISTER_OFFSET,
  74	SI_CRTC3_REGISTER_OFFSET,
  75	SI_CRTC4_REGISTER_OFFSET,
  76	SI_CRTC5_REGISTER_OFFSET,
  77	(0x13830 - 0x7030) >> 2,
  78};
  79
  80static const struct {
  81	uint32_t	reg;
  82	uint32_t	vblank;
  83	uint32_t	vline;
  84	uint32_t	hpd;
  85
  86} interrupt_status_offsets[6] = { {
  87	.reg = mmDISP_INTERRUPT_STATUS,
  88	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  89	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  90	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  91}, {
  92	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  93	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 116} };
 117
 118static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119				     u32 block_offset, u32 reg)
 120{
 121	DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
 122	return 0;
 123}
 124
 125static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 126				      u32 block_offset, u32 reg, u32 v)
 127{
 128	DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
 129}
 130
 131static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 132{
 133	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
 134		return true;
 135	else
 136		return false;
 137}
 138
 139static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 140{
 141	u32 pos1, pos2;
 142
 143	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 144	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 145
 146	if (pos1 != pos2)
 147		return true;
 148	else
 149		return false;
 150}
 151
 152/**
 153 * dce_v6_0_vblank_wait - vblank wait asic callback.
 154 *
 155 * @crtc: crtc to wait for vblank on
 156 *
 157 * Wait for vblank on the requested crtc (evergreen+).
 158 */
 159static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 160{
 161	unsigned i = 100;
 162
 163	if (crtc >= adev->mode_info.num_crtc)
 164		return;
 165
 166	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 167		return;
 168
 169	/* depending on when we hit vblank, we may be close to active; if so,
 170	 * wait for another frame.
 171	 */
 172	while (dce_v6_0_is_in_vblank(adev, crtc)) {
 173		if (i++ == 100) {
 174			i = 0;
 175			if (!dce_v6_0_is_counter_moving(adev, crtc))
 176				break;
 177		}
 178	}
 179
 180	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
 181		if (i++ == 100) {
 182			i = 0;
 183			if (!dce_v6_0_is_counter_moving(adev, crtc))
 184				break;
 185		}
 186	}
 187}
 188
 189static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 190{
 191	if (crtc >= adev->mode_info.num_crtc)
 192		return 0;
 193	else
 194		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 195}
 196
 197static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 198{
 199	unsigned i;
 200
 201	/* Enable pflip interrupts */
 202	for (i = 0; i < adev->mode_info.num_crtc; i++)
 203		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 204}
 205
 206static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 207{
 208	unsigned i;
 209
 210	/* Disable pflip interrupts */
 211	for (i = 0; i < adev->mode_info.num_crtc; i++)
 212		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 213}
 214
 215/**
 216 * dce_v6_0_page_flip - pageflip callback.
 217 *
 218 * @adev: amdgpu_device pointer
 219 * @crtc_id: crtc to flip on
 220 * @crtc_base: new address of the crtc (GPU MC address)
 221 * @async: flip at the next hsync instead of waiting for vsync
 222 *
 223 * Does the actual pageflip (evergreen+):
 224 * programs the new scanout addresses and lets the double
 225 * buffered update take place at the next vertical (or, for
 226 * async flips, horizontal) retrace.
 227 */
 228static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 229			       int crtc_id, u64 crtc_base, bool async)
 230{
 231	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 232
 233	/* flip at hsync for async, default is vsync */
 234	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 235	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 236	/* update the scanout addresses */
 237	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 238	       upper_32_bits(crtc_base));
 239	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 240	       (u32)crtc_base);
 241
 242	/* post the write */
 243	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 244}
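
/* Editor's note: the trailing RREG32() above is a "posting read" -- reading
 * a register in the same block back forces the preceding MMIO writes out of
 * any write buffers before the flip is treated as programmed.  The bare
 * pattern, as a sketch:
 */
static inline void dce_v6_0_posted_write_sketch(struct amdgpu_device *adev,
						u32 reg, u32 val)
{
	WREG32(reg, val);	/* may sit in a write buffer */
	(void)RREG32(reg);	/* read back posts the write */
}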
 245
 246static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 247					u32 *vbl, u32 *position)
 248{
 249	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 250		return -EINVAL;
 251	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 252	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 253
 254	return 0;
 255
 256}
 257
 258/**
 259 * dce_v6_0_hpd_sense - hpd sense callback.
 260 *
 261 * @adev: amdgpu_device pointer
 262 * @hpd: hpd (hotplug detect) pin
 263 *
 264 * Checks if a digital monitor is connected (evergreen+).
 265 * Returns true if connected, false if not connected.
 266 */
 267static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 268			       enum amdgpu_hpd_id hpd)
 269{
 270	bool connected = false;
 271
 272	if (hpd >= adev->mode_info.num_hpd)
 273		return connected;
 274
 275	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 276		connected = true;
 277
 278	return connected;
 279}
 280
 281/**
 282 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 283 *
 284 * @adev: amdgpu_device pointer
 285 * @hpd: hpd (hotplug detect) pin
 286 *
 287 * Set the polarity of the hpd pin (evergreen+).
 288 */
 289static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 290				      enum amdgpu_hpd_id hpd)
 291{
 292	u32 tmp;
 293	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 294
 295	if (hpd >= adev->mode_info.num_hpd)
 296		return;
 297
 298	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 299	if (connected)
 300		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 301	else
 302		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 303	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 304}
 305
 306/**
 307 * dce_v6_0_hpd_init - hpd setup callback.
 308 *
 309 * @adev: amdgpu_device pointer
 310 *
 311 * Setup the hpd pins used by the card (evergreen+).
 312 * Enable the pin, set the polarity, and enable the hpd interrupts.
 313 */
 314static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 315{
 316	struct drm_device *dev = adev->ddev;
 317	struct drm_connector *connector;
 318	u32 tmp;
 319
 320	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 321		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 322
 323		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 324			continue;
 325
 326		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 327		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 328		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 329
 330		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 331		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 332			/* don't try to enable hpd on eDP or LVDS: it breaks the
 333			 * aux dp channel on imac and causes interrupt storms
 334			 * during dpms.  Leaving it off helps (but does not fully
 335			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
 336			 */
 337			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 338			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 339			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 340			continue;
 341		}
 342
 343		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 344		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 345	}
 346
 347}
 348
 349/**
 350 * dce_v6_0_hpd_fini - hpd tear down callback.
 351 *
 352 * @adev: amdgpu_device pointer
 353 *
 354 * Tear down the hpd pins used by the card (evergreen+).
 355 * Disable the hpd interrupts.
 356 */
 357static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 358{
 359	struct drm_device *dev = adev->ddev;
 360	struct drm_connector *connector;
 361	u32 tmp;
 362
 363	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 364		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 365
 366		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 367			continue;
 368
 369		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 370		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 371		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 372
 373		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 374	}
 375}
 376
 377static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 378{
 379	return mmDC_GPIO_HPD_A;
 380}
 381
 382static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
 383{
 384	if (crtc >= adev->mode_info.num_crtc)
 385		return 0;
 386	else
 387		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 388}
 389
 390static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
 391				    struct amdgpu_mode_mc_save *save)
 392{
 393	u32 crtc_enabled, tmp, frame_count;
 394	int i, j;
 395
 396	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
 397	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 398
 399	/* disable VGA render */
 400	WREG32(mmVGA_RENDER_CONTROL, 0);
 401
 402	/* blank the display controllers */
 403	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 404		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 405		if (crtc_enabled) {
 406			save->crtc_enabled[i] = true;
 407			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 408
 409			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
 410				dce_v6_0_vblank_wait(adev, i);
 411				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 412				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
 413				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 414				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 415			}
 416			/* wait for the next frame */
 417			frame_count = evergreen_get_vblank_counter(adev, i);
 418			for (j = 0; j < adev->usec_timeout; j++) {
 419				if (evergreen_get_vblank_counter(adev, i) != frame_count)
 420					break;
 421				udelay(1);
 422			}
 423
 424			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 425			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 426			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 427			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 428			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 429			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 430			save->crtc_enabled[i] = false;
 431			/* ***** */
 432		} else {
 433			save->crtc_enabled[i] = false;
 434		}
 435	}
 436}
 437
 438static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 439				      struct amdgpu_mode_mc_save *save)
 440{
 441	u32 tmp;
 442	int i, j;
 443
 444	/* update crtc base addresses */
 445	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 446		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 447		       upper_32_bits(adev->mc.vram_start));
 448		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 449		       upper_32_bits(adev->mc.vram_start));
 450		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 451		       (u32)adev->mc.vram_start);
 452		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
 453		       (u32)adev->mc.vram_start);
 454	}
 455
 456	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
 457	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
 458
 459	/* unlock regs and wait for update */
 460	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 461		if (save->crtc_enabled[i]) {
 462			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
 463			if ((tmp & 0x7) != 0) {
 464				tmp &= ~0x7;
 465				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 466			}
 467			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 468			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
 469				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
 470				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
 471			}
 472			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
 473			if (tmp & 1) {
 474				tmp &= ~1;
 475				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
 476			}
 477			for (j = 0; j < adev->usec_timeout; j++) {
 478				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 479				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
 480					break;
 481				udelay(1);
 482			}
 483		}
 484	}
 485
 486	/* Unlock vga access */
 487	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 488	mdelay(1);
 489	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 490
 491}
 492
 493static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 494					  bool render)
 495{
 496	if (!render)
 497		WREG32(mmVGA_RENDER_CONTROL,
 498			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 499
 500}
 501
 502static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 503{
 504	int num_crtc = 0;
 505
 506	switch (adev->asic_type) {
 507	case CHIP_TAHITI:
 508	case CHIP_PITCAIRN:
 509	case CHIP_VERDE:
 510		num_crtc = 6;
 511		break;
 512	case CHIP_OLAND:
 513		num_crtc = 2;
 514		break;
 515	default:
 516		num_crtc = 0;
 517	}
 518	return num_crtc;
 519}
 520
 521void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 522{
 523	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
 524	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 525		u32 tmp;
 526		int crtc_enabled, i;
 527
 528		dce_v6_0_set_vga_render_state(adev, false);
 529
 530		/* disable the CRTCs */
 531		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
 532			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
 533				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 534			if (crtc_enabled) {
 535				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 536				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 537				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 538				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 539				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 540			}
 541		}
 542	}
 543}
 544
 545static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 546{
 547
 548	struct drm_device *dev = encoder->dev;
 549	struct amdgpu_device *adev = dev->dev_private;
 550	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 551	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 552	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 553	int bpc = 0;
 554	u32 tmp = 0;
 555	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 556
 557	if (connector) {
 558		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 559		bpc = amdgpu_connector_get_monitor_bpc(connector);
 560		dither = amdgpu_connector->dither;
 561	}
 562
 563	/* LVDS FMT is set up by atom */
 564	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 565		return;
 566
 567	if (bpc == 0)
 568		return;
 569
 570
 571	switch (bpc) {
 572	case 6:
 573		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 574			/* XXX sort out optimal dither settings */
 575			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 576				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 577				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 578		else
 579			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 580		break;
 581	case 8:
 582		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 583			/* XXX sort out optimal dither settings */
 584			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 585				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 586				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 587				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 588				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 589		else
 590			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 591				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 592		break;
 593	case 10:
 594	default:
 595		/* not needed */
 596		break;
 597	}
 598
 599	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 600}
 601
 602/**
 603 * si_get_number_of_dram_channels - get the number of dram channels
 604 *
 605 * @adev: amdgpu_device pointer
 606 *
 607 * Look up the number of video ram channels (SI).
 608 * Used for display watermark bandwidth calculations
 609 * Returns the number of dram channels
 610 */
 611static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 612{
 613	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 614
 615	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 616	case 0:
 617	default:
 618		return 1;
 619	case 1:
 620		return 2;
 621	case 2:
 622		return 4;
 623	case 3:
 624		return 8;
 625	case 4:
 626		return 3;
 627	case 5:
 628		return 6;
 629	case 6:
 630		return 10;
 631	case 7:
 632		return 12;
 633	case 8:
 634		return 16;
 635	}
 636}
 637
 638struct dce6_wm_params {
 639	u32 dram_channels; /* number of dram channels */
 640	u32 yclk;          /* bandwidth per dram data pin in kHz */
 641	u32 sclk;          /* engine clock in kHz */
 642	u32 disp_clk;      /* display clock in kHz */
 643	u32 src_width;     /* viewport width */
 644	u32 active_time;   /* active display time in ns */
 645	u32 blank_time;    /* blank time in ns */
 646	bool interlaced;    /* mode is interlaced */
 647	fixed20_12 vsc;    /* vertical scale ratio */
 648	u32 num_heads;     /* number of active crtcs */
 649	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 650	u32 lb_size;       /* line buffer allocated to pipe */
 651	u32 vtaps;         /* vertical scaler taps */
 652};
 653
 654/**
 655 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 656 *
 657 * @wm: watermark calculation data
 658 *
 659 * Calculate the raw dram bandwidth (SI).
 660 * Used for display watermark bandwidth calculations
 661 * Returns the dram bandwidth in MBytes/s
 662 */
 663static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
 664{
 665	/* Calculate raw DRAM Bandwidth */
 666	fixed20_12 dram_efficiency; /* 0.7 */
 667	fixed20_12 yclk, dram_channels, bandwidth;
 668	fixed20_12 a;
 669
 670	a.full = dfixed_const(1000);
 671	yclk.full = dfixed_const(wm->yclk);
 672	yclk.full = dfixed_div(yclk, a);
 673	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 674	a.full = dfixed_const(10);
 675	dram_efficiency.full = dfixed_const(7);
 676	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 677	bandwidth.full = dfixed_mul(dram_channels, yclk);
 678	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 679
 680	return dfixed_trunc(bandwidth);
 681}
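
/* Editor's worked example (made-up numbers): with yclk = 800000 kHz and
 * two dram channels, the fixed20_12 math above evaluates
 * 2 channels * 4 bytes * 800 MHz * 0.7 efficiency ~= 4480 MB/s
 * (4479 after fixed-point truncation):
 */
static u32 __maybe_unused dce_v6_0_dram_bandwidth_sketch(void)
{
	struct dce6_wm_params wm = {
		.yclk = 800000,		/* kHz */
		.dram_channels = 2,
	};

	return dce_v6_0_dram_bandwidth(&wm);
}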
 682
 683/**
 684 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 685 *
 686 * @wm: watermark calculation data
 687 *
 688 * Calculate the dram bandwidth used for display (SI).
 689 * Used for display watermark bandwidth calculations
 690 * Returns the dram bandwidth for display in MBytes/s
 691 */
 692static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 693{
 694	/* Calculate DRAM Bandwidth and the part allocated to display. */
 695	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 696	fixed20_12 yclk, dram_channels, bandwidth;
 697	fixed20_12 a;
 698
 699	a.full = dfixed_const(1000);
 700	yclk.full = dfixed_const(wm->yclk);
 701	yclk.full = dfixed_div(yclk, a);
 702	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 703	a.full = dfixed_const(10);
 704	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 705	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 706	bandwidth.full = dfixed_mul(dram_channels, yclk);
 707	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 708
 709	return dfixed_trunc(bandwidth);
 710}
 711
 712/**
 713 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 714 *
 715 * @wm: watermark calculation data
 716 *
 717 * Calculate the data return bandwidth used for display (SI).
 718 * Used for display watermark bandwidth calculations
 719 * Returns the data return bandwidth in MBytes/s
 720 */
 721static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
 722{
 723	/* Calculate the display Data return Bandwidth */
 724	fixed20_12 return_efficiency; /* 0.8 */
 725	fixed20_12 sclk, bandwidth;
 726	fixed20_12 a;
 727
 728	a.full = dfixed_const(1000);
 729	sclk.full = dfixed_const(wm->sclk);
 730	sclk.full = dfixed_div(sclk, a);
 731	a.full = dfixed_const(10);
 732	return_efficiency.full = dfixed_const(8);
 733	return_efficiency.full = dfixed_div(return_efficiency, a);
 734	a.full = dfixed_const(32);
 735	bandwidth.full = dfixed_mul(a, sclk);
 736	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 737
 738	return dfixed_trunc(bandwidth);
 739}
 740
 741/**
 742 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 743 *
 744 * @wm: watermark calculation data
 745 *
 746 * Calculate the dmif bandwidth used for display (SI).
 747 * Used for display watermark bandwidth calculations
 748 * Returns the dmif bandwidth in MBytes/s
 749 */
 750static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
 751{
 752	/* Calculate the DMIF Request Bandwidth */
 753	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 754	fixed20_12 disp_clk, bandwidth;
 755	fixed20_12 a, b;
 756
 757	a.full = dfixed_const(1000);
 758	disp_clk.full = dfixed_const(wm->disp_clk);
 759	disp_clk.full = dfixed_div(disp_clk, a);
 760	a.full = dfixed_const(32);
 761	b.full = dfixed_mul(a, disp_clk);
 762
 763	a.full = dfixed_const(10);
 764	disp_clk_request_efficiency.full = dfixed_const(8);
 765	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 766
 767	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 768
 769	return dfixed_trunc(bandwidth);
 770}
 771
 772/**
 773 * dce_v6_0_available_bandwidth - get the min available bandwidth
 774 *
 775 * @wm: watermark calculation data
 776 *
 777 * Calculate the min available bandwidth used for display (SI).
 778 * Used for display watermark bandwidth calculations
 779 * Returns the min available bandwidth in MBytes/s
 780 */
 781static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
 782{
 783	/* Calculate the available bandwidth. The display can use this temporarily but not sustained on average. */
 784	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
 785	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
 786	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
 787
 788	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 789}
 790
 791/**
 792 * dce_v6_0_average_bandwidth - get the average available bandwidth
 793 *
 794 * @wm: watermark calculation data
 795 *
 796 * Calculate the average available bandwidth used for display (SI).
 797 * Used for display watermark bandwidth calculations
 798 * Returns the average available bandwidth in MBytes/s
 799 */
 800static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
 801{
 802	/* Calculate the display mode Average Bandwidth
 803	 * DisplayMode should contain the source and destination dimensions,
 804	 * timing, etc.
 805	 */
 806	fixed20_12 bpp;
 807	fixed20_12 line_time;
 808	fixed20_12 src_width;
 809	fixed20_12 bandwidth;
 810	fixed20_12 a;
 811
 812	a.full = dfixed_const(1000);
 813	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 814	line_time.full = dfixed_div(line_time, a);
 815	bpp.full = dfixed_const(wm->bytes_per_pixel);
 816	src_width.full = dfixed_const(wm->src_width);
 817	bandwidth.full = dfixed_mul(src_width, bpp);
 818	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 819	bandwidth.full = dfixed_div(bandwidth, line_time);
 820
 821	return dfixed_trunc(bandwidth);
 822}
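
/* Editor's worked example (made-up numbers): a 1920-wide, 4 bytes/pixel,
 * unscaled surface with a 16667 ns total line time averages roughly
 * 1920 * 4 bytes / 16.667 us ~= 460 MB/s:
 */
static u32 __maybe_unused dce_v6_0_average_bandwidth_sketch(void)
{
	struct dce6_wm_params wm = {
		.src_width = 1920,
		.bytes_per_pixel = 4,
		.active_time = 14000,	/* ns */
		.blank_time = 2667,	/* ns */
	};

	wm.vsc.full = dfixed_const(1);	/* no vertical scaling */

	return dce_v6_0_average_bandwidth(&wm);
}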
 823
 824/**
 825 * dce_v6_0_latency_watermark - get the latency watermark
 826 *
 827 * @wm: watermark calculation data
 828 *
 829 * Calculate the latency watermark (SI).
 830 * Used for display watermark bandwidth calculations
 831 * Returns the latency watermark in ns
 832 */
 833static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 834{
 835	/* First calculate the latency in ns */
 836	u32 mc_latency = 2000; /* 2000 ns. */
 837	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
 838	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 839	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 840	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 841	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 842		(wm->num_heads * cursor_line_pair_return_time);
 843	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 844	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 845	u32 tmp, dmif_size = 12288;
 846	fixed20_12 a, b, c;
 847
 848	if (wm->num_heads == 0)
 849		return 0;
 850
 851	a.full = dfixed_const(2);
 852	b.full = dfixed_const(1);
 853	if ((wm->vsc.full > a.full) ||
 854	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 855	    (wm->vtaps >= 5) ||
 856	    ((wm->vsc.full >= a.full) && wm->interlaced))
 857		max_src_lines_per_dst_line = 4;
 858	else
 859		max_src_lines_per_dst_line = 2;
 860
 861	a.full = dfixed_const(available_bandwidth);
 862	b.full = dfixed_const(wm->num_heads);
 863	a.full = dfixed_div(a, b);
 864
 865	b.full = dfixed_const(mc_latency + 512);
 866	c.full = dfixed_const(wm->disp_clk);
 867	b.full = dfixed_div(b, c);
 868
 869	c.full = dfixed_const(dmif_size);
 870	b.full = dfixed_div(c, b);
 871
 872	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 873
 874	b.full = dfixed_const(1000);
 875	c.full = dfixed_const(wm->disp_clk);
 876	b.full = dfixed_div(c, b);
 877	c.full = dfixed_const(wm->bytes_per_pixel);
 878	b.full = dfixed_mul(b, c);
 879
 880	lb_fill_bw = min(tmp, dfixed_trunc(b));
 881
 882	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 883	b.full = dfixed_const(1000);
 884	c.full = dfixed_const(lb_fill_bw);
 885	b.full = dfixed_div(c, b);
 886	a.full = dfixed_div(a, b);
 887	line_fill_time = dfixed_trunc(a);
 888
 889	if (line_fill_time < wm->active_time)
 890		return latency;
 891	else
 892		return latency + (line_fill_time - wm->active_time);
 893
 894}
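
/* Editor's summary of the function above: the base watermark is
 *   mc_latency + dc_latency + other_heads_data_return_time
 * and it is stretched by (line_fill_time - active_time) whenever the line
 * buffer cannot be refilled within one active line at lb_fill_bw.
 */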
 895
 896/**
 897 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 898 * average and available dram bandwidth
 899 *
 900 * @wm: watermark calculation data
 901 *
 902 * Check if the display average bandwidth fits in the display
 903 * dram bandwidth (SI).
 904 * Used for display watermark bandwidth calculations
 905 * Returns true if the display fits, false if not.
 906 */
 907static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 908{
 909	if (dce_v6_0_average_bandwidth(wm) <=
 910	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 911		return true;
 912	else
 913		return false;
 914}
 915
 916/**
 917 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 918 * average and available bandwidth
 919 *
 920 * @wm: watermark calculation data
 921 *
 922 * Check if the display average bandwidth fits in the display
 923 * available bandwidth (SI).
 924 * Used for display watermark bandwidth calculations
 925 * Returns true if the display fits, false if not.
 926 */
 927static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 928{
 929	if (dce_v6_0_average_bandwidth(wm) <=
 930	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
 931		return true;
 932	else
 933		return false;
 934}
 935
 936/**
 937 * dce_v6_0_check_latency_hiding - check latency hiding
 938 *
 939 * @wm: watermark calculation data
 940 *
 941 * Check latency hiding (SI).
 942 * Used for display watermark bandwidth calculations
 943 * Returns true if the display fits, false if not.
 944 */
 945static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
 946{
 947	u32 lb_partitions = wm->lb_size / wm->src_width;
 948	u32 line_time = wm->active_time + wm->blank_time;
 949	u32 latency_tolerant_lines;
 950	u32 latency_hiding;
 951	fixed20_12 a;
 952
 953	a.full = dfixed_const(1);
 954	if (wm->vsc.full > a.full)
 955		latency_tolerant_lines = 1;
 956	else {
 957		if (lb_partitions <= (wm->vtaps + 1))
 958			latency_tolerant_lines = 1;
 959		else
 960			latency_tolerant_lines = 2;
 961	}
 962
 963	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 964
 965	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
 966		return true;
 967	else
 968		return false;
 969}
 970
 971/**
 972 * dce_v6_0_program_watermarks - program display watermarks
 973 *
 974 * @adev: amdgpu_device pointer
 975 * @amdgpu_crtc: the selected display controller
 976 * @lb_size: line buffer size
 977 * @num_heads: number of display controllers in use
 978 *
 979 * Calculate and program the display watermarks for the
 980 * selected display controller (SI).
 981 */
 982static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 983					struct amdgpu_crtc *amdgpu_crtc,
 984					u32 lb_size, u32 num_heads)
 985{
 986	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 987	struct dce6_wm_params wm_low, wm_high;
 988	u32 dram_channels;
 989	u32 pixel_period;
 990	u32 line_time = 0;
 991	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 992	u32 priority_a_mark = 0, priority_b_mark = 0;
 993	u32 priority_a_cnt = PRIORITY_OFF;
 994	u32 priority_b_cnt = PRIORITY_OFF;
 995	u32 tmp, arb_control3;
 996	fixed20_12 a, b, c;
 997
 998	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 999		pixel_period = 1000000 / (u32)mode->clock;
1000		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1001		priority_a_cnt = 0;
1002		priority_b_cnt = 0;
1003
1004		dram_channels = si_get_number_of_dram_channels(adev);
1005
1006		/* watermark for high clocks */
1007		if (adev->pm.dpm_enabled) {
1008			wm_high.yclk =
1009				amdgpu_dpm_get_mclk(adev, false) * 10;
1010			wm_high.sclk =
1011				amdgpu_dpm_get_sclk(adev, false) * 10;
1012		} else {
1013			wm_high.yclk = adev->pm.current_mclk * 10;
1014			wm_high.sclk = adev->pm.current_sclk * 10;
1015		}
1016
1017		wm_high.disp_clk = mode->clock;
1018		wm_high.src_width = mode->crtc_hdisplay;
1019		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1020		wm_high.blank_time = line_time - wm_high.active_time;
1021		wm_high.interlaced = false;
1022		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1023			wm_high.interlaced = true;
1024		wm_high.vsc = amdgpu_crtc->vsc;
1025		wm_high.vtaps = 1;
1026		if (amdgpu_crtc->rmx_type != RMX_OFF)
1027			wm_high.vtaps = 2;
1028		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1029		wm_high.lb_size = lb_size;
1030		wm_high.dram_channels = dram_channels;
1031		wm_high.num_heads = num_heads;
1032
1033		if (adev->pm.dpm_enabled) {
1034		/* watermark for low clocks */
1035			wm_low.yclk =
1036				amdgpu_dpm_get_mclk(adev, true) * 10;
1037			wm_low.sclk =
1038				amdgpu_dpm_get_sclk(adev, true) * 10;
1039		} else {
1040			wm_low.yclk = adev->pm.current_mclk * 10;
1041			wm_low.sclk = adev->pm.current_sclk * 10;
1042		}
1043
1044		wm_low.disp_clk = mode->clock;
1045		wm_low.src_width = mode->crtc_hdisplay;
1046		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1047		wm_low.blank_time = line_time - wm_low.active_time;
1048		wm_low.interlaced = false;
1049		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1050			wm_low.interlaced = true;
1051		wm_low.vsc = amdgpu_crtc->vsc;
1052		wm_low.vtaps = 1;
1053		if (amdgpu_crtc->rmx_type != RMX_OFF)
1054			wm_low.vtaps = 2;
1055		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1056		wm_low.lb_size = lb_size;
1057		wm_low.dram_channels = dram_channels;
1058		wm_low.num_heads = num_heads;
1059
1060		/* set for high clocks */
1061		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
1062		/* set for low clocks */
1063		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
1064
1065		/* possibly force display priority to high */
1066		/* should really do this at mode validation time... */
1067		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1068		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1069		    !dce_v6_0_check_latency_hiding(&wm_high) ||
1070		    (adev->mode_info.disp_priority == 2)) {
1071			DRM_DEBUG_KMS("force priority to high\n");
1072			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1073			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1074		}
1075		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1076		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1077		    !dce_v6_0_check_latency_hiding(&wm_low) ||
1078		    (adev->mode_info.disp_priority == 2)) {
1079			DRM_DEBUG_KMS("force priority to high\n");
1080			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1081			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1082		}
1083
1084		a.full = dfixed_const(1000);
1085		b.full = dfixed_const(mode->clock);
1086		b.full = dfixed_div(b, a);
1087		c.full = dfixed_const(latency_watermark_a);
1088		c.full = dfixed_mul(c, b);
1089		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1090		c.full = dfixed_div(c, a);
1091		a.full = dfixed_const(16);
1092		c.full = dfixed_div(c, a);
1093		priority_a_mark = dfixed_trunc(c);
1094		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1095
1096		a.full = dfixed_const(1000);
1097		b.full = dfixed_const(mode->clock);
1098		b.full = dfixed_div(b, a);
1099		c.full = dfixed_const(latency_watermark_b);
1100		c.full = dfixed_mul(c, b);
1101		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1102		c.full = dfixed_div(c, a);
1103		a.full = dfixed_const(16);
1104		c.full = dfixed_div(c, a);
1105		priority_b_mark = dfixed_trunc(c);
1106		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1107	}
1108
1109	/* select wm A */
1110	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1111	tmp = arb_control3;
1112	tmp &= ~LATENCY_WATERMARK_MASK(3);
1113	tmp |= LATENCY_WATERMARK_MASK(1);
1114	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1115	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1116	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
1117		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1118	/* select wm B */
1119	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1120	tmp &= ~LATENCY_WATERMARK_MASK(3);
1121	tmp |= LATENCY_WATERMARK_MASK(2);
1122	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1123	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1124	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1125		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1126	/* restore original selection */
1127	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1128
1129	/* write the priority marks */
1130	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1131	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1132
1133	/* save values for DPM */
1134	amdgpu_crtc->line_time = line_time;
1135	amdgpu_crtc->wm_high = latency_watermark_a;
1136}
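
/* Editor's note on the priority marks above: each mark is the latency
 * watermark converted into 16-pixel units at the current pixel clock,
 *   mark = latency_ns * (clock_kHz / 1000) * hsc / 1000 / 16,
 * i.e. roughly "pixels scanned out during the latency window, divided
 * by 16".
 */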
1137
1138/* watermark setup */
1139static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1140				   struct amdgpu_crtc *amdgpu_crtc,
1141				   struct drm_display_mode *mode,
1142				   struct drm_display_mode *other_mode)
1143{
1144	u32 tmp, buffer_alloc, i;
1145	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1146	/*
1147	 * Line Buffer Setup
1148	 * There are 3 line buffers, each one shared by 2 display controllers.
1149	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1150	 * the display controllers.  The partitioning is done via one of four
1151	 * preset allocations specified in bits 21:20 (the two used here):
1152	 *  0 - half lb
1153	 *  2 - whole lb, other crtc must be disabled
1154	 */
1155	/* this can get tricky if we have two large displays on a paired group
1156	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1157	 * non-linked crtcs for maximum line buffer allocation.
1158	 */
1159	if (amdgpu_crtc->base.enabled && mode) {
1160		if (other_mode) {
1161			tmp = 0; /* 1/2 */
1162			buffer_alloc = 1;
1163		} else {
1164			tmp = 2; /* whole */
1165			buffer_alloc = 2;
1166		}
1167	} else {
1168		tmp = 0;
1169		buffer_alloc = 0;
1170	}
1171
1172	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1173	       DC_LB_MEMORY_CONFIG(tmp));
1174
1175	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1176	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1177	for (i = 0; i < adev->usec_timeout; i++) {
1178		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1179		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1180			break;
1181		udelay(1);
1182	}
1183
1184	if (amdgpu_crtc->base.enabled && mode) {
1185		switch (tmp) {
1186		case 0:
1187		default:
1188			return 4096 * 2;
1189		case 2:
1190			return 8192 * 2;
1191		}
1192	}
1193
1194	/* controller not enabled, so no lb used */
1195	return 0;
1196}
1197
1198
1199/**
1200 * dce_v6_0_bandwidth_update - program display watermarks
1201 *
1202 *
1203 * @adev: amdgpu_device pointer
1204 *
1205 * Calculate and program the display watermarks and line
1206 * buffer allocation (SI).
1207 */
1208static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1209{
1210	struct drm_display_mode *mode0 = NULL;
1211	struct drm_display_mode *mode1 = NULL;
1212	u32 num_heads = 0, lb_size;
1213	int i;
1214
1215	if (!adev->mode_info.mode_config_initialized)
1216		return;
1217
1218	amdgpu_update_display_priority(adev);
1219
1220	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1221		if (adev->mode_info.crtcs[i]->base.enabled)
1222			num_heads++;
1223	}
1224	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1225		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1226		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1227		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1228		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1229		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1230		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1231	}
1232}
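
/* Editor's note: crtcs are walked in pairs (i, i+1) above because each of
 * the three line buffers is shared by two display controllers; every pass
 * re-splits one shared buffer and then reprograms both sets of watermarks
 * with the resulting allocation.
 */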
1233/*
1234static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1235{
1236	int i;
1237	u32 offset, tmp;
1238
1239	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1240		offset = adev->mode_info.audio.pin[i].offset;
1241		tmp = RREG32_AUDIO_ENDPT(offset,
1242				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1243		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
1244			adev->mode_info.audio.pin[i].connected = false;
1245		else
1246			adev->mode_info.audio.pin[i].connected = true;
1247	}
1248
1249}
1250
1251static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1252{
1253	int i;
1254
1255	dce_v6_0_audio_get_connected_pins(adev);
1256
1257	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1258		if (adev->mode_info.audio.pin[i].connected)
1259			return &adev->mode_info.audio.pin[i];
1260	}
1261	DRM_ERROR("No connected audio pins found!\n");
1262	return NULL;
1263}
1264
1265static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1266{
1267	struct amdgpu_device *adev = encoder->dev->dev_private;
1268	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1269	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1270	u32 offset;
1271
1272	if (!dig || !dig->afmt || !dig->afmt->pin)
1273		return;
1274
1275	offset = dig->afmt->offset;
1276
1277	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
1278	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
1279
1280}
1281
1282static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1283						struct drm_display_mode *mode)
1284{
1285	DRM_INFO("dce_v6_0_audio_write_latency_fields: not implemented\n");
1286}
1287
1288static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1289{
1290	DRM_INFO("dce_v6_0_audio_write_speaker_allocation: not implemented\n");
1291}
1292
1293static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1294{
1295	DRM_INFO("dce_v6_0_audio_write_sad_regs: not implemented\n");
1296
1297}
1298*/
1299static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1300				  struct amdgpu_audio_pin *pin,
1301				  bool enable)
1302{
1303	DRM_INFO("dce_v6_0_audio_enable: not implemented\n");
1304}
1305
1306static const u32 pin_offsets[7] =
1307{
1308	(0x1780 - 0x1780),
1309	(0x1786 - 0x1780),
1310	(0x178c - 0x1780),
1311	(0x1792 - 0x1780),
1312	(0x1798 - 0x1780),
1313	(0x179d - 0x1780),
1314	(0x17a4 - 0x1780),
1315};
1316
1317static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1318{
1319	return 0;
1320}
1321
1322static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1323{
1324
1325}
1326
1327/*
1328static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1329{
1330	DRM_INFO("dce_v6_0_afmt_update_ACR: not implemented\n");
1331}
1332*/
1333/*
1334 * build a HDMI Video Info Frame
1335 */
1336/*
1337static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1338					       void *buffer, size_t size)
1339{
1340	DRM_INFO("dce_v6_0_afmt_update_avi_infoframe: not implemented\n");
1341}
1342
1343static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1344{
1345	DRM_INFO("dce_v6_0_audio_set_dto: not implemented\n");
1346}
1347*/
1348/*
1349 * update the info frames with the data from the current display mode
1350 */
1351static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1352				  struct drm_display_mode *mode)
1353{
1354	DRM_INFO("dce_v6_0_afmt_setmode: not implemented\n");
1355}
1356
1357static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1358{
1359	struct drm_device *dev = encoder->dev;
1360	struct amdgpu_device *adev = dev->dev_private;
1361	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1362	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1363
1364	if (!dig || !dig->afmt)
1365		return;
1366
1367	/* return silently on redundant enable requests */
1368	if (enable && dig->afmt->enabled)
1369		return;
1370	if (!enable && !dig->afmt->enabled)
1371		return;
1372
1373	if (!enable && dig->afmt->pin) {
1374		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1375		dig->afmt->pin = NULL;
1376	}
1377
1378	dig->afmt->enabled = enable;
1379
1380	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1381		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1382}
1383
1384static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1385{
1386	int i, j;
1387
1388	for (i = 0; i < adev->mode_info.num_dig; i++)
1389		adev->mode_info.afmt[i] = NULL;
1390
1391	/* DCE6 has audio blocks tied to DIG encoders */
1392	for (i = 0; i < adev->mode_info.num_dig; i++) {
1393		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1394		if (adev->mode_info.afmt[i]) {
1395			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1396			adev->mode_info.afmt[i]->id = i;
1397		} else {
1398			for (j = 0; j < i; j++) {
1399				kfree(adev->mode_info.afmt[j]);
1400				adev->mode_info.afmt[j] = NULL;
1401			}
1402			DRM_ERROR("Out of memory allocating afmt table\n");
1403			return -ENOMEM;
1404		}
1405	}
1406	return 0;
1407}
1408
1409static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1410{
1411	int i;
1412
1413	for (i = 0; i < adev->mode_info.num_dig; i++) {
1414		kfree(adev->mode_info.afmt[i]);
1415		adev->mode_info.afmt[i] = NULL;
1416	}
1417}
1418
1419static const u32 vga_control_regs[6] =
1420{
1421	mmD1VGA_CONTROL,
1422	mmD2VGA_CONTROL,
1423	mmD3VGA_CONTROL,
1424	mmD4VGA_CONTROL,
1425	mmD5VGA_CONTROL,
1426	mmD6VGA_CONTROL,
1427};
1428
1429static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1430{
1431	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1432	struct drm_device *dev = crtc->dev;
1433	struct amdgpu_device *adev = dev->dev_private;
1434	u32 vga_control;
1435
1436	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1437	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1438}
1439
1440static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1441{
1442	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1443	struct drm_device *dev = crtc->dev;
1444	struct amdgpu_device *adev = dev->dev_private;
1445
1446	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1447}
1448
1449static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1450				     struct drm_framebuffer *fb,
1451				     int x, int y, int atomic)
1452{
1453	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1454	struct drm_device *dev = crtc->dev;
1455	struct amdgpu_device *adev = dev->dev_private;
1456	struct amdgpu_framebuffer *amdgpu_fb;
1457	struct drm_framebuffer *target_fb;
1458	struct drm_gem_object *obj;
1459	struct amdgpu_bo *abo;
1460	uint64_t fb_location, tiling_flags;
1461	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1462	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1463	u32 viewport_w, viewport_h;
1464	int r;
1465	bool bypass_lut = false;
1466	struct drm_format_name_buf format_name;
1467
1468	/* no fb bound */
1469	if (!atomic && !crtc->primary->fb) {
1470		DRM_DEBUG_KMS("No FB bound\n");
1471		return 0;
1472	}
1473
1474	if (atomic) {
1475		amdgpu_fb = to_amdgpu_framebuffer(fb);
1476		target_fb = fb;
1477	} else {
1478		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1479		target_fb = crtc->primary->fb;
1480	}
1481
1482	/* If atomic, assume fb object is pinned & idle & fenced and
1483	 * just update base pointers
1484	 */
1485	obj = amdgpu_fb->obj;
1486	abo = gem_to_amdgpu_bo(obj);
1487	r = amdgpu_bo_reserve(abo, false);
1488	if (unlikely(r != 0))
1489		return r;
1490
1491	if (atomic) {
1492		fb_location = amdgpu_bo_gpu_offset(abo);
1493	} else {
1494		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1495		if (unlikely(r != 0)) {
1496			amdgpu_bo_unreserve(abo);
1497			return -EINVAL;
1498		}
1499	}
1500
1501	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1502	amdgpu_bo_unreserve(abo);
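	/*
	 * The tiling flags were stored on the BO when it was created; they
	 * describe the memory layout that GRPH_CONTROL must be programmed
	 * to match below.
	 */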
1503
1504	switch (target_fb->pixel_format) {
1505	case DRM_FORMAT_C8:
1506		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1507			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1508		break;
1509	case DRM_FORMAT_XRGB4444:
1510	case DRM_FORMAT_ARGB4444:
1511		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1512			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1513#ifdef __BIG_ENDIAN
1514		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1515#endif
1516		break;
1517	case DRM_FORMAT_XRGB1555:
1518	case DRM_FORMAT_ARGB1555:
1519		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1520			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1521#ifdef __BIG_ENDIAN
1522		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1523#endif
1524		break;
1525	case DRM_FORMAT_BGRX5551:
1526	case DRM_FORMAT_BGRA5551:
1527		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1528			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1529#ifdef __BIG_ENDIAN
1530		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1531#endif
1532		break;
1533	case DRM_FORMAT_RGB565:
1534		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1535			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1536#ifdef __BIG_ENDIAN
1537		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1538#endif
1539		break;
1540	case DRM_FORMAT_XRGB8888:
1541	case DRM_FORMAT_ARGB8888:
1542		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1543			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1544#ifdef __BIG_ENDIAN
1545		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1546#endif
1547		break;
1548	case DRM_FORMAT_XRGB2101010:
1549	case DRM_FORMAT_ARGB2101010:
1550		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1551			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1552#ifdef __BIG_ENDIAN
1553		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1554#endif
1555		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1556		bypass_lut = true;
1557		break;
1558	case DRM_FORMAT_BGRX1010102:
1559	case DRM_FORMAT_BGRA1010102:
1560		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1561			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1562#ifdef __BIG_ENDIAN
1563		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1564#endif
1565		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1566		bypass_lut = true;
1567		break;
1568	default:
1569		DRM_ERROR("Unsupported screen format %s\n",
1570		          drm_get_format_name(target_fb->pixel_format, &format_name));
1571		return -EINVAL;
1572	}
1573
1574	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1575		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1576
1577		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1578		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1579		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1580		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1581		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1582
1583		fb_format |= GRPH_NUM_BANKS(num_banks);
1584		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1585		fb_format |= GRPH_TILE_SPLIT(tile_split);
1586		fb_format |= GRPH_BANK_WIDTH(bankw);
1587		fb_format |= GRPH_BANK_HEIGHT(bankh);
1588		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1589	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1590		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1591	}
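	/* linear surfaces (ARRAY_MODE 0) need neither an array-mode bit nor bank geometry */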
1592
1593	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1594	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1595
1596	dce_v6_0_vga_enable(crtc, false);
1597
1598	/* Make sure surface address is updated at vertical blank rather than
1599	 * horizontal blank
1600	 */
1601	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1602
1603	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1604	       upper_32_bits(fb_location));
1605	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1606	       upper_32_bits(fb_location));
1607	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1608	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1609	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1610	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
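	/*
	 * Primary and secondary surface addresses start out identical; a
	 * later page flip retargets the primary (see dce_v6_0_page_flip()).
	 */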
1611	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1612	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1613
1614	/*
1615	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1616	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1617	 * retain the full precision throughout the pipeline.
1618	 */
1619	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1620		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1621		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1622
1623	if (bypass_lut)
1624		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1625
1626	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1627	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1628	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1629	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1630	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1631	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1632
1633	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1634	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1635
1636	dce_v6_0_grph_enable(crtc, true);
1637
1638	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1639		       target_fb->height);
1640	x &= ~3;
1641	y &= ~1;
1642	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1643	       (x << 16) | y);
1644	viewport_w = crtc->mode.hdisplay;
1645	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1646
1647	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1648	       (viewport_w << 16) | viewport_h);
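	/*
	 * The viewport selects the region of the surface that is scanned
	 * out; x was aligned down to 4 pixels and y to 2 lines above to
	 * satisfy hardware alignment constraints.
	 */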
1649
1650	/* set pageflip to happen anywhere in vblank interval */
1651	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1652
1653	if (!atomic && fb && fb != crtc->primary->fb) {
1654		amdgpu_fb = to_amdgpu_framebuffer(fb);
1655		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1656		r = amdgpu_bo_reserve(abo, false);
1657		if (unlikely(r != 0))
1658			return r;
1659		amdgpu_bo_unpin(abo);
1660		amdgpu_bo_unreserve(abo);
1661	}
1662
1663	/* Bytes per pixel may have changed */
1664	dce_v6_0_bandwidth_update(adev);
1665
1666	return 0;
1667
1668}
1669
1670static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1671				    struct drm_display_mode *mode)
1672{
1673	struct drm_device *dev = crtc->dev;
1674	struct amdgpu_device *adev = dev->dev_private;
1675	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1676
1677	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1678		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
1679		       INTERLEAVE_EN);
1680	else
1681		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1682}
1683
1684static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1685{
1686
1687	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1688	struct drm_device *dev = crtc->dev;
1689	struct amdgpu_device *adev = dev->dev_private;
1690	int i;
1691
1692	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1693
1694	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1695	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1696		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
1697	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1698	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
1699	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1700	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
1701	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1702	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
1703		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
1704
1705	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1706
1707	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1708	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1709	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1710
1711	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1712	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1713	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1714
1715	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1716	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1717
1718	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
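	/*
	 * DC_LUT_30_COLOR packs one 10-bit value per channel: red in bits
	 * 29:20, green in 19:10, blue in 9:0.  The RW index auto-increments
	 * on each write, so a single index write covers all 256 entries.
	 */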
1719	for (i = 0; i < 256; i++) {
1720		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1721		       (amdgpu_crtc->lut_r[i] << 20) |
1722		       (amdgpu_crtc->lut_g[i] << 10) |
1723		       (amdgpu_crtc->lut_b[i] << 0));
1724	}
1725
1726	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1727	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
1728		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
1729		ICON_DEGAMMA_MODE(0) |
1730		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
1731	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1732	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
1733		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
1734	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1735	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
1736		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
1737	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1738	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
1739		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
1740	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
1741	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1742
1743
1744}
1745
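/*
 * Link A and link B of each UNIPHY block map to consecutive DIG
 * encoders (UNIPHY -> DIG0/1, UNIPHY1 -> DIG2/3, UNIPHY2 -> DIG4/5);
 * UNIPHY3 always uses DIG6.
 */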
1746static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1747{
1748	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1749	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1750
1751	switch (amdgpu_encoder->encoder_id) {
1752	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1753		return dig->linkb ? 1 : 0;
1754	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1755		return dig->linkb ? 3 : 2;
1756	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1757		return dig->linkb ? 5 : 4;
1758	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1759		return 6;
1760	default:
1761		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1762		return 0;
1763	}
1764}
1765
1766/**
1767 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1768 *
1769 * @crtc: drm crtc
1770 *
1771 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
1772 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
1773 * monitors a dedicated PPLL must be used.  If a particular board has
1774 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1775 * as there is no need to program the PLL itself.  If we are not able to
1776 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1777 * avoid messing up an existing monitor.
1778 *
1779 *
1780 */
1781static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1782{
1783	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1784	struct drm_device *dev = crtc->dev;
1785	struct amdgpu_device *adev = dev->dev_private;
1786	u32 pll_in_use;
1787	int pll;
1788
1789	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1790		if (adev->clock.dp_extclk)
1791			/* skip PPLL programming if using ext clock */
1792			return ATOM_PPLL_INVALID;
1793		else
1794			return ATOM_PPLL0;
1795	} else {
1796		/* use the same PPLL for all monitors with the same clock */
1797		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1798		if (pll != ATOM_PPLL_INVALID)
1799			return pll;
1800	}
1801
1802	/* otherwise pick whichever of the dedicated PLLs (PPLL1, PPLL2) is free */
1803	pll_in_use = amdgpu_pll_get_use_mask(crtc);
1804	if (!(pll_in_use & (1 << ATOM_PPLL2)))
1805		return ATOM_PPLL2;
1806	if (!(pll_in_use & (1 << ATOM_PPLL1)))
1807		return ATOM_PPLL1;
1808	DRM_ERROR("unable to allocate a PPLL\n");
1809	return ATOM_PPLL_INVALID;
1810}
1811
1812static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1813{
1814	struct amdgpu_device *adev = crtc->dev->dev_private;
1815	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1816	uint32_t cur_lock;
1817
1818	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
1819	if (lock)
1820		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1821	else
1822		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1823	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1824}
1825
1826static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1827{
1828	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1829	struct amdgpu_device *adev = crtc->dev->dev_private;
1830
1831	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1832		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1833		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1834
1835
1836}
1837
1838static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1839{
1840	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1841	struct amdgpu_device *adev = crtc->dev->dev_private;
1842
1843	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1844	       upper_32_bits(amdgpu_crtc->cursor_addr));
1845	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1846	       lower_32_bits(amdgpu_crtc->cursor_addr));
1847
1848	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1849		   CUR_CONTROL__CURSOR_EN_MASK |
1850		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1851		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1852
1853}
1854
1855static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1856				       int x, int y)
1857{
1858	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1859	struct amdgpu_device *adev = crtc->dev->dev_private;
1860	int xorigin = 0, yorigin = 0;
1861
1862	int w = amdgpu_crtc->cursor_width;
1863
1864	amdgpu_crtc->cursor_x = x;
1865	amdgpu_crtc->cursor_y = y;
1866
1867	/* avivo cursors are offset into the total surface */
1868	x += crtc->x;
1869	y += crtc->y;
1870	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1871
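	/*
	 * CUR_POSITION cannot go negative: if the cursor hangs off the
	 * top/left edge, clamp the position to 0 and shift the hotspot by
	 * the clipped amount instead so the visible part stays put.
	 */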
1872	if (x < 0) {
1873		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1874		x = 0;
1875	}
1876	if (y < 0) {
1877		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1878		y = 0;
1879	}
1880
1881	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1882	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1883	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1884	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1885
1886	return 0;
1887}
1888
1889static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1890				     int x, int y)
1891{
1892	int ret;
1893
1894	dce_v6_0_lock_cursor(crtc, true);
1895	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1896	dce_v6_0_lock_cursor(crtc, false);
1897
1898	return ret;
1899}
1900
1901static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1902				     struct drm_file *file_priv,
1903				     uint32_t handle,
1904				     uint32_t width,
1905				     uint32_t height,
1906				     int32_t hot_x,
1907				     int32_t hot_y)
1908{
1909	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1910	struct drm_gem_object *obj;
1911	struct amdgpu_bo *aobj;
1912	int ret;
1913
1914	if (!handle) {
1915		/* turn off cursor */
1916		dce_v6_0_hide_cursor(crtc);
1917		obj = NULL;
1918		goto unpin;
1919	}
1920
1921	if ((width > amdgpu_crtc->max_cursor_width) ||
1922	    (height > amdgpu_crtc->max_cursor_height)) {
1923		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1924		return -EINVAL;
1925	}
1926
1927	obj = drm_gem_object_lookup(file_priv, handle);
1928	if (!obj) {
1929		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1930		return -ENOENT;
1931	}
1932
1933	aobj = gem_to_amdgpu_bo(obj);
1934	ret = amdgpu_bo_reserve(aobj, false);
1935	if (ret != 0) {
1936		drm_gem_object_unreference_unlocked(obj);
1937		return ret;
1938	}
1939
1940	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
1941	amdgpu_bo_unreserve(aobj);
1942	if (ret) {
1943		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
1944		drm_gem_object_unreference_unlocked(obj);
1945		return ret;
1946	}
1947
1948	dce_v6_0_lock_cursor(crtc, true);
1949
1950	if (width != amdgpu_crtc->cursor_width ||
1951	    height != amdgpu_crtc->cursor_height ||
1952	    hot_x != amdgpu_crtc->cursor_hot_x ||
1953	    hot_y != amdgpu_crtc->cursor_hot_y) {
1954		int x, y;
1955
1956		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
1957		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
1958
1959		dce_v6_0_cursor_move_locked(crtc, x, y);
1960
1961		amdgpu_crtc->cursor_width = width;
1962		amdgpu_crtc->cursor_height = height;
1963		amdgpu_crtc->cursor_hot_x = hot_x;
1964		amdgpu_crtc->cursor_hot_y = hot_y;
1965	}
1966
1967	dce_v6_0_show_cursor(crtc);
1968	dce_v6_0_lock_cursor(crtc, false);
1969
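	/*
	 * Unpin and drop the reference on the previous cursor BO, if any;
	 * this is also the path taken when the cursor is turned off.
	 */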
1970unpin:
1971	if (amdgpu_crtc->cursor_bo) {
1972		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1973		ret = amdgpu_bo_reserve(aobj, false);
1974		if (likely(ret == 0)) {
1975			amdgpu_bo_unpin(aobj);
1976			amdgpu_bo_unreserve(aobj);
1977		}
1978		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
1979	}
1980
1981	amdgpu_crtc->cursor_bo = obj;
1982	return 0;
1983}
1984
1985static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1986{
1987	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1988
1989	if (amdgpu_crtc->cursor_bo) {
1990		dce_v6_0_lock_cursor(crtc, true);
1991
1992		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1993					    amdgpu_crtc->cursor_y);
1994
1995		dce_v6_0_show_cursor(crtc);
1996		dce_v6_0_lock_cursor(crtc, false);
1997	}
1998}
1999
2000static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2001				   u16 *blue, uint32_t size)
2002{
2003	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2004	int i;
2005
2006	/* userspace palettes are always correct as is */
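	/* the >> 6 truncates 16-bit values to the 10-bit range the hardware LUT stores */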
2007	for (i = 0; i < size; i++) {
2008		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2009		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2010		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2011	}
2012	dce_v6_0_crtc_load_lut(crtc);
2013
2014	return 0;
2015}
2016
2017static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2018{
2019	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2020
2021	drm_crtc_cleanup(crtc);
2022	kfree(amdgpu_crtc);
2023}
2024
2025static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2026	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2027	.cursor_move = dce_v6_0_crtc_cursor_move,
2028	.gamma_set = dce_v6_0_crtc_gamma_set,
2029	.set_config = amdgpu_crtc_set_config,
2030	.destroy = dce_v6_0_crtc_destroy,
2031	.page_flip_target = amdgpu_crtc_page_flip_target,
2032};
2033
2034static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2035{
2036	struct drm_device *dev = crtc->dev;
2037	struct amdgpu_device *adev = dev->dev_private;
2038	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039	unsigned type;
2040
2041	switch (mode) {
2042	case DRM_MODE_DPMS_ON:
2043		amdgpu_crtc->enabled = true;
2044		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2045		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2046		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2047		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2048		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2049		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2050		drm_crtc_vblank_on(crtc);
2051		dce_v6_0_crtc_load_lut(crtc);
2052		break;
2053	case DRM_MODE_DPMS_STANDBY:
2054	case DRM_MODE_DPMS_SUSPEND:
2055	case DRM_MODE_DPMS_OFF:
2056		drm_crtc_vblank_off(crtc);
2057		if (amdgpu_crtc->enabled)
2058			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2059		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2060		amdgpu_crtc->enabled = false;
2061		break;
2062	}
2063	/* adjust pm to dpms */
2064	amdgpu_pm_compute_clocks(adev);
2065}
2066
2067static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2068{
2069	/* disable crtc pair power gating before programming */
2070	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2071	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2072	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2073}
2074
2075static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2076{
2077	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2078	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2079}
2080
2081static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2082{
2083
2084	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2085	struct drm_device *dev = crtc->dev;
2086	struct amdgpu_device *adev = dev->dev_private;
2087	struct amdgpu_atom_ss ss;
2088	int i;
2089
2090	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2091	if (crtc->primary->fb) {
2092		int r;
2093		struct amdgpu_framebuffer *amdgpu_fb;
2094		struct amdgpu_bo *abo;
2095
2096		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2097		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2098		r = amdgpu_bo_reserve(abo, false);
2099		if (unlikely(r))
2100			DRM_ERROR("failed to reserve abo before unpin\n");
2101		else {
2102			amdgpu_bo_unpin(abo);
2103			amdgpu_bo_unreserve(abo);
2104		}
2105	}
2106	/* disable the GRPH */
2107	dce_v6_0_grph_enable(crtc, false);
2108
2109	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2110
2111	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2112		if (adev->mode_info.crtcs[i] &&
2113		    adev->mode_info.crtcs[i]->enabled &&
2114		    i != amdgpu_crtc->crtc_id &&
2115		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2116			/* another crtc is still using this pll;
2117			 * don't turn it off
2118			 */
2119			goto done;
2120		}
2121	}
2122
2123	switch (amdgpu_crtc->pll_id) {
2124	case ATOM_PPLL1:
2125	case ATOM_PPLL2:
2126		/* disable the ppll */
2127		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2128						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2129		break;
2130	default:
2131		break;
2132	}
2133done:
2134	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2135	amdgpu_crtc->adjusted_clock = 0;
2136	amdgpu_crtc->encoder = NULL;
2137	amdgpu_crtc->connector = NULL;
2138}
2139
2140static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2141				  struct drm_display_mode *mode,
2142				  struct drm_display_mode *adjusted_mode,
2143				  int x, int y, struct drm_framebuffer *old_fb)
2144{
2145	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2146
2147	if (!amdgpu_crtc->adjusted_clock)
2148		return -EINVAL;
2149
2150	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2151	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2152	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2153	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2154	amdgpu_atombios_crtc_scaler_setup(crtc);
2155	dce_v6_0_cursor_reset(crtc);
2156	/* update the stored hw mode for dpm */
2157	amdgpu_crtc->hw_mode = *adjusted_mode;
2158
2159	return 0;
2160}
2161
2162static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2163				     const struct drm_display_mode *mode,
2164				     struct drm_display_mode *adjusted_mode)
2165{
2166
2167	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2168	struct drm_device *dev = crtc->dev;
2169	struct drm_encoder *encoder;
2170
2171	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2172	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2173		if (encoder->crtc == crtc) {
2174			amdgpu_crtc->encoder = encoder;
2175			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2176			break;
2177		}
2178	}
2179	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2180		amdgpu_crtc->encoder = NULL;
2181		amdgpu_crtc->connector = NULL;
2182		return false;
2183	}
2184	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2185		return false;
2186	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2187		return false;
2188	/* pick pll */
2189	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2190	/* if we can't get a PPLL for a non-DP encoder, fail */
2191	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2192	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2193		return false;
2194
2195	return true;
2196}
2197
2198static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2199				  struct drm_framebuffer *old_fb)
2200{
2201	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2202}
2203
2204static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2205					 struct drm_framebuffer *fb,
2206					 int x, int y, enum mode_set_atomic state)
2207{
2208       return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2209}
2210
2211static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2212	.dpms = dce_v6_0_crtc_dpms,
2213	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2214	.mode_set = dce_v6_0_crtc_mode_set,
2215	.mode_set_base = dce_v6_0_crtc_set_base,
2216	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2217	.prepare = dce_v6_0_crtc_prepare,
2218	.commit = dce_v6_0_crtc_commit,
2219	.load_lut = dce_v6_0_crtc_load_lut,
2220	.disable = dce_v6_0_crtc_disable,
2221};
2222
2223static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2224{
2225	struct amdgpu_crtc *amdgpu_crtc;
2226	int i;
2227
2228	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2229			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2230	if (amdgpu_crtc == NULL)
2231		return -ENOMEM;
2232
2233	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2234
2235	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2236	amdgpu_crtc->crtc_id = index;
2237	adev->mode_info.crtcs[index] = amdgpu_crtc;
2238
2239	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2240	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2241	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2242	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2243
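	/* seed a linear gamma ramp; LUT entries are 10 bits wide, hence the << 2 on the 8-bit index */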
2244	for (i = 0; i < 256; i++) {
2245		amdgpu_crtc->lut_r[i] = i << 2;
2246		amdgpu_crtc->lut_g[i] = i << 2;
2247		amdgpu_crtc->lut_b[i] = i << 2;
2248	}
2249
2250	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2251
2252	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2253	amdgpu_crtc->adjusted_clock = 0;
2254	amdgpu_crtc->encoder = NULL;
2255	amdgpu_crtc->connector = NULL;
2256	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2257
2258	return 0;
2259}
2260
2261static int dce_v6_0_early_init(void *handle)
2262{
2263	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2264
2265	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2266	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2267
2268	dce_v6_0_set_display_funcs(adev);
2269	dce_v6_0_set_irq_funcs(adev);
2270
2271	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2272
2273	switch (adev->asic_type) {
2274	case CHIP_TAHITI:
2275	case CHIP_PITCAIRN:
2276	case CHIP_VERDE:
2277		adev->mode_info.num_hpd = 6;
2278		adev->mode_info.num_dig = 6;
2279		break;
2280	case CHIP_OLAND:
2281		adev->mode_info.num_hpd = 2;
2282		adev->mode_info.num_dig = 2;
2283		break;
2284	default:
2285		return -EINVAL;
2286	}
2287
2288	return 0;
2289}
2290
2291static int dce_v6_0_sw_init(void *handle)
2292{
2293	int r, i;
2294	bool ret;
2295	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296
2297	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2298		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2299		if (r)
2300			return r;
2301	}
2302
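	/* interrupt src_ids 8,10,...,18 are the page-flip sources for CRTC0-5 (see dce_v6_0_pageflip_irq()) */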
2303	for (i = 8; i < 20; i += 2) {
2304		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2305		if (r)
2306			return r;
2307	}
2308
2309	/* HPD hotplug: all HPD pins share interrupt source id 42 */
2310	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2311	if (r)
2312		return r;
2313
2314	adev->mode_info.mode_config_initialized = true;
2315
2316	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2317	adev->ddev->mode_config.async_page_flip = true;
2318	adev->ddev->mode_config.max_width = 16384;
2319	adev->ddev->mode_config.max_height = 16384;
2320	adev->ddev->mode_config.preferred_depth = 24;
2321	adev->ddev->mode_config.prefer_shadow = 1;
2322	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2323
2324	r = amdgpu_modeset_create_props(adev);
2325	if (r)
2326		return r;
2327
2331	/* allocate crtcs */
2332	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2333		r = dce_v6_0_crtc_init(adev, i);
2334		if (r)
2335			return r;
2336	}
2337
2338	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2339	if (ret)
2340		amdgpu_print_display_setup(adev->ddev);
2341	else
2342		return -EINVAL;
2343
2344	/* setup afmt */
2345	r = dce_v6_0_afmt_init(adev);
2346	if (r)
2347		return r;
2348
2349	r = dce_v6_0_audio_init(adev);
2350	if (r)
2351		return r;
2352
2353	drm_kms_helper_poll_init(adev->ddev);
2354
2355	return r;
2356}
2357
2358static int dce_v6_0_sw_fini(void *handle)
2359{
2360	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2361
2362	kfree(adev->mode_info.bios_hardcoded_edid);
2363
2364	drm_kms_helper_poll_fini(adev->ddev);
2365
2366	dce_v6_0_audio_fini(adev);
2367	dce_v6_0_afmt_fini(adev);
2368
2369	drm_mode_config_cleanup(adev->ddev);
2370	adev->mode_info.mode_config_initialized = false;
2371
2372	return 0;
2373}
2374
2375static int dce_v6_0_hw_init(void *handle)
2376{
2377	int i;
2378	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2379
2380	/* init dig PHYs, disp eng pll */
2381	amdgpu_atombios_encoder_init_dig(adev);
2382	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2383
2384	/* initialize hpd */
2385	dce_v6_0_hpd_init(adev);
2386
2387	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2388		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2389	}
2390
2391	dce_v6_0_pageflip_interrupt_init(adev);
2392
2393	return 0;
2394}
2395
2396static int dce_v6_0_hw_fini(void *handle)
2397{
2398	int i;
2399	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2400
2401	dce_v6_0_hpd_fini(adev);
2402
2403	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2404		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2405	}
2406
2407	dce_v6_0_pageflip_interrupt_fini(adev);
2408
2409	return 0;
2410}
2411
2412static int dce_v6_0_suspend(void *handle)
2413{
2414	return dce_v6_0_hw_fini(handle);
2415}
2416
2417static int dce_v6_0_resume(void *handle)
2418{
2419	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2420	int ret;
2421
2422	ret = dce_v6_0_hw_init(handle);
2423
2424	/* turn on the BL */
2425	if (adev->mode_info.bl_encoder) {
2426		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2427								  adev->mode_info.bl_encoder);
2428		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2429						    bl_level);
2430	}
2431
2432	return ret;
2433}
2434
2435static bool dce_v6_0_is_idle(void *handle)
2436{
2437	return true;
2438}
2439
2440static int dce_v6_0_wait_for_idle(void *handle)
2441{
2442	return 0;
2443}
2444
2445static int dce_v6_0_soft_reset(void *handle)
2446{
2447	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2448	return 0;
2449}
2450
2451static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2452						     int crtc,
2453						     enum amdgpu_interrupt_state state)
2454{
2455	u32 reg_block, interrupt_mask;
2456
2457	if (crtc >= adev->mode_info.num_crtc) {
2458		DRM_DEBUG("invalid crtc %d\n", crtc);
2459		return;
2460	}
2461
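	/* this switch is equivalent to indexing crtc_offsets[crtc] */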
2462	switch (crtc) {
2463	case 0:
2464		reg_block = SI_CRTC0_REGISTER_OFFSET;
2465		break;
2466	case 1:
2467		reg_block = SI_CRTC1_REGISTER_OFFSET;
2468		break;
2469	case 2:
2470		reg_block = SI_CRTC2_REGISTER_OFFSET;
2471		break;
2472	case 3:
2473		reg_block = SI_CRTC3_REGISTER_OFFSET;
2474		break;
2475	case 4:
2476		reg_block = SI_CRTC4_REGISTER_OFFSET;
2477		break;
2478	case 5:
2479		reg_block = SI_CRTC5_REGISTER_OFFSET;
2480		break;
2481	default:
2482		DRM_DEBUG("invalid crtc %d\n", crtc);
2483		return;
2484	}
2485
2486	switch (state) {
2487	case AMDGPU_IRQ_STATE_DISABLE:
2488		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2489		interrupt_mask &= ~VBLANK_INT_MASK;
2490		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2491		break;
2492	case AMDGPU_IRQ_STATE_ENABLE:
2493		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2494		interrupt_mask |= VBLANK_INT_MASK;
2495		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2496		break;
2497	default:
2498		break;
2499	}
2500}
2501
2502static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2503						    int crtc,
2504						    enum amdgpu_interrupt_state state)
2505{
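	/* intentionally empty: vline interrupts are not wired up on DCE6 */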
2506
2507}
2508
2509static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2510					    struct amdgpu_irq_src *src,
2511					    unsigned type,
2512					    enum amdgpu_interrupt_state state)
2513{
2514	u32 dc_hpd_int_cntl;
2515
2516	if (type >= adev->mode_info.num_hpd) {
2517		DRM_DEBUG("invalid hpd %d\n", type);
2518		return 0;
2519	}
2520
2521	switch (state) {
2522	case AMDGPU_IRQ_STATE_DISABLE:
2523		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2524		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2525		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2526		break;
2527	case AMDGPU_IRQ_STATE_ENABLE:
2528		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2529		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2530		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2531		break;
2532	default:
2533		break;
2534	}
2535
2536	return 0;
2537}
2538
2539static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2540					     struct amdgpu_irq_src *src,
2541					     unsigned type,
2542					     enum amdgpu_interrupt_state state)
2543{
2544	switch (type) {
2545	case AMDGPU_CRTC_IRQ_VBLANK1:
2546		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2547		break;
2548	case AMDGPU_CRTC_IRQ_VBLANK2:
2549		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2550		break;
2551	case AMDGPU_CRTC_IRQ_VBLANK3:
2552		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2553		break;
2554	case AMDGPU_CRTC_IRQ_VBLANK4:
2555		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2556		break;
2557	case AMDGPU_CRTC_IRQ_VBLANK5:
2558		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2559		break;
2560	case AMDGPU_CRTC_IRQ_VBLANK6:
2561		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2562		break;
2563	case AMDGPU_CRTC_IRQ_VLINE1:
2564		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2565		break;
2566	case AMDGPU_CRTC_IRQ_VLINE2:
2567		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2568		break;
2569	case AMDGPU_CRTC_IRQ_VLINE3:
2570		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2571		break;
2572	case AMDGPU_CRTC_IRQ_VLINE4:
2573		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2574		break;
2575	case AMDGPU_CRTC_IRQ_VLINE5:
2576		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2577		break;
2578	case AMDGPU_CRTC_IRQ_VLINE6:
2579		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2580		break;
2581	default:
2582		break;
2583	}
2584	return 0;
2585}
2586
2587static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2588			     struct amdgpu_irq_src *source,
2589			     struct amdgpu_iv_entry *entry)
2590{
2591	unsigned crtc = entry->src_id - 1;
2592	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2593	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2594
2595	switch (entry->src_data) {
2596	case 0: /* vblank */
2597		if (disp_int & interrupt_status_offsets[crtc].vblank)
2598			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2599		else
2600			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2601
2602		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2603			drm_handle_vblank(adev->ddev, crtc);
2604		}
2605		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2606		break;
2607	case 1: /* vline */
2608		if (disp_int & interrupt_status_offsets[crtc].vline)
2609			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2610		else
2611			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2612
2613		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2614		break;
2615	default:
2616		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2617		break;
2618	}
2619
2620	return 0;
2621}
2622
2623static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2624						 struct amdgpu_irq_src *src,
2625						 unsigned type,
2626						 enum amdgpu_interrupt_state state)
2627{
2628	u32 reg;
2629
2630	if (type >= adev->mode_info.num_crtc) {
2631		DRM_ERROR("invalid pageflip crtc %d\n", type);
2632		return -EINVAL;
2633	}
2634
2635	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2636	if (state == AMDGPU_IRQ_STATE_DISABLE)
2637		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2638		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2639	else
2640		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2641		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2642
2643	return 0;
2644}
2645
2646static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2647				 struct amdgpu_irq_src *source,
2648				 struct amdgpu_iv_entry *entry)
2649{
2650	unsigned long flags;
2651	unsigned crtc_id;
2652	struct amdgpu_crtc *amdgpu_crtc;
2653	struct amdgpu_flip_work *works;
2654
2655	crtc_id = (entry->src_id - 8) >> 1;
2656
2657	if (crtc_id >= adev->mode_info.num_crtc) {
2658		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2659		return -EINVAL;
2660	}
2661	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2662
2663	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
2664	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2665		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
2666		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2667
2668	/* the IRQ can fire during early init, before the crtc is set up */
2669	if (amdgpu_crtc == NULL)
2670		return 0;
2671
2672	spin_lock_irqsave(&adev->ddev->event_lock, flags);
2673	works = amdgpu_crtc->pflip_works;
2674	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
2675		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2676						"AMDGPU_FLIP_SUBMITTED(%d)\n",
2677						amdgpu_crtc->pflip_status,
2678						AMDGPU_FLIP_SUBMITTED);
2679		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2680		return 0;
2681	}
2682
2683	/* page flip completed. clean up */
2684	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2685	amdgpu_crtc->pflip_works = NULL;
2686
2687	/* wake up userspace */
2688	if (works->event)
2689		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2690
2691	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2692
2693	drm_crtc_vblank_put(&amdgpu_crtc->base);
2694	schedule_work(&works->unpin_work);
2695
2696	return 0;
2697}
2698
2699static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2700			    struct amdgpu_irq_src *source,
2701			    struct amdgpu_iv_entry *entry)
2702{
2703	uint32_t disp_int, mask, tmp;
2704	unsigned hpd;
2705
2706	if (entry->src_data >= adev->mode_info.num_hpd) {
2707		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2708		return 0;
2709	}
2710
2711	hpd = entry->src_data;
2712	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2713	mask = interrupt_status_offsets[hpd].hpd;
2714
2715	if (disp_int & mask) {
2716		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
2717		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2718		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
2719		schedule_work(&adev->hotplug_work);
2720		DRM_INFO("IH: HPD%d\n", hpd + 1);
2721	}
2722
2723	return 0;
2724
2725}
2726
2727static int dce_v6_0_set_clockgating_state(void *handle,
2728					  enum amd_clockgating_state state)
2729{
2730	return 0;
2731}
2732
2733static int dce_v6_0_set_powergating_state(void *handle,
2734					  enum amd_powergating_state state)
2735{
2736	return 0;
2737}
2738
2739static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2740	.name = "dce_v6_0",
2741	.early_init = dce_v6_0_early_init,
2742	.late_init = NULL,
2743	.sw_init = dce_v6_0_sw_init,
2744	.sw_fini = dce_v6_0_sw_fini,
2745	.hw_init = dce_v6_0_hw_init,
2746	.hw_fini = dce_v6_0_hw_fini,
2747	.suspend = dce_v6_0_suspend,
2748	.resume = dce_v6_0_resume,
2749	.is_idle = dce_v6_0_is_idle,
2750	.wait_for_idle = dce_v6_0_wait_for_idle,
2751	.soft_reset = dce_v6_0_soft_reset,
2752	.set_clockgating_state = dce_v6_0_set_clockgating_state,
2753	.set_powergating_state = dce_v6_0_set_powergating_state,
2754};
2755
2756static void
2757dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2758			  struct drm_display_mode *mode,
2759			  struct drm_display_mode *adjusted_mode)
2760{
2761
2762	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2763
2764	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2765
2766	/* need to call this here rather than in prepare() since we need some crtc info */
2767	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2768
2769	/* the scaler setup clears this on some chips, so reprogram it */
2770	dce_v6_0_set_interleave(encoder->crtc, mode);
2771
2772	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2773		dce_v6_0_afmt_enable(encoder, true);
2774		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2775	}
2776}
2777
2778static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2779{
2780
2781	struct amdgpu_device *adev = encoder->dev->dev_private;
2782	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2783	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2784
2785	if ((amdgpu_encoder->active_device &
2786	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2787	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2788	     ENCODER_OBJECT_ID_NONE)) {
2789		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2790		if (dig) {
2791			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2792			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2793				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2794		}
2795	}
2796
2797	amdgpu_atombios_scratch_regs_lock(adev, true);
2798
2799	if (connector) {
2800		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2801
2802		/* select the clock/data port if it uses a router */
2803		if (amdgpu_connector->router.cd_valid)
2804			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2805
2806		/* turn eDP panel on for mode set */
2807		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2808			amdgpu_atombios_encoder_set_edp_panel_power(connector,
2809							     ATOM_TRANSMITTER_ACTION_POWER_ON);
2810	}
2811
2812	/* this is needed for the pll/ss setup to work correctly in some cases */
2813	amdgpu_atombios_encoder_set_crtc_source(encoder);
2814	/* set up the FMT blocks */
2815	dce_v6_0_program_fmt(encoder);
2816}
2817
2818static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2819{
2820
2821	struct drm_device *dev = encoder->dev;
2822	struct amdgpu_device *adev = dev->dev_private;
2823
2824	/* need to call this here as we need the crtc set up */
2825	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2826	amdgpu_atombios_scratch_regs_lock(adev, false);
2827}
2828
2829static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2830{
2831
2832	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2833	struct amdgpu_encoder_atom_dig *dig;
2834
2835	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2836
2837	if (amdgpu_atombios_encoder_is_digital(encoder)) {
2838		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2839			dce_v6_0_afmt_enable(encoder, false);
2840		dig = amdgpu_encoder->enc_priv;
2841		dig->dig_encoder = -1;
2842	}
2843	amdgpu_encoder->active_device = 0;
2844}
2845
2846/* these are handled by the primary encoders */
2847static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2848{
2849
2850}
2851
2852static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2853{
2854
2855}
2856
2857static void
2858dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2859		      struct drm_display_mode *mode,
2860		      struct drm_display_mode *adjusted_mode)
2861{
2862
2863}
2864
2865static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2866{
2867
2868}
2869
2870static void
2871dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2872{
2873
2874}
2875
2876static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2877				    const struct drm_display_mode *mode,
2878				    struct drm_display_mode *adjusted_mode)
2879{
2880	return true;
2881}
2882
2883static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2884	.dpms = dce_v6_0_ext_dpms,
2885	.mode_fixup = dce_v6_0_ext_mode_fixup,
2886	.prepare = dce_v6_0_ext_prepare,
2887	.mode_set = dce_v6_0_ext_mode_set,
2888	.commit = dce_v6_0_ext_commit,
2889	.disable = dce_v6_0_ext_disable,
2890	/* no detect for TMDS/LVDS yet */
2891};
2892
2893static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2894	.dpms = amdgpu_atombios_encoder_dpms,
2895	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2896	.prepare = dce_v6_0_encoder_prepare,
2897	.mode_set = dce_v6_0_encoder_mode_set,
2898	.commit = dce_v6_0_encoder_commit,
2899	.disable = dce_v6_0_encoder_disable,
2900	.detect = amdgpu_atombios_encoder_dig_detect,
2901};
2902
2903static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2904	.dpms = amdgpu_atombios_encoder_dpms,
2905	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2906	.prepare = dce_v6_0_encoder_prepare,
2907	.mode_set = dce_v6_0_encoder_mode_set,
2908	.commit = dce_v6_0_encoder_commit,
2909	.detect = amdgpu_atombios_encoder_dac_detect,
2910};
2911
2912static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
2913{
2914	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2915	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2916		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
2917	kfree(amdgpu_encoder->enc_priv);
2918	drm_encoder_cleanup(encoder);
2919	kfree(amdgpu_encoder);
2920}
2921
2922static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
2923	.destroy = dce_v6_0_encoder_destroy,
2924};
2925
2926static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
2927				 uint32_t encoder_enum,
2928				 uint32_t supported_device,
2929				 u16 caps)
2930{
2931	struct drm_device *dev = adev->ddev;
2932	struct drm_encoder *encoder;
2933	struct amdgpu_encoder *amdgpu_encoder;
2934
2935	/* see if we already added it */
2936	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2937		amdgpu_encoder = to_amdgpu_encoder(encoder);
2938		if (amdgpu_encoder->encoder_enum == encoder_enum) {
2939			amdgpu_encoder->devices |= supported_device;
2940			return;
2941		}
2942
2943	}
2944
2945	/* add a new one */
2946	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
2947	if (!amdgpu_encoder)
2948		return;
2949
2950	encoder = &amdgpu_encoder->base;
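	/* possible_crtcs is a bitmask: bit N set means this encoder can drive CRTC N */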
2951	switch (adev->mode_info.num_crtc) {
2952	case 1:
2953		encoder->possible_crtcs = 0x1;
2954		break;
2955	case 2:
2956	default:
2957		encoder->possible_crtcs = 0x3;
2958		break;
2959	case 4:
2960		encoder->possible_crtcs = 0xf;
2961		break;
2962	case 6:
2963		encoder->possible_crtcs = 0x3f;
2964		break;
2965	}
2966
2967	amdgpu_encoder->enc_priv = NULL;
2968	amdgpu_encoder->encoder_enum = encoder_enum;
2969	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
2970	amdgpu_encoder->devices = supported_device;
2971	amdgpu_encoder->rmx_type = RMX_OFF;
2972	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
2973	amdgpu_encoder->is_ext_encoder = false;
2974	amdgpu_encoder->caps = caps;
2975
2976	switch (amdgpu_encoder->encoder_id) {
2977	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2978	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2979		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2980				 DRM_MODE_ENCODER_DAC, NULL);
2981		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
2982		break;
2983	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
2984	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2985	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2986	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2987	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2988		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2989			amdgpu_encoder->rmx_type = RMX_FULL;
2990			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2991					 DRM_MODE_ENCODER_LVDS, NULL);
2992			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
2993		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2994			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2995					 DRM_MODE_ENCODER_DAC, NULL);
2996			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
2997		} else {
2998			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2999					 DRM_MODE_ENCODER_TMDS, NULL);
3000			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3001		}
3002		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3003		break;
3004	case ENCODER_OBJECT_ID_SI170B:
3005	case ENCODER_OBJECT_ID_CH7303:
3006	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3007	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3008	case ENCODER_OBJECT_ID_TITFP513:
3009	case ENCODER_OBJECT_ID_VT1623:
3010	case ENCODER_OBJECT_ID_HDMI_SI1930:
3011	case ENCODER_OBJECT_ID_TRAVIS:
3012	case ENCODER_OBJECT_ID_NUTMEG:
3013		/* these are handled by the primary encoders */
3014		amdgpu_encoder->is_ext_encoder = true;
3015		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3016			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3017					 DRM_MODE_ENCODER_LVDS, NULL);
3018		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3019			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3020					 DRM_MODE_ENCODER_DAC, NULL);
3021		else
3022			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3023					 DRM_MODE_ENCODER_TMDS, NULL);
3024		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3025		break;
3026	}
3027}
3028
3029static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3030	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
3031	.bandwidth_update = &dce_v6_0_bandwidth_update,
3032	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3033	.vblank_wait = &dce_v6_0_vblank_wait,
3034	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3035	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3036	.hpd_sense = &dce_v6_0_hpd_sense,
3037	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3038	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3039	.page_flip = &dce_v6_0_page_flip,
3040	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3041	.add_encoder = &dce_v6_0_encoder_add,
3042	.add_connector = &amdgpu_connector_add,
3043	.stop_mc_access = &dce_v6_0_stop_mc_access,
3044	.resume_mc_access = &dce_v6_0_resume_mc_access,
3045};
3046
3047static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3048{
3049	if (adev->mode_info.funcs == NULL)
3050		adev->mode_info.funcs = &dce_v6_0_display_funcs;
3051}
3052
3053static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3054	.set = dce_v6_0_set_crtc_interrupt_state,
3055	.process = dce_v6_0_crtc_irq,
3056};
3057
3058static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3059	.set = dce_v6_0_set_pageflip_interrupt_state,
3060	.process = dce_v6_0_pageflip_irq,
3061};
3062
3063static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3064	.set = dce_v6_0_set_hpd_interrupt_state,
3065	.process = dce_v6_0_hpd_irq,
3066};
3067
3068static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3069{
3070	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3071	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3072
3073	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3074	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3075
3076	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3077	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3078}
3079
3080const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3081{
3082	.type = AMD_IP_BLOCK_TYPE_DCE,
3083	.major = 6,
3084	.minor = 0,
3085	.rev = 0,
3086	.funcs = &dce_v6_0_ip_funcs,
3087};
3088
3089const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3090{
3091	.type = AMD_IP_BLOCK_TYPE_DCE,
3092	.major = 6,
3093	.minor = 4,
3094	.rev = 0,
3095	.funcs = &dce_v6_0_ip_funcs,
3096};