/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

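/* per-crtc interrupt status register and its vblank/vline/hpd bit masks */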
static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};

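/*
 * The AZALIA endpoint registers are indirect: each block exposes a shared
 * index/data register pair.  Take the lock so a concurrent accessor cannot
 * change the index between the index write and the data access.
 */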
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
			CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid
			 * breaking the aux dp channel on imacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

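/*
 * The display is considered hung if the HV position counter of every
 * enabled crtc stops advancing across ten samples taken 100 us apart.
 */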
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

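/*
 * stop/resume_mc_access bracket memory controller reprogramming:
 * scanout is shut off so no display requests are in flight while the
 * MC is reconfigured, then re-enabled scanning out of vram_start.
 */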
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* enable/disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

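/*
 * The FMT block converts the pixel pipeline output to the monitor's
 * depth, either by simple truncation or by spatial dithering; the
 * depth field written below mirrors the bpc being handled
 * (0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc).
 */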
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

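/*
 * The watermark math below uses the driver's 20.12 fixed-point
 * helpers: dfixed_const() loads an integer, dfixed_mul()/dfixed_div()
 * multiply and divide, and dfixed_trunc() drops the fraction.  Ratios
 * such as 0.7 are built as dfixed_const(7) / dfixed_const(10).
 *
 * Illustrative example (made-up numbers): yclk = 800000 kHz with
 * 2 dram channels gives 800 MHz * (2 * 4 bytes) * 0.7 efficiency
 * = 4480 MBytes/s of raw dram bandwidth.
 */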
/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

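/*
 * Average bandwidth is the pixel fetch rate of the mode:
 * src_width * bytes_per_pixel * vsc / line_time, with line_time
 * (active + blank) converted from ns to us, yielding MBytes/s.
 */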
/**
 * dce_v8_0_average_bandwidth - get the average bandwidth consumed by display
 *
 * @wm: watermark calculation data
 *
 * Calculate the average bandwidth consumed by the current display mode (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

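/*
 * The latency watermark is the worst-case time in ns the controller
 * may wait for data: fixed MC latency, plus the time other heads can
 * occupy the return path, plus dc pipe latency, extended when the
 * line buffer refills a line slower than scanout consumes it.
 */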
/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

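/*
 * Two watermark sets are programmed per pipe: set A for the high
 * (performance) clocks and set B for the low (power-saving) clocks,
 * and both are saved in the crtc so DPM can pick the right one when
 * the clocks change.
 */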
/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

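/*
 * Short Audio Descriptors (SADs) in the monitor's EDID list the audio
 * formats it supports; write the best descriptor per coding type into
 * the matching AZALIA descriptor register so the codec advertises them.
 */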
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

1654/*
1655 * update the N and CTS parameters for a given pixel clock rate
1656 */
1657static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1658{
1659	struct drm_device *dev = encoder->dev;
1660	struct amdgpu_device *adev = dev->dev_private;
1661	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1662	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1663	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1664	uint32_t offset = dig->afmt->offset;
1665
1666	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1667	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1668
1669	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1670	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1671
1672	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1673	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1674}
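
/*
 * Background for the registers above: HDMI audio clock regeneration
 * requires the sink to recover the audio sample clock from the TMDS
 * clock via
 *
 *	128 * fs = f_TMDS * N / CTS
 *
 * For example, at a 74.25 MHz pixel clock with 48 kHz audio, the
 * spec-recommended N = 6144 gives
 * CTS = 74250000 * 6144 / (128 * 48000) = 74250
 * (assuming amdgpu_afmt_acr() returns the HDMI-spec recommended values).
 */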
1675
1676/*
1677 * build a HDMI Video Info Frame
1678 */
1679static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1680					       void *buffer, size_t size)
1681{
1682	struct drm_device *dev = encoder->dev;
1683	struct amdgpu_device *adev = dev->dev_private;
1684	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1685	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1686	uint32_t offset = dig->afmt->offset;
1687	uint8_t *frame = buffer + 3;
1688	uint8_t *header = buffer;
1689
1690	WREG32(mmAFMT_AVI_INFO0 + offset,
1691		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1692	WREG32(mmAFMT_AVI_INFO1 + offset,
1693		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1694	WREG32(mmAFMT_AVI_INFO2 + offset,
1695		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1696	WREG32(mmAFMT_AVI_INFO3 + offset,
1697		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1698}
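
/*
 * The buffer layout assumed above: hdmi_avi_infoframe_pack() emits a
 * 3-byte header (type, version, length) followed by the checksum and
 * then the payload, so with frame = buffer + 3, frame[0x0] is the
 * checksum and frame[0x1]..frame[0xD] are the AVI payload bytes;
 * header[1] (the version byte) lands in the top byte of AFMT_AVI_INFO3.
 */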
1699
1700static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1701{
1702	struct drm_device *dev = encoder->dev;
1703	struct amdgpu_device *adev = dev->dev_private;
1704	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1705	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1706	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1707	u32 dto_phase = 24 * 1000;
1708	u32 dto_modulo = clock;
1709
1710	if (!dig || !dig->afmt)
1711		return;
1712
1713	/* XXX two dtos; generally use dto0 for hdmi */
1714	/* Express [24MHz / target pixel clock] as an exact rational
1715	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
1716	 * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
1717	 */
1718	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1719	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1720	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1721}
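
/*
 * Example of the ratio above: for a 148.5 MHz pixel clock, mode->clock
 * is 148500 (kHz), so PHASE/MODULE = 24000/148500 expresses
 * 24 MHz / 148.5 MHz exactly; both values are in kHz, so the common
 * factor cancels and no rounding error is introduced.
 */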
1722
1723/*
1724 * update the info frames with the data from the current display mode
1725 */
1726static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1727				  struct drm_display_mode *mode)
1728{
1729	struct drm_device *dev = encoder->dev;
1730	struct amdgpu_device *adev = dev->dev_private;
1731	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1732	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1733	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1734	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1735	struct hdmi_avi_infoframe frame;
1736	uint32_t offset, val;
1737	ssize_t err;
1738	int bpc = 8;
1739
1740	if (!dig || !dig->afmt)
1741		return;
1742
1743	/* Silent, r600_hdmi_enable will raise WARN for us */
1744	if (!dig->afmt->enabled)
1745		return;
1746	offset = dig->afmt->offset;
1747
1748	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1749	if (encoder->crtc) {
1750		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1751		bpc = amdgpu_crtc->bpc;
1752	}
1753
1754	/* disable audio prior to setting up hw */
1755	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1756	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1757
1758	dce_v8_0_audio_set_dto(encoder, mode->clock);
1759
1760	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1761	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1762
1763	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1764
1765	val = RREG32(mmHDMI_CONTROL + offset);
1766	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1767	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1768
1769	switch (bpc) {
1770	case 0:
1771	case 6:
1772	case 8:
1773	case 16:
1774	default:
1775		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1776			  connector->name, bpc);
1777		break;
1778	case 10:
1779		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1780		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1781		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1782			  connector->name);
1783		break;
1784	case 12:
1785		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1786		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1787		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1788			  connector->name);
1789		break;
1790	}
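	/* The depth field encoding used above appears to be 1 = 30 bpp and
	 * 2 = 36 bpp deep color (inferred from the cases); plain 24 bpp
	 * keeps deep color disabled entirely in the HDMI_CONTROL write
	 * below.
	 */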
1791
1792	WREG32(mmHDMI_CONTROL + offset, val);
1793
1794	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1795	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1796	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1797	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1798
1799	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1800	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1801	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1802
1803	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1804	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1805
1806	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1807	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1808
1809	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1810
1811	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1812	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1813	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1814
1815	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1816	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1817
1818	/* fglrx clears something in AFMT_AUDIO_PACKET_CONTROL2 here */
1819
1820	if (bpc > 8)
1821		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1822		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1823	else
1824		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1825		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1826		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1827
1828	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1829
1830	WREG32(mmAFMT_60958_0 + offset,
1831	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1832
1833	WREG32(mmAFMT_60958_1 + offset,
1834	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1835
1836	WREG32(mmAFMT_60958_2 + offset,
1837	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1838	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1839	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1840	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1841	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1842	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1843
1844	dce_v8_0_audio_write_speaker_allocation(encoder);
1845
1846
1847	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1848	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1849
1850	dce_v8_0_afmt_audio_select_pin(encoder);
1851	dce_v8_0_audio_write_sad_regs(encoder);
1852	dce_v8_0_audio_write_latency_fields(encoder, mode);
1853
1854	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1855	if (err < 0) {
1856		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1857		return;
1858	}
1859
1860	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1861	if (err < 0) {
1862		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1863		return;
1864	}
1865
1866	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1867
1868	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1869		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1870		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */
1871
1872	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1873		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1874		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1875
1876	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1877		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1878
1879	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
1880	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1881	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1882	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1883	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1884
1885	/* enable audio after setting up hw */
1886	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1887}
1888
1889static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1890{
1891	struct drm_device *dev = encoder->dev;
1892	struct amdgpu_device *adev = dev->dev_private;
1893	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1894	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1895
1896	if (!dig || !dig->afmt)
1897		return;
1898
1899	/* Silent, r600_hdmi_enable will raise WARN for us */
1900	if (enable && dig->afmt->enabled)
1901		return;
1902	if (!enable && !dig->afmt->enabled)
1903		return;
1904
1905	if (!enable && dig->afmt->pin) {
1906		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1907		dig->afmt->pin = NULL;
1908	}
1909
1910	dig->afmt->enabled = enable;
1911
1912	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1913		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1914}
1915
1916static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1917{
1918	int i;
1919
1920	for (i = 0; i < adev->mode_info.num_dig; i++)
1921		adev->mode_info.afmt[i] = NULL;
1922
1923	/* DCE8 has audio blocks tied to DIG encoders */
1924	for (i = 0; i < adev->mode_info.num_dig; i++) {
1925		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1926		if (adev->mode_info.afmt[i]) {
1927			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1928			adev->mode_info.afmt[i]->id = i;
1929		} else {
1930			int j;
1931			for (j = 0; j < i; j++) {
1932				kfree(adev->mode_info.afmt[j]);
1933				adev->mode_info.afmt[j] = NULL;
1934			}
1935			return -ENOMEM;
1936		}
1937	}
1938	return 0;
1939}
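
/*
 * Note the unwind pattern above: if any allocation fails, every afmt
 * struct allocated so far is freed and its pointer cleared before
 * returning -ENOMEM, which keeps dce_v8_0_afmt_fini() safe to call
 * unconditionally.
 */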
1940
1941static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1942{
1943	int i;
1944
1945	for (i = 0; i < adev->mode_info.num_dig; i++) {
1946		kfree(adev->mode_info.afmt[i]);
1947		adev->mode_info.afmt[i] = NULL;
1948	}
1949}
1950
1951static const u32 vga_control_regs[6] =
1952{
1953	mmD1VGA_CONTROL,
1954	mmD2VGA_CONTROL,
1955	mmD3VGA_CONTROL,
1956	mmD4VGA_CONTROL,
1957	mmD5VGA_CONTROL,
1958	mmD6VGA_CONTROL,
1959};
1960
1961static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1962{
1963	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1964	struct drm_device *dev = crtc->dev;
1965	struct amdgpu_device *adev = dev->dev_private;
1966	u32 vga_control;
1967
1968	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1969	if (enable)
1970		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1971	else
1972		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1973}
1974
1975static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1976{
1977	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1978	struct drm_device *dev = crtc->dev;
1979	struct amdgpu_device *adev = dev->dev_private;
1980
1981	if (enable)
1982		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1983	else
1984		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1985}
1986
1987static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1988				     struct drm_framebuffer *fb,
1989				     int x, int y, int atomic)
1990{
1991	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1992	struct drm_device *dev = crtc->dev;
1993	struct amdgpu_device *adev = dev->dev_private;
1994	struct amdgpu_framebuffer *amdgpu_fb;
1995	struct drm_framebuffer *target_fb;
1996	struct drm_gem_object *obj;
1997	struct amdgpu_bo *rbo;
1998	uint64_t fb_location, tiling_flags;
1999	uint32_t fb_format, fb_pitch_pixels;
2000	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2001	u32 pipe_config;
2002	u32 tmp, viewport_w, viewport_h;
2003	int r;
2004	bool bypass_lut = false;
2005
2006	/* no fb bound */
2007	if (!atomic && !crtc->primary->fb) {
2008		DRM_DEBUG_KMS("No FB bound\n");
2009		return 0;
2010	}
2011
2012	if (atomic) {
2013		amdgpu_fb = to_amdgpu_framebuffer(fb);
2014		target_fb = fb;
2015	} else {
2016		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2017		target_fb = crtc->primary->fb;
2018	}
2019
2020	/* If atomic, assume fb object is pinned & idle & fenced and
2021	 * just update base pointers
2022	 */
2023	obj = amdgpu_fb->obj;
2024	rbo = gem_to_amdgpu_bo(obj);
2025	r = amdgpu_bo_reserve(rbo, false);
2026	if (unlikely(r != 0))
2027		return r;
2028
2029	if (atomic) {
2030		fb_location = amdgpu_bo_gpu_offset(rbo);
2031	} else {
2032		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2033		if (unlikely(r != 0)) {
2034			amdgpu_bo_unreserve(rbo);
2035			return -EINVAL;
2036		}
2037	}
2038
2039	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2040	amdgpu_bo_unreserve(rbo);
2041
2042	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2043
2044	switch (target_fb->pixel_format) {
2045	case DRM_FORMAT_C8:
2046		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2047			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2048		break;
2049	case DRM_FORMAT_XRGB4444:
2050	case DRM_FORMAT_ARGB4444:
2051		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2052			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2053#ifdef __BIG_ENDIAN
2054		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2055#endif
2056		break;
2057	case DRM_FORMAT_XRGB1555:
2058	case DRM_FORMAT_ARGB1555:
2059		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2060			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2061#ifdef __BIG_ENDIAN
2062		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2063#endif
2064		break;
2065	case DRM_FORMAT_BGRX5551:
2066	case DRM_FORMAT_BGRA5551:
2067		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2068			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2069#ifdef __BIG_ENDIAN
2070		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2071#endif
2072		break;
2073	case DRM_FORMAT_RGB565:
2074		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2075			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2076#ifdef __BIG_ENDIAN
2077		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2078#endif
2079		break;
2080	case DRM_FORMAT_XRGB8888:
2081	case DRM_FORMAT_ARGB8888:
2082		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2083			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2084#ifdef __BIG_ENDIAN
2085		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2086#endif
2087		break;
2088	case DRM_FORMAT_XRGB2101010:
2089	case DRM_FORMAT_ARGB2101010:
2090		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2091			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2092#ifdef __BIG_ENDIAN
2093		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2094#endif
2095		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2096		bypass_lut = true;
2097		break;
2098	case DRM_FORMAT_BGRX1010102:
2099	case DRM_FORMAT_BGRA1010102:
2100		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2101			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2102#ifdef __BIG_ENDIAN
2103		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2104#endif
2105		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2106		bypass_lut = true;
2107		break;
2108	default:
2109		DRM_ERROR("Unsupported screen format %s\n",
2110			  drm_get_format_name(target_fb->pixel_format));
2111		return -EINVAL;
2112	}
2113
2114	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2115		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2116
2117		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2118		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2119		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2120		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2121		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2122
2123		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2124		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2125		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2126		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2127		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2128		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2129		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2130	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2131		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2132	}
2133
2134	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2135
2136	dce_v8_0_vga_enable(crtc, false);
2137
2138	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2139	       upper_32_bits(fb_location));
2140	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2141	       upper_32_bits(fb_location));
2142	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2143	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2144	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2145	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2146	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2147	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2148
2149	/*
2150	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2151	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2152	 * retain the full precision throughout the pipeline.
2153	 */
2154	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2155		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2156		 ~LUT_10BIT_BYPASS_EN);
2157
2158	if (bypass_lut)
2159		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2160
2161	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2162	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2163	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2164	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2165	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2166	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2167
2168	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2169	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2170
2171	dce_v8_0_grph_enable(crtc, true);
2172
2173	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2174	       target_fb->height);
2175
2176	x &= ~3;
2177	y &= ~1;
2178	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2179	       (x << 16) | y);
2180	viewport_w = crtc->mode.hdisplay;
2181	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2182	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2183	       (viewport_w << 16) | viewport_h);
2184
2185	/* pageflip setup */
2186	/* make sure the flip happens during vblank rather than hblank */
2187	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2188	tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
2189	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2190
2191	/* set pageflip to happen only at start of vblank interval (front porch) */
2192	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2193
2194	if (!atomic && fb && fb != crtc->primary->fb) {
2195		amdgpu_fb = to_amdgpu_framebuffer(fb);
2196		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2197		r = amdgpu_bo_reserve(rbo, false);
2198		if (unlikely(r != 0))
2199			return r;
2200		amdgpu_bo_unpin(rbo);
2201		amdgpu_bo_unreserve(rbo);
2202	}
2203
2204	/* Bytes per pixel may have changed */
2205	dce_v8_0_bandwidth_update(adev);
2206
2207	return 0;
2208}
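
/*
 * Example of the pitch math above: a 1920x1080 XRGB8888 framebuffer has
 * pitches[0] == 7680 bytes and bits_per_pixel == 32, so GRPH_PITCH is
 * programmed with 7680 / 4 = 1920 pixels per scanline.
 */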
2209
2210static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2211				    struct drm_display_mode *mode)
2212{
2213	struct drm_device *dev = crtc->dev;
2214	struct amdgpu_device *adev = dev->dev_private;
2215	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2216
2217	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2218		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2219		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2220	else
2221		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2222}
2223
2224static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2225{
2226	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2227	struct drm_device *dev = crtc->dev;
2228	struct amdgpu_device *adev = dev->dev_private;
2229	int i;
2230
2231	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2232
2233	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2234	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2235		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2236	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2237	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2238	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2239	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2240	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2241	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2242		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2243
2244	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2245
2246	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2247	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2248	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2249
2250	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2251	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2252	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2253
2254	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2255	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2256
2257	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2258	for (i = 0; i < 256; i++) {
2259		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2260		       (amdgpu_crtc->lut_r[i] << 20) |
2261		       (amdgpu_crtc->lut_g[i] << 10) |
2262		       (amdgpu_crtc->lut_b[i] << 0));
2263	}
2264
2265	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2266	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2267		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2268		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2269	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2270	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2271		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2272	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2273	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2274		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2275	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2276	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2277		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2278	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2279	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2280	/* XXX this only needs to be programmed once per crtc at startup,
2281	 * not sure where the best place for it is
2282	 */
2283	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2284	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2285}
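
/*
 * Each DC_LUT_30_COLOR write above packs one palette entry as a 30-bit
 * word: 10 bits of red at bit 20, green at bit 10 and blue at bit 0.
 * With the default ramp set up in dce_v8_0_crtc_init() (lut[i] = i << 2),
 * entry 255 is (0x3fc << 20) | (0x3fc << 10) | 0x3fc.
 */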
2286
2287static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2288{
2289	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2290	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2291
2292	switch (amdgpu_encoder->encoder_id) {
2293	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2294		if (dig->linkb)
2295			return 1;
2296		else
2297			return 0;
2298		break;
2299	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2300		if (dig->linkb)
2301			return 3;
2302		else
2303			return 2;
2304		break;
2305	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2306		if (dig->linkb)
2307			return 5;
2308		else
2309			return 4;
2310		break;
2311	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2312		return 6;
2313		break;
2314	default:
2315		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2316		return 0;
2317	}
2318}
2319
2320/**
2321 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2322 *
2323 * @crtc: drm crtc
2324 *
2325 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2326 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2327 * monitors a dedicated PPLL must be used.  If a particular board has
2328 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2329 * as there is no need to program the PLL itself.  If we are not able to
2330 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2331 * avoid messing up an existing monitor.
2332 *
2333 * Asic specific PLL information
2334 *
2335 * DCE 8.x
2336 * KB/KV
2337 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2338 * CI
2339 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2340 *
2341 */
2342static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2343{
2344	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2345	struct drm_device *dev = crtc->dev;
2346	struct amdgpu_device *adev = dev->dev_private;
2347	u32 pll_in_use;
2348	int pll;
2349
2350	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2351		if (adev->clock.dp_extclk)
2352			/* skip PPLL programming if using ext clock */
2353			return ATOM_PPLL_INVALID;
2354		else {
2355			/* use the same PPLL for all DP monitors */
2356			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2357			if (pll != ATOM_PPLL_INVALID)
2358				return pll;
2359		}
2360	} else {
2361		/* use the same PPLL for all monitors with the same clock */
2362		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2363		if (pll != ATOM_PPLL_INVALID)
2364			return pll;
2365	}
2366	/* otherwise, pick one of the plls */
2367	if ((adev->asic_type == CHIP_KABINI) ||
2368	    (adev->asic_type == CHIP_MULLINS)) {
2369		/* KB/ML has PPLL1 and PPLL2 */
2370		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2371		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2372			return ATOM_PPLL2;
2373		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2374			return ATOM_PPLL1;
2375		DRM_ERROR("unable to allocate a PPLL\n");
2376		return ATOM_PPLL_INVALID;
2377	} else {
2378		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2379		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2380		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2381			return ATOM_PPLL2;
2382		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2383			return ATOM_PPLL1;
2384		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2385			return ATOM_PPLL0;
2386		DRM_ERROR("unable to allocate a PPLL\n");
2387		return ATOM_PPLL_INVALID;
2388	}
2389	return ATOM_PPLL_INVALID;
2390}
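
/*
 * pll_in_use above is a bitmask of PPLLs already owned by other crtcs,
 * so the fallback path simply takes the first free one; e.g. if another
 * crtc already holds PPLL2, bit ATOM_PPLL2 is set and a KB/ML part
 * falls through to PPLL1.
 */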
2391
2392static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2393{
2394	struct amdgpu_device *adev = crtc->dev->dev_private;
2395	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2396	uint32_t cur_lock;
2397
2398	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2399	if (lock)
2400		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2401	else
2402		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2403	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2404}
2405
2406static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2407{
2408	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2409	struct amdgpu_device *adev = crtc->dev->dev_private;
2410
2411	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2412		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2413		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2414}
2415
2416static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2417{
2418	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2419	struct amdgpu_device *adev = crtc->dev->dev_private;
2420
2421	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2422	       upper_32_bits(amdgpu_crtc->cursor_addr));
2423	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2424	       lower_32_bits(amdgpu_crtc->cursor_addr));
2425
2426	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2427		   CUR_CONTROL__CURSOR_EN_MASK |
2428		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2429		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2430}
2431
2432static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2433				       int x, int y)
2434{
2435	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2436	struct amdgpu_device *adev = crtc->dev->dev_private;
2437	int xorigin = 0, yorigin = 0;
2438
2439	/* avivo cursors are offset into the total surface */
2440	x += crtc->x;
2441	y += crtc->y;
2442	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2443
2444	if (x < 0) {
2445		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2446		x = 0;
2447	}
2448	if (y < 0) {
2449		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2450		y = 0;
2451	}
2452
2453	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2454	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2455	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2456	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2457
2458	amdgpu_crtc->cursor_x = x;
2459	amdgpu_crtc->cursor_y = y;
2460
2461	return 0;
2462}
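
/*
 * Example of the clamping above: moving the cursor to x = -10 yields
 * x = 0 and xorigin = 10, so CUR_POSITION pins the cursor to the screen
 * edge while CUR_HOT_SPOT shifts the cursor image 10 pixels left,
 * leaving only the on-screen part visible.
 */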
2463
2464static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2465				     int x, int y)
2466{
2467	int ret;
2468
2469	dce_v8_0_lock_cursor(crtc, true);
2470	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2471	dce_v8_0_lock_cursor(crtc, false);
2472
2473	return ret;
2474}
2475
2476static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2477				     struct drm_file *file_priv,
2478				     uint32_t handle,
2479				     uint32_t width,
2480				     uint32_t height,
2481				     int32_t hot_x,
2482				     int32_t hot_y)
2483{
2484	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2485	struct drm_gem_object *obj;
2486	struct amdgpu_bo *aobj;
2487	int ret;
2488
2489	if (!handle) {
2490		/* turn off cursor */
2491		dce_v8_0_hide_cursor(crtc);
2492		obj = NULL;
2493		goto unpin;
2494	}
2495
2496	if ((width > amdgpu_crtc->max_cursor_width) ||
2497	    (height > amdgpu_crtc->max_cursor_height)) {
2498		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2499		return -EINVAL;
2500	}
2501
2502	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2503	if (!obj) {
2504		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2505		return -ENOENT;
2506	}
2507
2508	aobj = gem_to_amdgpu_bo(obj);
2509	ret = amdgpu_bo_reserve(aobj, false);
2510	if (ret != 0) {
2511		drm_gem_object_unreference_unlocked(obj);
2512		return ret;
2513	}
2514
2515	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2516	amdgpu_bo_unreserve(aobj);
2517	if (ret) {
2518		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2519		drm_gem_object_unreference_unlocked(obj);
2520		return ret;
2521	}
2522
2523	amdgpu_crtc->cursor_width = width;
2524	amdgpu_crtc->cursor_height = height;
2525
2526	dce_v8_0_lock_cursor(crtc, true);
2527
2528	if (hot_x != amdgpu_crtc->cursor_hot_x ||
2529	    hot_y != amdgpu_crtc->cursor_hot_y) {
2530		int x, y;
2531
2532		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2533		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2534
2535		dce_v8_0_cursor_move_locked(crtc, x, y);
2536
2537		amdgpu_crtc->cursor_hot_x = hot_x;
2538		amdgpu_crtc->cursor_hot_y = hot_y;
2539	}
2540
2541	dce_v8_0_show_cursor(crtc);
2542	dce_v8_0_lock_cursor(crtc, false);
2543
2544unpin:
2545	if (amdgpu_crtc->cursor_bo) {
2546		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2547		ret = amdgpu_bo_reserve(aobj, false);
2548		if (likely(ret == 0)) {
2549			amdgpu_bo_unpin(aobj);
2550			amdgpu_bo_unreserve(aobj);
2551		}
2552		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2553	}
2554
2555	amdgpu_crtc->cursor_bo = obj;
2556	return 0;
2557}
2558
2559static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2560{
2561	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2562
2563	if (amdgpu_crtc->cursor_bo) {
2564		dce_v8_0_lock_cursor(crtc, true);
2565
2566		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2567					    amdgpu_crtc->cursor_y);
2568
2569		dce_v8_0_show_cursor(crtc);
2570
2571		dce_v8_0_lock_cursor(crtc, false);
2572	}
2573}
2574
2575static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2576				    u16 *blue, uint32_t start, uint32_t size)
2577{
2578	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2579	int end = (start + size > 256) ? 256 : start + size, i;
2580
2581	/* userspace palettes are always correct as is */
2582	for (i = start; i < end; i++) {
2583		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2584		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2585		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2586	}
2587	dce_v8_0_crtc_load_lut(crtc);
2588}
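
/*
 * The >> 6 above converts the 16-bit per-channel gamma values userspace
 * passes through the DRM gamma ioctl into the 10-bit range the LUT
 * hardware expects, e.g. a full-scale 0xffff component becomes 0x3ff.
 */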
2589
2590static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2591{
2592	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2593
2594	drm_crtc_cleanup(crtc);
2595	kfree(amdgpu_crtc);
2596}
2597
2598static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2599	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2600	.cursor_move = dce_v8_0_crtc_cursor_move,
2601	.gamma_set = dce_v8_0_crtc_gamma_set,
2602	.set_config = amdgpu_crtc_set_config,
2603	.destroy = dce_v8_0_crtc_destroy,
2604	.page_flip = amdgpu_crtc_page_flip,
2605};
2606
2607static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2608{
2609	struct drm_device *dev = crtc->dev;
2610	struct amdgpu_device *adev = dev->dev_private;
2611	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2612	unsigned type;
2613
2614	switch (mode) {
2615	case DRM_MODE_DPMS_ON:
2616		amdgpu_crtc->enabled = true;
2617		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2618		dce_v8_0_vga_enable(crtc, true);
2619		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2620		dce_v8_0_vga_enable(crtc, false);
2621		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2622		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2623		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2624		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2625		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
2626		dce_v8_0_crtc_load_lut(crtc);
2627		break;
2628	case DRM_MODE_DPMS_STANDBY:
2629	case DRM_MODE_DPMS_SUSPEND:
2630	case DRM_MODE_DPMS_OFF:
2631		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
2632		if (amdgpu_crtc->enabled) {
2633			dce_v8_0_vga_enable(crtc, true);
2634			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2635			dce_v8_0_vga_enable(crtc, false);
2636		}
2637		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2638		amdgpu_crtc->enabled = false;
2639		break;
2640	}
2641	/* adjust pm to dpms */
2642	amdgpu_pm_compute_clocks(adev);
2643}
2644
2645static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2646{
2647	/* disable crtc pair power gating before programming */
2648	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2649	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2650	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2651}
2652
2653static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2654{
2655	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2656	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2657}
2658
2659static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2660{
2661	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2662	struct drm_device *dev = crtc->dev;
2663	struct amdgpu_device *adev = dev->dev_private;
2664	struct amdgpu_atom_ss ss;
2665	int i;
2666
2667	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2668	if (crtc->primary->fb) {
2669		int r;
2670		struct amdgpu_framebuffer *amdgpu_fb;
2671		struct amdgpu_bo *rbo;
2672
2673		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2674		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2675		r = amdgpu_bo_reserve(rbo, false);
2676		if (unlikely(r))
2677			DRM_ERROR("failed to reserve rbo before unpin\n");
2678		else {
2679			amdgpu_bo_unpin(rbo);
2680			amdgpu_bo_unreserve(rbo);
2681		}
2682	}
2683	/* disable the GRPH */
2684	dce_v8_0_grph_enable(crtc, false);
2685
2686	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2687
2688	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2689		if (adev->mode_info.crtcs[i] &&
2690		    adev->mode_info.crtcs[i]->enabled &&
2691		    i != amdgpu_crtc->crtc_id &&
2692		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2693			/* another crtc is using this pll; don't turn
2694			 * off the pll
2695			 */
2696			goto done;
2697		}
2698	}
2699
2700	switch (amdgpu_crtc->pll_id) {
2701	case ATOM_PPLL1:
2702	case ATOM_PPLL2:
2703		/* disable the ppll */
2704		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2705					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2706		break;
2707	case ATOM_PPLL0:
2708		/* disable the ppll */
2709		if ((adev->asic_type == CHIP_KAVERI) ||
2710		    (adev->asic_type == CHIP_BONAIRE) ||
2711		    (adev->asic_type == CHIP_HAWAII))
2712			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2713						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2714		break;
2715	default:
2716		break;
2717	}
2718done:
2719	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2720	amdgpu_crtc->adjusted_clock = 0;
2721	amdgpu_crtc->encoder = NULL;
2722	amdgpu_crtc->connector = NULL;
2723}
2724
2725static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2726				  struct drm_display_mode *mode,
2727				  struct drm_display_mode *adjusted_mode,
2728				  int x, int y, struct drm_framebuffer *old_fb)
2729{
2730	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2731
2732	if (!amdgpu_crtc->adjusted_clock)
2733		return -EINVAL;
2734
2735	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2736	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2737	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2738	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2739	amdgpu_atombios_crtc_scaler_setup(crtc);
2740	dce_v8_0_cursor_reset(crtc);
2741	/* update the hw version fpr dpm */
2742	amdgpu_crtc->hw_mode = *adjusted_mode;
2743
2744	return 0;
2745}
2746
2747static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2748				     const struct drm_display_mode *mode,
2749				     struct drm_display_mode *adjusted_mode)
2750{
2751	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2752	struct drm_device *dev = crtc->dev;
2753	struct drm_encoder *encoder;
2754
2755	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2756	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2757		if (encoder->crtc == crtc) {
2758			amdgpu_crtc->encoder = encoder;
2759			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2760			break;
2761		}
2762	}
2763	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2764		amdgpu_crtc->encoder = NULL;
2765		amdgpu_crtc->connector = NULL;
2766		return false;
2767	}
2768	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2769		return false;
2770	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2771		return false;
2772	/* pick pll */
2773	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2774	/* if we can't get a PPLL for a non-DP encoder, fail */
2775	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2776	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2777		return false;
2778
2779	return true;
2780}
2781
2782static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2783				  struct drm_framebuffer *old_fb)
2784{
2785	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2786}
2787
2788static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2789					 struct drm_framebuffer *fb,
2790					 int x, int y, enum mode_set_atomic state)
2791{
2792	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2793}
2794
2795static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2796	.dpms = dce_v8_0_crtc_dpms,
2797	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2798	.mode_set = dce_v8_0_crtc_mode_set,
2799	.mode_set_base = dce_v8_0_crtc_set_base,
2800	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2801	.prepare = dce_v8_0_crtc_prepare,
2802	.commit = dce_v8_0_crtc_commit,
2803	.load_lut = dce_v8_0_crtc_load_lut,
2804	.disable = dce_v8_0_crtc_disable,
2805};
2806
2807static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2808{
2809	struct amdgpu_crtc *amdgpu_crtc;
2810	int i;
2811
2812	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2813			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2814	if (amdgpu_crtc == NULL)
2815		return -ENOMEM;
2816
2817	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2818
2819	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2820	amdgpu_crtc->crtc_id = index;
2821	adev->mode_info.crtcs[index] = amdgpu_crtc;
2822
2823	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2824	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2825	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2826	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2827
2828	for (i = 0; i < 256; i++) {
2829		amdgpu_crtc->lut_r[i] = i << 2;
2830		amdgpu_crtc->lut_g[i] = i << 2;
2831		amdgpu_crtc->lut_b[i] = i << 2;
2832	}
2833
2834	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2835
2836	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2837	amdgpu_crtc->adjusted_clock = 0;
2838	amdgpu_crtc->encoder = NULL;
2839	amdgpu_crtc->connector = NULL;
2840	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2841
2842	return 0;
2843}
2844
2845static int dce_v8_0_early_init(void *handle)
2846{
2847	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2848
2849	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2850	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2851
2852	dce_v8_0_set_display_funcs(adev);
2853	dce_v8_0_set_irq_funcs(adev);
2854
2855	switch (adev->asic_type) {
2856	case CHIP_BONAIRE:
2857	case CHIP_HAWAII:
2858		adev->mode_info.num_crtc = 6;
2859		adev->mode_info.num_hpd = 6;
2860		adev->mode_info.num_dig = 6;
2861		break;
2862	case CHIP_KAVERI:
2863		adev->mode_info.num_crtc = 4;
2864		adev->mode_info.num_hpd = 6;
2865		adev->mode_info.num_dig = 7;
2866		break;
2867	case CHIP_KABINI:
2868	case CHIP_MULLINS:
2869		adev->mode_info.num_crtc = 2;
2870		adev->mode_info.num_hpd = 6;
2871		adev->mode_info.num_dig = 6; /* ? */
2872		break;
2873	default:
2874		/* FIXME: not supported yet */
2875		return -EINVAL;
2876	}
2877
2878	return 0;
2879}
2880
2881static int dce_v8_0_sw_init(void *handle)
2882{
2883	int r, i;
2884	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2885
2886	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2887		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2888		if (r)
2889			return r;
2890	}
2891
2892	for (i = 8; i < 20; i += 2) {
2893		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2894		if (r)
2895			return r;
2896	}
2897
2898	/* HPD hotplug */
2899	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2900	if (r)
2901		return r;
2902
2903	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2904
2905	adev->ddev->mode_config.max_width = 16384;
2906	adev->ddev->mode_config.max_height = 16384;
2907
2908	adev->ddev->mode_config.preferred_depth = 24;
2909	adev->ddev->mode_config.prefer_shadow = 1;
2910
2911	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2912
2913	r = amdgpu_modeset_create_props(adev);
2914	if (r)
2915		return r;
2916
2920	/* allocate crtcs */
2921	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2922		r = dce_v8_0_crtc_init(adev, i);
2923		if (r)
2924			return r;
2925	}
2926
2927	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2928		amdgpu_print_display_setup(adev->ddev);
2929	else
2930		return -EINVAL;
2931
2932	/* setup afmt */
2933	r = dce_v8_0_afmt_init(adev);
2934	if (r)
2935		return r;
2936
2937	r = dce_v8_0_audio_init(adev);
2938	if (r)
2939		return r;
2940
2941	drm_kms_helper_poll_init(adev->ddev);
2942
2943	adev->mode_info.mode_config_initialized = true;
2944	return 0;
2945}
2946
2947static int dce_v8_0_sw_fini(void *handle)
2948{
2949	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2950
2951	kfree(adev->mode_info.bios_hardcoded_edid);
2952
2953	drm_kms_helper_poll_fini(adev->ddev);
2954
2955	dce_v8_0_audio_fini(adev);
2956
2957	dce_v8_0_afmt_fini(adev);
2958
2959	drm_mode_config_cleanup(adev->ddev);
2960	adev->mode_info.mode_config_initialized = false;
2961
2962	return 0;
2963}
2964
2965static int dce_v8_0_hw_init(void *handle)
2966{
2967	int i;
2968	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2969
2970	/* init dig PHYs, disp eng pll */
2971	amdgpu_atombios_encoder_init_dig(adev);
2972	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2973
2974	/* initialize hpd */
2975	dce_v8_0_hpd_init(adev);
2976
2977	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2978		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2979	}
2980
2981	dce_v8_0_pageflip_interrupt_init(adev);
2982
2983	return 0;
2984}
2985
2986static int dce_v8_0_hw_fini(void *handle)
2987{
2988	int i;
2989	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2990
2991	dce_v8_0_hpd_fini(adev);
2992
2993	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2994		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2995	}
2996
2997	dce_v8_0_pageflip_interrupt_fini(adev);
2998
2999	return 0;
3000}
3001
3002static int dce_v8_0_suspend(void *handle)
3003{
3004	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3005
3006	amdgpu_atombios_scratch_regs_save(adev);
3007
3008	return dce_v8_0_hw_fini(handle);
3009}
3010
3011static int dce_v8_0_resume(void *handle)
3012{
3013	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3014	int ret;
3015
3016	ret = dce_v8_0_hw_init(handle);
3017
3018	amdgpu_atombios_scratch_regs_restore(adev);
3019
3020	/* turn on the BL */
3021	if (adev->mode_info.bl_encoder) {
3022		u8 bl_level = amdgpu_display_backlight_get_level(adev,
3023								  adev->mode_info.bl_encoder);
3024		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3025						    bl_level);
3026	}
3027
3028	return ret;
3029}
3030
3031static bool dce_v8_0_is_idle(void *handle)
3032{
3033	return true;
3034}
3035
3036static int dce_v8_0_wait_for_idle(void *handle)
3037{
3038	return 0;
3039}
3040
3041static void dce_v8_0_print_status(void *handle)
3042{
3043	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3044
3045	dev_info(adev->dev, "DCE 8.x registers\n");
3046	/* XXX todo */
3047}
3048
3049static int dce_v8_0_soft_reset(void *handle)
3050{
3051	u32 srbm_soft_reset = 0, tmp;
3052	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3053
3054	if (dce_v8_0_is_display_hung(adev))
3055		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3056
3057	if (srbm_soft_reset) {
3058		dce_v8_0_print_status((void *)adev);
3059
3060		tmp = RREG32(mmSRBM_SOFT_RESET);
3061		tmp |= srbm_soft_reset;
3062		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3063		WREG32(mmSRBM_SOFT_RESET, tmp);
3064		tmp = RREG32(mmSRBM_SOFT_RESET);
3065
3066		udelay(50);
3067
3068		tmp &= ~srbm_soft_reset;
3069		WREG32(mmSRBM_SOFT_RESET, tmp);
3070		tmp = RREG32(mmSRBM_SOFT_RESET);
3071
3072		/* Wait a little for things to settle down */
3073		udelay(50);
3074		dce_v8_0_print_status((void *)adev);
3075	}
3076	return 0;
3077}
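
/*
 * The RREG32 after each WREG32 above acts as a posting read, making sure
 * the write has landed before the udelay starts; the overall pattern is
 * the usual SRBM soft reset: assert the DC reset bit, wait ~50 us,
 * deassert, then wait again for things to settle.
 */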
3078
3079static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3080						     int crtc,
3081						     enum amdgpu_interrupt_state state)
3082{
3083	u32 reg_block, lb_interrupt_mask;
3084
3085	if (crtc >= adev->mode_info.num_crtc) {
3086		DRM_DEBUG("invalid crtc %d\n", crtc);
3087		return;
3088	}
3089
3090	switch (crtc) {
3091	case 0:
3092		reg_block = CRTC0_REGISTER_OFFSET;
3093		break;
3094	case 1:
3095		reg_block = CRTC1_REGISTER_OFFSET;
3096		break;
3097	case 2:
3098		reg_block = CRTC2_REGISTER_OFFSET;
3099		break;
3100	case 3:
3101		reg_block = CRTC3_REGISTER_OFFSET;
3102		break;
3103	case 4:
3104		reg_block = CRTC4_REGISTER_OFFSET;
3105		break;
3106	case 5:
3107		reg_block = CRTC5_REGISTER_OFFSET;
3108		break;
3109	default:
3110		DRM_DEBUG("invalid crtc %d\n", crtc);
3111		return;
3112	}
3113
3114	switch (state) {
3115	case AMDGPU_IRQ_STATE_DISABLE:
3116		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3117		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3118		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3119		break;
3120	case AMDGPU_IRQ_STATE_ENABLE:
3121		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3122		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3123		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3124		break;
3125	default:
3126		break;
3127	}
3128}
3129
3130static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3131						    int crtc,
3132						    enum amdgpu_interrupt_state state)
3133{
3134	u32 reg_block, lb_interrupt_mask;
3135
3136	if (crtc >= adev->mode_info.num_crtc) {
3137		DRM_DEBUG("invalid crtc %d\n", crtc);
3138		return;
3139	}
3140
3141	switch (crtc) {
3142	case 0:
3143		reg_block = CRTC0_REGISTER_OFFSET;
3144		break;
3145	case 1:
3146		reg_block = CRTC1_REGISTER_OFFSET;
3147		break;
3148	case 2:
3149		reg_block = CRTC2_REGISTER_OFFSET;
3150		break;
3151	case 3:
3152		reg_block = CRTC3_REGISTER_OFFSET;
3153		break;
3154	case 4:
3155		reg_block = CRTC4_REGISTER_OFFSET;
3156		break;
3157	case 5:
3158		reg_block = CRTC5_REGISTER_OFFSET;
3159		break;
3160	default:
3161		DRM_DEBUG("invalid crtc %d\n", crtc);
3162		return;
3163	}
3164
3165	switch (state) {
3166	case AMDGPU_IRQ_STATE_DISABLE:
3167		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3168		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3169		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3170		break;
3171	case AMDGPU_IRQ_STATE_ENABLE:
3172		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3173		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3174		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3175		break;
3176	default:
3177		break;
3178	}
3179}
3180
3181static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3182					    struct amdgpu_irq_src *src,
3183					    unsigned type,
3184					    enum amdgpu_interrupt_state state)
3185{
3186	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
3187
3188	switch (type) {
3189	case AMDGPU_HPD_1:
3190		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
3191		break;
3192	case AMDGPU_HPD_2:
3193		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
3194		break;
3195	case AMDGPU_HPD_3:
3196		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
3197		break;
3198	case AMDGPU_HPD_4:
3199		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
3200		break;
3201	case AMDGPU_HPD_5:
3202		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
3203		break;
3204	case AMDGPU_HPD_6:
3205		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
3206		break;
3207	default:
3208		DRM_DEBUG("invalid hpd %d\n", type);
3209		return 0;
3210	}
3211
3212	switch (state) {
3213	case AMDGPU_IRQ_STATE_DISABLE:
3214		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3215		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3216		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3217		break;
3218	case AMDGPU_IRQ_STATE_ENABLE:
3219		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3220		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3221		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3222		break;
3223	default:
3224		break;
3225	}
3226
3227	return 0;
3228}
3229
3230static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3231					     struct amdgpu_irq_src *src,
3232					     unsigned type,
3233					     enum amdgpu_interrupt_state state)
3234{
3235	switch (type) {
3236	case AMDGPU_CRTC_IRQ_VBLANK1:
3237		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3238		break;
3239	case AMDGPU_CRTC_IRQ_VBLANK2:
3240		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3241		break;
3242	case AMDGPU_CRTC_IRQ_VBLANK3:
3243		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3244		break;
3245	case AMDGPU_CRTC_IRQ_VBLANK4:
3246		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3247		break;
3248	case AMDGPU_CRTC_IRQ_VBLANK5:
3249		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3250		break;
3251	case AMDGPU_CRTC_IRQ_VBLANK6:
3252		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3253		break;
3254	case AMDGPU_CRTC_IRQ_VLINE1:
3255		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3256		break;
3257	case AMDGPU_CRTC_IRQ_VLINE2:
3258		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3259		break;
3260	case AMDGPU_CRTC_IRQ_VLINE3:
3261		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3262		break;
3263	case AMDGPU_CRTC_IRQ_VLINE4:
3264		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3265		break;
3266	case AMDGPU_CRTC_IRQ_VLINE5:
3267		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3268		break;
3269	case AMDGPU_CRTC_IRQ_VLINE6:
3270		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3271		break;
3272	default:
3273		break;
3274	}
3275	return 0;
3276}
3277
3278static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3279			     struct amdgpu_irq_src *source,
3280			     struct amdgpu_iv_entry *entry)
3281{
3282	unsigned crtc = entry->src_id - 1;
3283	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3284	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3285
3286	switch (entry->src_data) {
3287	case 0: /* vblank */
3288		if (disp_int & interrupt_status_offsets[crtc].vblank)
3289			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3290		else
3291			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3292
3293		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3294			drm_handle_vblank(adev->ddev, crtc);
3295		}
3296		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3297
3298		break;
3299	case 1: /* vline */
3300		if (disp_int & interrupt_status_offsets[crtc].vline)
3301			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3302		else
3303			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3304
3305		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3306
3307		break;
3308	default:
3309		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3310		break;
3311	}
3312
3313	return 0;
3314}
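/*
 * Editorial note, not part of the upstream file: on CIK the interrupt
 * vector's src_id 1..6 maps to display controllers D1..D6 (hence
 * "crtc = entry->src_id - 1"), and src_data selects the event within the
 * controller: 0 = vblank, 1 = vline.  Worked example: an IV entry with
 * src_id = 3, src_data = 0 decodes as a vblank on crtc 2 (D3), is acked
 * through mmLB_VBLANK_STATUS + crtc_offsets[2], and is forwarded to
 * drm_handle_vblank(adev->ddev, 2).
 */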
3315
3316static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3317						 struct amdgpu_irq_src *src,
3318						 unsigned type,
3319						 enum amdgpu_interrupt_state state)
3320{
3321	u32 reg;
3322
3323	if (type >= adev->mode_info.num_crtc) {
3324		DRM_ERROR("invalid pageflip crtc %d\n", type);
3325		return -EINVAL;
3326	}
3327
3328	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3329	if (state == AMDGPU_IRQ_STATE_DISABLE)
3330		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3331		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3332	else
3333		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3334		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3335
3336	return 0;
3337}
3338
3339static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3340				struct amdgpu_irq_src *source,
3341				struct amdgpu_iv_entry *entry)
3342{
3343	unsigned long flags;
3344	unsigned crtc_id;
3345	struct amdgpu_crtc *amdgpu_crtc;
3346	struct amdgpu_flip_work *works;
3347
3348	crtc_id = (entry->src_id - 8) >> 1;
3349	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3350
3351	if (crtc_id >= adev->mode_info.num_crtc) {
3352		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3353		return -EINVAL;
3354	}
3355
3356	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3357	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3358		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3359		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3360
3361	/* IRQ could occur during the initial stage */
3362	if (amdgpu_crtc == NULL)
3363		return 0;
3364
3365	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3366	works = amdgpu_crtc->pflip_works;
3367	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3368		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3369						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3370						amdgpu_crtc->pflip_status,
3371						AMDGPU_FLIP_SUBMITTED);
3372		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3373		return 0;
3374	}
3375
3376	/* page flip completed. clean up */
3377	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3378	amdgpu_crtc->pflip_works = NULL;
3379
3380	/* wake up userspace */
3381	if (works->event)
3382		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3383
3384	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3385
3386	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3387	schedule_work(&works->unpin_work);
3388
3389	return 0;
3390}
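/*
 * Editorial note, not part of the upstream file: page-flip interrupts
 * use a different src_id mapping than the vblank/vline sources above.
 * Each display controller owns a pair of consecutive source IDs starting
 * at 8, so "crtc_id = (entry->src_id - 8) >> 1" decodes src_id 8 or 9 as
 * crtc 0, 10 or 11 as crtc 1, and so on up to crtc 5.
 */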
3391
3392static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3393			    struct amdgpu_irq_src *source,
3394			    struct amdgpu_iv_entry *entry)
3395{
3396	uint32_t disp_int, mask, int_control, tmp;
3397	unsigned hpd;
3398
3399	if (entry->src_data >= adev->mode_info.num_hpd) {
3400		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3401		return 0;
3402	}
3403
3404	hpd = entry->src_data;
3405	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3406	mask = interrupt_status_offsets[hpd].hpd;
3407	int_control = hpd_int_control_offsets[hpd];
3408
3409	if (disp_int & mask) {
3410		tmp = RREG32(int_control);
3411		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3412		WREG32(int_control, tmp);
3413		schedule_work(&adev->hotplug_work);
3414		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3415	}
3416
3417	return 0;
3418
3419}
3420
3421static int dce_v8_0_set_clockgating_state(void *handle,
3422					  enum amd_clockgating_state state)
3423{
3424	return 0;
3425}
3426
3427static int dce_v8_0_set_powergating_state(void *handle,
3428					  enum amd_powergating_state state)
3429{
3430	return 0;
3431}
3432
3433const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3434	.early_init = dce_v8_0_early_init,
3435	.late_init = NULL,
3436	.sw_init = dce_v8_0_sw_init,
3437	.sw_fini = dce_v8_0_sw_fini,
3438	.hw_init = dce_v8_0_hw_init,
3439	.hw_fini = dce_v8_0_hw_fini,
3440	.suspend = dce_v8_0_suspend,
3441	.resume = dce_v8_0_resume,
3442	.is_idle = dce_v8_0_is_idle,
3443	.wait_for_idle = dce_v8_0_wait_for_idle,
3444	.soft_reset = dce_v8_0_soft_reset,
3445	.print_status = dce_v8_0_print_status,
3446	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3447	.set_powergating_state = dce_v8_0_set_powergating_state,
3448};
3449
3450static void
3451dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3452			  struct drm_display_mode *mode,
3453			  struct drm_display_mode *adjusted_mode)
3454{
3455	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3456
3457	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3458
3459	/* need to call this here rather than in prepare() since we need some crtc info */
3460	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3461
3462	/* set scaler clears this on some chips */
3463	dce_v8_0_set_interleave(encoder->crtc, mode);
3464
3465	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3466		dce_v8_0_afmt_enable(encoder, true);
3467		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3468	}
3469}
3470
3471static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3472{
3473	struct amdgpu_device *adev = encoder->dev->dev_private;
3474	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3475	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3476
3477	if ((amdgpu_encoder->active_device &
3478	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3479	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3480	     ENCODER_OBJECT_ID_NONE)) {
3481		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3482		if (dig) {
3483			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3484			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3485				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3486		}
3487	}
3488
3489	amdgpu_atombios_scratch_regs_lock(adev, true);
3490
3491	if (connector) {
3492		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3493
3494		/* select the clock/data port if it uses a router */
3495		if (amdgpu_connector->router.cd_valid)
3496			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3497
3498		/* turn eDP panel on for mode set */
3499		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3500			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3501							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3502	}
3503
3504	/* this is needed for the pll/ss setup to work correctly in some cases */
3505	amdgpu_atombios_encoder_set_crtc_source(encoder);
3506	/* set up the FMT blocks */
3507	dce_v8_0_program_fmt(encoder);
3508}
3509
3510static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3511{
3512	struct drm_device *dev = encoder->dev;
3513	struct amdgpu_device *adev = dev->dev_private;
3514
3515	/* need to call this here as we need the crtc set up */
3516	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3517	amdgpu_atombios_scratch_regs_lock(adev, false);
3518}
3519
3520static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3521{
3522	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3523	struct amdgpu_encoder_atom_dig *dig;
3524
3525	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3526
3527	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3528		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3529			dce_v8_0_afmt_enable(encoder, false);
3530		dig = amdgpu_encoder->enc_priv;
3531		dig->dig_encoder = -1;
3532	}
3533	amdgpu_encoder->active_device = 0;
3534}
3535
3536/* these are handled by the primary encoders */
3537static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3538{
3539
3540}
3541
3542static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3543{
3544
3545}
3546
3547static void
3548dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3549		      struct drm_display_mode *mode,
3550		      struct drm_display_mode *adjusted_mode)
3551{
3552
3553}
3554
3555static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3556{
3557
3558}
3559
3560static void
3561dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3562{
3563
3564}
3565
3566static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3567	.dpms = dce_v8_0_ext_dpms,
3568	.prepare = dce_v8_0_ext_prepare,
3569	.mode_set = dce_v8_0_ext_mode_set,
3570	.commit = dce_v8_0_ext_commit,
3571	.disable = dce_v8_0_ext_disable,
3572	/* no detect for TMDS/LVDS yet */
3573};
3574
3575static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3576	.dpms = amdgpu_atombios_encoder_dpms,
3577	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3578	.prepare = dce_v8_0_encoder_prepare,
3579	.mode_set = dce_v8_0_encoder_mode_set,
3580	.commit = dce_v8_0_encoder_commit,
3581	.disable = dce_v8_0_encoder_disable,
3582	.detect = amdgpu_atombios_encoder_dig_detect,
3583};
3584
3585static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3586	.dpms = amdgpu_atombios_encoder_dpms,
3587	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3588	.prepare = dce_v8_0_encoder_prepare,
3589	.mode_set = dce_v8_0_encoder_mode_set,
3590	.commit = dce_v8_0_encoder_commit,
3591	.detect = amdgpu_atombios_encoder_dac_detect,
3592};
3593
3594static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3595{
3596	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3597	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3598		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3599	kfree(amdgpu_encoder->enc_priv);
3600	drm_encoder_cleanup(encoder);
3601	kfree(amdgpu_encoder);
3602}
3603
3604static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3605	.destroy = dce_v8_0_encoder_destroy,
3606};
3607
3608static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3609				 uint32_t encoder_enum,
3610				 uint32_t supported_device,
3611				 u16 caps)
3612{
3613	struct drm_device *dev = adev->ddev;
3614	struct drm_encoder *encoder;
3615	struct amdgpu_encoder *amdgpu_encoder;
3616
3617	/* see if we already added it */
3618	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3619		amdgpu_encoder = to_amdgpu_encoder(encoder);
3620		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3621			amdgpu_encoder->devices |= supported_device;
3622			return;
3623		}
3624
3625	}
3626
3627	/* add a new one */
3628	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3629	if (!amdgpu_encoder)
3630		return;
3631
3632	encoder = &amdgpu_encoder->base;
3633	switch (adev->mode_info.num_crtc) {
3634	case 1:
3635		encoder->possible_crtcs = 0x1;
3636		break;
3637	case 2:
3638	default:
3639		encoder->possible_crtcs = 0x3;
3640		break;
3641	case 4:
3642		encoder->possible_crtcs = 0xf;
3643		break;
3644	case 6:
3645		encoder->possible_crtcs = 0x3f;
3646		break;
3647	}
3648
3649	amdgpu_encoder->enc_priv = NULL;
3650
3651	amdgpu_encoder->encoder_enum = encoder_enum;
3652	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3653	amdgpu_encoder->devices = supported_device;
3654	amdgpu_encoder->rmx_type = RMX_OFF;
3655	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3656	amdgpu_encoder->is_ext_encoder = false;
3657	amdgpu_encoder->caps = caps;
3658
3659	switch (amdgpu_encoder->encoder_id) {
3660	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3661	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3662		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3663				 DRM_MODE_ENCODER_DAC, NULL);
3664		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3665		break;
3666	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3667	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3668	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3669	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3670	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3671		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3672			amdgpu_encoder->rmx_type = RMX_FULL;
3673			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3674					 DRM_MODE_ENCODER_LVDS, NULL);
3675			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3676		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3677			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3678					 DRM_MODE_ENCODER_DAC, NULL);
3679			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3680		} else {
3681			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3682					 DRM_MODE_ENCODER_TMDS, NULL);
3683			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3684		}
3685		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3686		break;
3687	case ENCODER_OBJECT_ID_SI170B:
3688	case ENCODER_OBJECT_ID_CH7303:
3689	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3690	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3691	case ENCODER_OBJECT_ID_TITFP513:
3692	case ENCODER_OBJECT_ID_VT1623:
3693	case ENCODER_OBJECT_ID_HDMI_SI1930:
3694	case ENCODER_OBJECT_ID_TRAVIS:
3695	case ENCODER_OBJECT_ID_NUTMEG:
3696		/* these are handled by the primary encoders */
3697		amdgpu_encoder->is_ext_encoder = true;
3698		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3699			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3700					 DRM_MODE_ENCODER_LVDS, NULL);
3701		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3702			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3703					 DRM_MODE_ENCODER_DAC, NULL);
3704		else
3705			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3706					 DRM_MODE_ENCODER_TMDS, NULL);
3707		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3708		break;
3709	}
3710}
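/*
 * Editorial note, not part of the upstream file: possible_crtcs is a
 * bitmask with one bit per CRTC the encoder may drive, which is why the
 * switch above maps num_crtc 2/4/6 to 0x3/0xf/0x3f.  For example, on a
 * 4-CRTC Kaveri part the value 0xf allows the encoder to be routed to
 * CRTCs 0 through 3.
 */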
3711
3712static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3713	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
3714	.bandwidth_update = &dce_v8_0_bandwidth_update,
3715	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3716	.vblank_wait = &dce_v8_0_vblank_wait,
3717	.is_display_hung = &dce_v8_0_is_display_hung,
3718	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3719	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3720	.hpd_sense = &dce_v8_0_hpd_sense,
3721	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3722	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3723	.page_flip = &dce_v8_0_page_flip,
3724	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3725	.add_encoder = &dce_v8_0_encoder_add,
3726	.add_connector = &amdgpu_connector_add,
3727	.stop_mc_access = &dce_v8_0_stop_mc_access,
3728	.resume_mc_access = &dce_v8_0_resume_mc_access,
3729};
3730
3731static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3732{
3733	if (adev->mode_info.funcs == NULL)
3734		adev->mode_info.funcs = &dce_v8_0_display_funcs;
3735}
3736
3737static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3738	.set = dce_v8_0_set_crtc_interrupt_state,
3739	.process = dce_v8_0_crtc_irq,
3740};
3741
3742static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3743	.set = dce_v8_0_set_pageflip_interrupt_state,
3744	.process = dce_v8_0_pageflip_irq,
3745};
3746
3747static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3748	.set = dce_v8_0_set_hpd_interrupt_state,
3749	.process = dce_v8_0_hpd_irq,
3750};
3751
3752static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3753{
3754	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3755	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3756
3757	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3758	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3759
3760	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3761	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3762}
v4.10.11
  23#include "drmP.h"
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "cikd.h"
  28#include "atom.h"
  29#include "amdgpu_atombios.h"
  30#include "atombios_crtc.h"
  31#include "atombios_encoders.h"
  32#include "amdgpu_pll.h"
  33#include "amdgpu_connectors.h"
  34#include "dce_v8_0.h"
  35
  36#include "dce/dce_8_0_d.h"
  37#include "dce/dce_8_0_sh_mask.h"
  38
  39#include "gca/gfx_7_2_enum.h"
  40
  41#include "gmc/gmc_7_1_d.h"
  42#include "gmc/gmc_7_1_sh_mask.h"
  43
  44#include "oss/oss_2_0_d.h"
  45#include "oss/oss_2_0_sh_mask.h"
  46
  47static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  49
  50static const u32 crtc_offsets[6] =
  51{
  52	CRTC0_REGISTER_OFFSET,
  53	CRTC1_REGISTER_OFFSET,
  54	CRTC2_REGISTER_OFFSET,
  55	CRTC3_REGISTER_OFFSET,
  56	CRTC4_REGISTER_OFFSET,
  57	CRTC5_REGISTER_OFFSET
  58};
  59
  60static const u32 hpd_offsets[] =
  61{
  62	HPD0_REGISTER_OFFSET,
  63	HPD1_REGISTER_OFFSET,
  64	HPD2_REGISTER_OFFSET,
  65	HPD3_REGISTER_OFFSET,
  66	HPD4_REGISTER_OFFSET,
  67	HPD5_REGISTER_OFFSET
  68};
  69
  70static const uint32_t dig_offsets[] = {
  71	CRTC0_REGISTER_OFFSET,
  72	CRTC1_REGISTER_OFFSET,
  73	CRTC2_REGISTER_OFFSET,
  74	CRTC3_REGISTER_OFFSET,
  75	CRTC4_REGISTER_OFFSET,
  76	CRTC5_REGISTER_OFFSET,
  77	(0x13830 - 0x7030) >> 2,
  78};
  79
  80static const struct {
  81	uint32_t	reg;
  82	uint32_t	vblank;
  83	uint32_t	vline;
  84	uint32_t	hpd;
  85
  86} interrupt_status_offsets[6] = { {
  87	.reg = mmDISP_INTERRUPT_STATUS,
  88	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  89	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  90	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  91}, {
  92	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  93	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 116} };
 117
 118static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119				     u32 block_offset, u32 reg)
 120{
 121	unsigned long flags;
 122	u32 r;
 123
 124	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 125	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 126	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 127	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 128
 129	return r;
 130}
 131
 132static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 133				      u32 block_offset, u32 reg, u32 v)
 134{
 135	unsigned long flags;
 136
 137	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 138	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 139	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 140	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 141}
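/*
 * Editorial note, not part of the upstream file: the Azalia endpoint
 * registers are indirectly addressed; the register index goes through
 * AZALIA_F0_CODEC_ENDPOINT_INDEX and the payload through
 * AZALIA_F0_CODEC_ENDPOINT_DATA, and the spinlock keeps each INDEX/DATA
 * pair atomic against concurrent endpoint accesses.  A hedged
 * read-modify-write sketch on top of the two helpers above (some_reg and
 * some_bit are placeholders, not real register definitions):
 *
 *	u32 v = dce_v8_0_audio_endpt_rreg(adev, block_offset, some_reg);
 *	dce_v8_0_audio_endpt_wreg(adev, block_offset, some_reg, v | some_bit);
 */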
 142
 143static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 144{
 145	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
 146			CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
 147		return true;
 148	else
 149		return false;
 150}
 151
 152static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 153{
 154	u32 pos1, pos2;
 155
 156	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 157	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 158
 159	if (pos1 != pos2)
 160		return true;
 161	else
 162		return false;
 163}
 164
 165/**
 166 * dce_v8_0_vblank_wait - vblank wait asic callback.
 167 *
 168 * @adev: amdgpu_device pointer
 169 * @crtc: crtc to wait for vblank on
 170 *
 171 * Wait for vblank on the requested crtc (evergreen+).
 172 */
 173static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 174{
 175	unsigned i = 100;
 176
 177	if (crtc >= adev->mode_info.num_crtc)
 178		return;
 179
 180	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 181		return;
 182
 183	/* depending on when we hit vblank, we may be close to active; if so,
 184	 * wait for another frame.
 185	 */
 186	while (dce_v8_0_is_in_vblank(adev, crtc)) {
 187		if (i++ == 100) {
 188			i = 0;
 189			if (!dce_v8_0_is_counter_moving(adev, crtc))
 190				break;
 191		}
 192	}
 193
 194	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
 195		if (i++ == 100) {
 196			i = 0;
 197			if (!dce_v8_0_is_counter_moving(adev, crtc))
 198				break;
 199		}
 200	}
 201}
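/*
 * Editorial note, not part of the upstream file: the two polling loops
 * above exist because the call may land inside an ongoing vblank.  The
 * first loop waits that vblank out, so the function does not return in
 * the middle of one; the second waits for the start of the next vblank.
 * Every 100 polls the position counter is sampled twice, and if it is
 * not moving the CRTC is effectively stopped, so the wait bails out
 * rather than spinning forever.
 */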
 202
 203static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 204{
 205	if (crtc >= adev->mode_info.num_crtc)
 206		return 0;
 207	else
 208		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 209}
 210
 211static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 212{
 213	unsigned i;
 214
 215	/* Enable pflip interrupts */
 216	for (i = 0; i < adev->mode_info.num_crtc; i++)
 217		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 218}
 219
 220static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 221{
 222	unsigned i;
 223
 224	/* Disable pflip interrupts */
 225	for (i = 0; i < adev->mode_info.num_crtc; i++)
 226		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 227}
 228
 229/**
 230 * dce_v8_0_page_flip - pageflip callback.
 231 *
 232 * @adev: amdgpu_device pointer
 233 * @crtc_id: crtc to cleanup pageflip on
 234 * @crtc_base: new address of the crtc (GPU MC address)
 235 *
 236 * Triggers the actual pageflip by updating the primary
 237 * surface base address.
 238 */
 239static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 240			       int crtc_id, u64 crtc_base, bool async)
 241{
 242	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 243
 244	/* flip at hsync for async, default is vsync */
 245	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 246	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 247	/* update the primary scanout addresses */
 248	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 249	       upper_32_bits(crtc_base));
 250	/* writing to the low address triggers the update */
 251	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 252	       lower_32_bits(crtc_base));
 253	/* post the write */
 254	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 255}
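/*
 * Editorial note, not part of the upstream file: the GRPH primary
 * surface address is double-buffered, so the HIGH dword is staged first
 * and the write to the low dword arms the flip, which the controller
 * then takes at the next vsync (or at hsync when async is set).  The
 * trailing RREG32 just posts the write.  Worked example: flipping to GPU
 * address 0x123456000 writes 0x1 via upper_32_bits() and 0x23456000 via
 * lower_32_bits().
 */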
 256
 257static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 258					u32 *vbl, u32 *position)
 259{
 260	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 261		return -EINVAL;
 262
 263	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 264	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 265
 266	return 0;
 267}
 268
 269/**
 270 * dce_v8_0_hpd_sense - hpd sense callback.
 271 *
 272 * @adev: amdgpu_device pointer
 273 * @hpd: hpd (hotplug detect) pin
 274 *
 275 * Checks if a digital monitor is connected (evergreen+).
 276 * Returns true if connected, false if not connected.
 277 */
 278static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 279			       enum amdgpu_hpd_id hpd)
 280{
 281	bool connected = false;
 282
 283	if (hpd >= adev->mode_info.num_hpd)
 284		return connected;
 285
 286	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 287	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 288		connected = true;
 289
 290	return connected;
 291}
 292
 293/**
 294 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 295 *
 296 * @adev: amdgpu_device pointer
 297 * @hpd: hpd (hotplug detect) pin
 298 *
 299 * Set the polarity of the hpd pin (evergreen+).
 300 */
 301static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 302				      enum amdgpu_hpd_id hpd)
 303{
 304	u32 tmp;
 305	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 306
 307	if (hpd >= adev->mode_info.num_hpd)
 308		return;
 309
 310	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 311	if (connected)
 312		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 313	else
 314		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 315	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 316}
 317
 318/**
 319 * dce_v8_0_hpd_init - hpd setup callback.
 320 *
 321 * @adev: amdgpu_device pointer
 322 *
 323 * Set up the hpd pins used by the card (evergreen+).
 324 * Enable the pin, set the polarity, and enable the hpd interrupts.
 325 */
 326static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 327{
 328	struct drm_device *dev = adev->ddev;
 329	struct drm_connector *connector;
 330	u32 tmp;
 331
 332	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 333		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 334
 335		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 336			continue;
 337
 338		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 339		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 340		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 341
 342		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 343		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 344			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
 345			 * aux dp channel on imac and to help (but not completely fix)
 346			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 347			 * also avoid interrupt storms during dpms.
 348			 */
 349			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 350			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 351			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 352			continue;
 353		}
 354
 355		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 356		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 357	}
 358}
 359
 360/**
 361 * dce_v8_0_hpd_fini - hpd tear down callback.
 362 *
 363 * @adev: amdgpu_device pointer
 364 *
 365 * Tear down the hpd pins used by the card (evergreen+).
 366 * Disable the hpd interrupts.
 367 */
 368static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 369{
 370	struct drm_device *dev = adev->ddev;
 371	struct drm_connector *connector;
 372	u32 tmp;
 373
 374	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 375		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 376
 377		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 378			continue;
 379
 380		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 381		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 382		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 383
 384		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 385	}
 386}
 387
 388static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 389{
 390	return mmDC_GPIO_HPD_A;
 391}
 392
 393static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 394{
 395	u32 crtc_hung = 0;
 396	u32 crtc_status[6];
 397	u32 i, j, tmp;
 398
 399	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 400		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
 401			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 402			crtc_hung |= (1 << i);
 403		}
 404	}
 405
 406	for (j = 0; j < 10; j++) {
 407		for (i = 0; i < adev->mode_info.num_crtc; i++) {
 408			if (crtc_hung & (1 << i)) {
 409				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 410				if (tmp != crtc_status[i])
 411					crtc_hung &= ~(1 << i);
 412			}
 413		}
 414		if (crtc_hung == 0)
 415			return false;
 416		udelay(100);
 417	}
 418
 419	return true;
 420}
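/*
 * Editorial note, not part of the upstream file: the hang check above is
 * a sampling scheme.  Each enabled CRTC's HV position counter is latched
 * once, then re-read up to 10 times at 100 us intervals; a CRTC is
 * dropped from the crtc_hung bitmask as soon as its counter moves.  Only
 * if some enabled CRTC never advances within the roughly 1 ms window is
 * the display engine reported as hung.
 */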
 421
 422static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
 423				    struct amdgpu_mode_mc_save *save)
 424{
 425	u32 crtc_enabled, tmp;
 426	int i;
 427
 428	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
 429	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 430
 431	/* disable VGA render */
 432	tmp = RREG32(mmVGA_RENDER_CONTROL);
 433	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 434	WREG32(mmVGA_RENDER_CONTROL, tmp);
 435
 436	/* blank the display controllers */
 437	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 438		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 439					     CRTC_CONTROL, CRTC_MASTER_EN);
 440		if (crtc_enabled) {
 441#if 1
 442			save->crtc_enabled[i] = true;
 443			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 444			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
 445				/* it is correct only for RGB; black is 0 */
 446				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
 447				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
 448				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 449			}
 450			mdelay(20);
 451#else
 452			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 453			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 454			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 455			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 456			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 457			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 458			save->crtc_enabled[i] = false;
 459			/* ***** */
 460#endif
 461		} else {
 462			save->crtc_enabled[i] = false;
 463		}
 464	}
 465}
 466
 467static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
 468				      struct amdgpu_mode_mc_save *save)
 469{
 470	u32 tmp;
 471	int i;
 472
 473	/* update crtc base addresses */
 474	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 475		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 476		       upper_32_bits(adev->mc.vram_start));
 477		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 478		       (u32)adev->mc.vram_start);
 479
 480		if (save->crtc_enabled[i]) {
 481			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 482			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
 483			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 484		}
 485		mdelay(20);
 486	}
 487
 488	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
 489	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
 490
 491	/* Unlock vga access */
 492	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 493	mdelay(1);
 494	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 495}
 496
 497static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 498					  bool render)
 499{
 500	u32 tmp;
 501
 502	/* Lockout access through VGA aperture*/
 503	tmp = RREG32(mmVGA_HDP_CONTROL);
 504	if (render)
 505		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
 506	else
 507		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 508	WREG32(mmVGA_HDP_CONTROL, tmp);
 509
 510	/* disable VGA render */
 511	tmp = RREG32(mmVGA_RENDER_CONTROL);
 512	if (render)
 513		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
 514	else
 515		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 516	WREG32(mmVGA_RENDER_CONTROL, tmp);
 517}
 518
 519static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 520{
 521	int num_crtc = 0;
 522
 523	switch (adev->asic_type) {
 524	case CHIP_BONAIRE:
 525	case CHIP_HAWAII:
 526		num_crtc = 6;
 527		break;
 528	case CHIP_KAVERI:
 529		num_crtc = 4;
 530		break;
 531	case CHIP_KABINI:
 532	case CHIP_MULLINS:
 533		num_crtc = 2;
 534		break;
 535	default:
 536		num_crtc = 0;
 537	}
 538	return num_crtc;
 539}
 540
 541void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 542{
 543	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
 544	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 545		u32 tmp;
 546		int crtc_enabled, i;
 547
 548		dce_v8_0_set_vga_render_state(adev, false);
 549
 550		/* Disable the CRTCs */
 551		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
 552			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 553									 CRTC_CONTROL, CRTC_MASTER_EN);
 554			if (crtc_enabled) {
 555				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 556				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 557				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 558				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 559				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 560			}
 561		}
 562	}
 563}
 564
 565static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 566{
 567	struct drm_device *dev = encoder->dev;
 568	struct amdgpu_device *adev = dev->dev_private;
 569	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 570	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 571	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 572	int bpc = 0;
 573	u32 tmp = 0;
 574	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 575
 576	if (connector) {
 577		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 578		bpc = amdgpu_connector_get_monitor_bpc(connector);
 579		dither = amdgpu_connector->dither;
 580	}
 581
 582	/* LVDS/eDP FMT is set up by atom */
 583	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 584		return;
 585
 586	/* not needed for analog */
 587	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
 588	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
 589		return;
 590
 591	if (bpc == 0)
 592		return;
 593
 594	switch (bpc) {
 595	case 6:
 596		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 597			/* XXX sort out optimal dither settings */
 598			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 599				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 600				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 601				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 602		else
 603			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 604			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 605		break;
 606	case 8:
 607		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 608			/* XXX sort out optimal dither settings */
 609			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 610				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 611				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 612				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 613				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 614		else
 615			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 616			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 617		break;
 618	case 10:
 619		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 620			/* XXX sort out optimal dither settings */
 621			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 622				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 623				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 624				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 625				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 626		else
 627			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 628			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 629		break;
 630	default:
 631		/* not needed */
 632		break;
 633	}
 634
 635	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 636}
 637
 638
 639/* display watermark setup */
 640/**
 641 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 642 *
 643 * @adev: amdgpu_device pointer
 644 * @amdgpu_crtc: the selected display controller
 645 * @mode: the current display mode on the selected display
 646 * controller
 647 *
 648 * Set up the line buffer allocation for
 649 * the selected display controller (CIK).
 650 * Returns the line buffer size in pixels.
 651 */
 652static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
 653				       struct amdgpu_crtc *amdgpu_crtc,
 654				       struct drm_display_mode *mode)
 655{
 656	u32 tmp, buffer_alloc, i;
 657	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 658	/*
 659	 * Line Buffer Setup
 660	 * There are 6 line buffers, one for each display controller.
 661	 * There are 3 partitions per LB. Select the number of partitions
 662	 * to enable based on the display width.  For display widths larger
 663	 * than 4096, you need to use 2 display controllers and combine
 664	 * them using the stereo blender.
 665	 */
 666	if (amdgpu_crtc->base.enabled && mode) {
 667		if (mode->crtc_hdisplay < 1920) {
 668			tmp = 1;
 669			buffer_alloc = 2;
 670		} else if (mode->crtc_hdisplay < 2560) {
 671			tmp = 2;
 672			buffer_alloc = 2;
 673		} else if (mode->crtc_hdisplay < 4096) {
 674			tmp = 0;
 675			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 676		} else {
 677			DRM_DEBUG_KMS("Mode too big for LB!\n");
 678			tmp = 0;
 679			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 680		}
 681	} else {
 682		tmp = 1;
 683		buffer_alloc = 0;
 684	}
 685
 686	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
 687	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
 688	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
 689
 690	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
 691	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 692	for (i = 0; i < adev->usec_timeout; i++) {
 693		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
 694		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
 695			break;
 696		udelay(1);
 697	}
 698
 699	if (amdgpu_crtc->base.enabled && mode) {
 700		switch (tmp) {
 701		case 0:
 702		default:
 703			return 4096 * 2;
 704		case 1:
 705			return 1920 * 2;
 706		case 2:
 707			return 2560 * 2;
 708		}
 709	}
 710
 711	/* controller not enabled, so no lb used */
 712	return 0;
 713}
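/*
 * Editorial note, not part of the upstream file: a worked example of the
 * bucketing above.  A 1280-wide mode falls in the first bucket (tmp = 1)
 * and yields 1920 * 2 = 3840 pixels of line buffer; a 1920-wide mode is
 * not < 1920, so it takes the 2560 bucket (tmp = 2) and yields
 * 2560 * 2 = 5120 pixels.  The returned size is later fed to the
 * watermark code as wm->lb_size.
 */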
 714
 715/**
 716 * cik_get_number_of_dram_channels - get the number of dram channels
 717 *
 718 * @adev: amdgpu_device pointer
 719 *
 720 * Look up the number of video ram channels (CIK).
 721 * Used for display watermark bandwidth calculations
 722 * Returns the number of dram channels
 723 */
 724static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 725{
 726	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 727
 728	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 729	case 0:
 730	default:
 731		return 1;
 732	case 1:
 733		return 2;
 734	case 2:
 735		return 4;
 736	case 3:
 737		return 8;
 738	case 4:
 739		return 3;
 740	case 5:
 741		return 6;
 742	case 6:
 743		return 10;
 744	case 7:
 745		return 12;
 746	case 8:
 747		return 16;
 748	}
 749}
 750
 751struct dce8_wm_params {
 752	u32 dram_channels; /* number of dram channels */
 753	u32 yclk;          /* bandwidth per dram data pin in kHz */
 754	u32 sclk;          /* engine clock in kHz */
 755	u32 disp_clk;      /* display clock in kHz */
 756	u32 src_width;     /* viewport width */
 757	u32 active_time;   /* active display time in ns */
 758	u32 blank_time;    /* blank time in ns */
 759	bool interlaced;    /* mode is interlaced */
 760	fixed20_12 vsc;    /* vertical scale ratio */
 761	u32 num_heads;     /* number of active crtcs */
 762	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 763	u32 lb_size;       /* line buffer allocated to pipe */
 764	u32 vtaps;         /* vertical scaler taps */
 765};
 766
 767/**
 768 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 769 *
 770 * @wm: watermark calculation data
 771 *
 772 * Calculate the raw dram bandwidth (CIK).
 773 * Used for display watermark bandwidth calculations
 774 * Returns the dram bandwidth in MBytes/s
 775 */
 776static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
 777{
 778	/* Calculate raw DRAM Bandwidth */
 779	fixed20_12 dram_efficiency; /* 0.7 */
 780	fixed20_12 yclk, dram_channels, bandwidth;
 781	fixed20_12 a;
 782
 783	a.full = dfixed_const(1000);
 784	yclk.full = dfixed_const(wm->yclk);
 785	yclk.full = dfixed_div(yclk, a);
 786	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 787	a.full = dfixed_const(10);
 788	dram_efficiency.full = dfixed_const(7);
 789	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 790	bandwidth.full = dfixed_mul(dram_channels, yclk);
 791	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 792
 793	return dfixed_trunc(bandwidth);
 794}
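/*
 * Editorial note, not part of the upstream file: stripped of the
 * fixed20_12 plumbing, the calculation above is
 *
 *	bandwidth [MB/s] = (yclk / 1000) * (dram_channels * 4) * 0.7
 *
 * e.g. yclk = 500000 kHz with 2 DRAM channels gives
 * 500 * 8 * 0.7 = 2800 MB/s.
 */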
 795
 796/**
 797 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 798 *
 799 * @wm: watermark calculation data
 800 *
 801 * Calculate the dram bandwidth used for display (CIK).
 802 * Used for display watermark bandwidth calculations
 803 * Returns the dram bandwidth for display in MBytes/s
 804 */
 805static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 806{
 807	/* Calculate DRAM Bandwidth and the part allocated to display. */
 808	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 809	fixed20_12 yclk, dram_channels, bandwidth;
 810	fixed20_12 a;
 811
 812	a.full = dfixed_const(1000);
 813	yclk.full = dfixed_const(wm->yclk);
 814	yclk.full = dfixed_div(yclk, a);
 815	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 816	a.full = dfixed_const(10);
 817	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 818	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 819	bandwidth.full = dfixed_mul(dram_channels, yclk);
 820	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 821
 822	return dfixed_trunc(bandwidth);
 823}
 824
 825/**
 826 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 827 *
 828 * @wm: watermark calculation data
 829 *
 830 * Calculate the data return bandwidth used for display (CIK).
 831 * Used for display watermark bandwidth calculations
 832 * Returns the data return bandwidth in MBytes/s
 833 */
 834static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 835{
 836	/* Calculate the display Data return Bandwidth */
 837	fixed20_12 return_efficiency; /* 0.8 */
 838	fixed20_12 sclk, bandwidth;
 839	fixed20_12 a;
 840
 841	a.full = dfixed_const(1000);
 842	sclk.full = dfixed_const(wm->sclk);
 843	sclk.full = dfixed_div(sclk, a);
 844	a.full = dfixed_const(10);
 845	return_efficiency.full = dfixed_const(8);
 846	return_efficiency.full = dfixed_div(return_efficiency, a);
 847	a.full = dfixed_const(32);
 848	bandwidth.full = dfixed_mul(a, sclk);
 849	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 850
 851	return dfixed_trunc(bandwidth);
 852}
 853
 854/**
 855 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 856 *
 857 * @wm: watermark calculation data
 858 *
 859 * Calculate the dmif bandwidth used for display (CIK).
 860 * Used for display watermark bandwidth calculations
 861 * Returns the dmif bandwidth in MBytes/s
 862 */
 863static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 864{
 865	/* Calculate the DMIF Request Bandwidth */
 866	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 867	fixed20_12 disp_clk, bandwidth;
 868	fixed20_12 a, b;
 869
 870	a.full = dfixed_const(1000);
 871	disp_clk.full = dfixed_const(wm->disp_clk);
 872	disp_clk.full = dfixed_div(disp_clk, a);
 873	a.full = dfixed_const(32);
 874	b.full = dfixed_mul(a, disp_clk);
 875
 876	a.full = dfixed_const(10);
 877	disp_clk_request_efficiency.full = dfixed_const(8);
 878	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 879
 880	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 881
 882	return dfixed_trunc(bandwidth);
 883}
 884
 885/**
 886 * dce_v8_0_available_bandwidth - get the min available bandwidth
 887 *
 888 * @wm: watermark calculation data
 889 *
 890 * Calculate the min available bandwidth used for display (CIK).
 891 * Used for display watermark bandwidth calculations
 892 * Returns the min available bandwidth in MBytes/s
 893 */
 894static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 895{
 896	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
 897	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 898	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 899	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 900
 901	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 902}
 903
 904/**
 905 * dce_v8_0_average_bandwidth - get the average available bandwidth
 906 *
 907 * @wm: watermark calculation data
 908 *
 909 * Calculate the average available bandwidth used for display (CIK).
 910 * Used for display watermark bandwidth calculations
 911 * Returns the average available bandwidth in MBytes/s
 912 */
 913static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 914{
 915	/* Calculate the display mode Average Bandwidth
 916	 * DisplayMode should contain the source and destination dimensions,
 917	 * timing, etc.
 918	 */
 919	fixed20_12 bpp;
 920	fixed20_12 line_time;
 921	fixed20_12 src_width;
 922	fixed20_12 bandwidth;
 923	fixed20_12 a;
 924
 925	a.full = dfixed_const(1000);
 926	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 927	line_time.full = dfixed_div(line_time, a);
 928	bpp.full = dfixed_const(wm->bytes_per_pixel);
 929	src_width.full = dfixed_const(wm->src_width);
 930	bandwidth.full = dfixed_mul(src_width, bpp);
 931	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 932	bandwidth.full = dfixed_div(bandwidth, line_time);
 933
 934	return dfixed_trunc(bandwidth);
 935}
 936
 937/**
 938 * dce_v8_0_latency_watermark - get the latency watermark
 939 *
 940 * @wm: watermark calculation data
 941 *
 942 * Calculate the latency watermark (CIK).
 943 * Used for display watermark bandwidth calculations
 944 * Returns the latency watermark in ns
 945 */
 946static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 947{
 948	/* First calculate the latency in ns */
 949	u32 mc_latency = 2000; /* 2000 ns. */
 950	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
 951	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 952	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 953	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 954	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 955		(wm->num_heads * cursor_line_pair_return_time);
 956	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 957	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 958	u32 tmp, dmif_size = 12288;
 959	fixed20_12 a, b, c;
 960
 961	if (wm->num_heads == 0)
 962		return 0;
 963
 964	a.full = dfixed_const(2);
 965	b.full = dfixed_const(1);
 966	if ((wm->vsc.full > a.full) ||
 967	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 968	    (wm->vtaps >= 5) ||
 969	    ((wm->vsc.full >= a.full) && wm->interlaced))
 970		max_src_lines_per_dst_line = 4;
 971	else
 972		max_src_lines_per_dst_line = 2;
 973
 974	a.full = dfixed_const(available_bandwidth);
 975	b.full = dfixed_const(wm->num_heads);
 976	a.full = dfixed_div(a, b);
 977
 978	b.full = dfixed_const(mc_latency + 512);
 979	c.full = dfixed_const(wm->disp_clk);
 980	b.full = dfixed_div(b, c);
 981
 982	c.full = dfixed_const(dmif_size);
 983	b.full = dfixed_div(c, b);
 984
 985	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 986
 987	b.full = dfixed_const(1000);
 988	c.full = dfixed_const(wm->disp_clk);
 989	b.full = dfixed_div(c, b);
 990	c.full = dfixed_const(wm->bytes_per_pixel);
 991	b.full = dfixed_mul(b, c);
 992
 993	lb_fill_bw = min(tmp, dfixed_trunc(b));
 994
 995	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 996	b.full = dfixed_const(1000);
 997	c.full = dfixed_const(lb_fill_bw);
 998	b.full = dfixed_div(c, b);
 999	a.full = dfixed_div(a, b);
1000	line_fill_time = dfixed_trunc(a);
1001
1002	if (line_fill_time < wm->active_time)
1003		return latency;
1004	else
1005		return latency + (line_fill_time - wm->active_time);
1006
1007}
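/*
 * Editorial note, not part of the upstream file: a worked example of the
 * latency sum above, assuming available_bandwidth = 2800 MB/s,
 * disp_clk = 100000 kHz and num_heads = 1:
 *
 *	worst_chunk_return_time      = 512 * 8 * 1000 / 2800 = 1462 ns
 *	cursor_line_pair_return_time = 128 * 4 * 1000 / 2800 =  182 ns
 *	dc_latency                   = 40000000 / 100000     =  400 ns
 *	other_heads_data_return_time = 2 * 1462 + 1 * 182    = 3106 ns
 *	latency                      = 2000 + 3106 + 400     = 5506 ns
 *
 * The final check then adds a penalty if the line buffer cannot be
 * refilled within the active display time.
 */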
1008
1009/**
1010 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1011 * average and available dram bandwidth
1012 *
1013 * @wm: watermark calculation data
1014 *
1015 * Check if the display average bandwidth fits in the display
1016 * dram bandwidth (CIK).
1017 * Used for display watermark bandwidth calculations
1018 * Returns true if the display fits, false if not.
1019 */
1020static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
1021{
1022	if (dce_v8_0_average_bandwidth(wm) <=
1023	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1024		return true;
1025	else
1026		return false;
1027}
1028
1029/**
1030 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
1031 * average and available bandwidth
1032 *
1033 * @wm: watermark calculation data
1034 *
1035 * Check if the display average bandwidth fits in the display
1036 * available bandwidth (CIK).
1037 * Used for display watermark bandwidth calculations
1038 * Returns true if the display fits, false if not.
1039 */
1040static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
1041{
1042	if (dce_v8_0_average_bandwidth(wm) <=
1043	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
1044		return true;
1045	else
1046		return false;
1047}
1048
1049/**
1050 * dce_v8_0_check_latency_hiding - check latency hiding
1051 *
1052 * @wm: watermark calculation data
1053 *
1054 * Check latency hiding (CIK).
1055 * Used for display watermark bandwidth calculations
1056 * Returns true if the display fits, false if not.
1057 */
1058static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
1059{
1060	u32 lb_partitions = wm->lb_size / wm->src_width;
1061	u32 line_time = wm->active_time + wm->blank_time;
1062	u32 latency_tolerant_lines;
1063	u32 latency_hiding;
1064	fixed20_12 a;
1065
1066	a.full = dfixed_const(1);
1067	if (wm->vsc.full > a.full)
1068		latency_tolerant_lines = 1;
1069	else {
1070		if (lb_partitions <= (wm->vtaps + 1))
1071			latency_tolerant_lines = 1;
1072		else
1073			latency_tolerant_lines = 2;
1074	}
1075
1076	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1077
1078	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
1079		return true;
1080	else
1081		return false;
1082}
1083
1084/**
1085 * dce_v8_0_program_watermarks - program display watermarks
1086 *
1087 * @adev: amdgpu_device pointer
1088 * @amdgpu_crtc: the selected display controller
1089 * @lb_size: line buffer size
1090 * @num_heads: number of display controllers in use
1091 *
1092 * Calculate and program the display watermarks for the
1093 * selected display controller (CIK).
1094 */
1095static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1096					struct amdgpu_crtc *amdgpu_crtc,
1097					u32 lb_size, u32 num_heads)
1098{
1099	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1100	struct dce8_wm_params wm_low, wm_high;
1101	u32 pixel_period;
1102	u32 line_time = 0;
1103	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1104	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1105
1106	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1107		pixel_period = 1000000 / (u32)mode->clock;
1108		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1109
1110		/* watermark for high clocks */
1111		if (adev->pm.dpm_enabled) {
1112			wm_high.yclk =
1113				amdgpu_dpm_get_mclk(adev, false) * 10;
1114			wm_high.sclk =
1115				amdgpu_dpm_get_sclk(adev, false) * 10;
1116		} else {
1117			wm_high.yclk = adev->pm.current_mclk * 10;
1118			wm_high.sclk = adev->pm.current_sclk * 10;
1119		}
1120
1121		wm_high.disp_clk = mode->clock;
1122		wm_high.src_width = mode->crtc_hdisplay;
1123		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1124		wm_high.blank_time = line_time - wm_high.active_time;
1125		wm_high.interlaced = false;
1126		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1127			wm_high.interlaced = true;
1128		wm_high.vsc = amdgpu_crtc->vsc;
1129		wm_high.vtaps = 1;
1130		if (amdgpu_crtc->rmx_type != RMX_OFF)
1131			wm_high.vtaps = 2;
1132		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1133		wm_high.lb_size = lb_size;
1134		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1135		wm_high.num_heads = num_heads;
1136
1137		/* set for high clocks */
1138		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1139
1140		/* possibly force display priority to high */
1141		/* should really do this at mode validation time... */
1142		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1143		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1144		    !dce_v8_0_check_latency_hiding(&wm_high) ||
1145		    (adev->mode_info.disp_priority == 2)) {
1146			DRM_DEBUG_KMS("force priority to high\n");
1147		}
1148
1149		/* watermark for low clocks */
1150		if (adev->pm.dpm_enabled) {
1151			wm_low.yclk =
1152				amdgpu_dpm_get_mclk(adev, true) * 10;
1153			wm_low.sclk =
1154				amdgpu_dpm_get_sclk(adev, true) * 10;
1155		} else {
1156			wm_low.yclk = adev->pm.current_mclk * 10;
1157			wm_low.sclk = adev->pm.current_sclk * 10;
1158		}
1159
1160		wm_low.disp_clk = mode->clock;
1161		wm_low.src_width = mode->crtc_hdisplay;
1162		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1163		wm_low.blank_time = line_time - wm_low.active_time;
1164		wm_low.interlaced = false;
1165		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1166			wm_low.interlaced = true;
1167		wm_low.vsc = amdgpu_crtc->vsc;
1168		wm_low.vtaps = 1;
1169		if (amdgpu_crtc->rmx_type != RMX_OFF)
1170			wm_low.vtaps = 2;
1171		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1172		wm_low.lb_size = lb_size;
1173		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1174		wm_low.num_heads = num_heads;
1175
1176		/* set for low clocks */
1177		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1178
1179		/* possibly force display priority to high */
1180		/* should really do this at mode validation time... */
1181		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1182		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1183		    !dce_v8_0_check_latency_hiding(&wm_low) ||
1184		    (adev->mode_info.disp_priority == 2)) {
1185			DRM_DEBUG_KMS("force priority to high\n");
1186		}
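		/* e.g. a line buffer holding 3840 pixels ahead of a
		 * 1920 pixel wide scanout leads it by two lines
		 */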
1187		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1188	}
1189
1190	/* select wm A */
1191	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1192	tmp = wm_mask;
1193	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1194	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1195	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1196	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1197	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1198		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1199	/* select wm B */
1200	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1201	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1202	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1203	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1204	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1205	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1206		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1207	/* restore original selection */
1208	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1209
1210	/* save values for DPM */
1211	amdgpu_crtc->line_time = line_time;
1212	amdgpu_crtc->wm_high = latency_watermark_a;
1213	amdgpu_crtc->wm_low = latency_watermark_b;
1214	/* Save number of lines the linebuffer leads before the scanout */
1215	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1216}
1217
1218/**
1219 * dce_v8_0_bandwidth_update - program display watermarks
1220 *
1221 * @adev: amdgpu_device pointer
1222 *
1223 * Calculate and program the display watermarks and line
1224 * buffer allocation (CIK).
1225 */
1226static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1227{
1228	struct drm_display_mode *mode = NULL;
1229	u32 num_heads = 0, lb_size;
1230	int i;
1231
1232	amdgpu_update_display_priority(adev);
1233
1234	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1235		if (adev->mode_info.crtcs[i]->base.enabled)
1236			num_heads++;
1237	}
1238	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1239		mode = &adev->mode_info.crtcs[i]->base.mode;
1240		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1241		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1242					    lb_size, num_heads);
1243	}
1244}
1245
1246static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1247{
1248	int i;
1249	u32 offset, tmp;
1250
1251	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1252		offset = adev->mode_info.audio.pin[i].offset;
1253		tmp = RREG32_AUDIO_ENDPT(offset,
1254					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
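		/* a port connectivity value of 1 in the pin's
		 * configuration default means "no physical connection"
		 * per the HD Audio spec; anything else is treated as
		 * connected
		 */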
1255		if (((tmp &
1256		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1257		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1258			adev->mode_info.audio.pin[i].connected = false;
1259		else
1260			adev->mode_info.audio.pin[i].connected = true;
1261	}
1262}
1263
1264static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1265{
1266	int i;
1267
1268	dce_v8_0_audio_get_connected_pins(adev);
1269
1270	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1271		if (adev->mode_info.audio.pin[i].connected)
1272			return &adev->mode_info.audio.pin[i];
1273	}
1274	DRM_ERROR("No connected audio pins found!\n");
1275	return NULL;
1276}
1277
1278static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1279{
1280	struct amdgpu_device *adev = encoder->dev->dev_private;
1281	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1282	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1283	u32 offset;
1284
1285	if (!dig || !dig->afmt || !dig->afmt->pin)
1286		return;
1287
1288	offset = dig->afmt->offset;
1289
1290	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1291	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1292}
1293
1294static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1295						struct drm_display_mode *mode)
1296{
1297	struct amdgpu_device *adev = encoder->dev->dev_private;
1298	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1299	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1300	struct drm_connector *connector;
1301	struct amdgpu_connector *amdgpu_connector = NULL;
1302	u32 tmp = 0, offset;
1303
1304	if (!dig || !dig->afmt || !dig->afmt->pin)
1305		return;
1306
1307	offset = dig->afmt->pin->offset;
1308
1309	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1310		if (connector->encoder == encoder) {
1311			amdgpu_connector = to_amdgpu_connector(connector);
1312			break;
1313		}
1314	}
1315
1316	if (!amdgpu_connector) {
1317		DRM_ERROR("Couldn't find encoder's connector\n");
1318		return;
1319	}
1320
1321	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1322		if (connector->latency_present[1])
1323			tmp =
1324			(connector->video_latency[1] <<
1325			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1326			(connector->audio_latency[1] <<
1327			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1328		else
1329			tmp =
1330			(0 <<
1331			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1332			(0 <<
1333			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1334	} else {
1335		if (connector->latency_present[0])
1336			tmp =
1337			(connector->video_latency[0] <<
1338			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1339			(connector->audio_latency[0] <<
1340			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1341		else
1342			tmp =
1343			(0 <<
1344			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1345			(0 <<
1346			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1348	}
1349	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1350}
1351
1352static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1353{
1354	struct amdgpu_device *adev = encoder->dev->dev_private;
1355	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1356	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1357	struct drm_connector *connector;
1358	struct amdgpu_connector *amdgpu_connector = NULL;
1359	u32 offset, tmp;
1360	u8 *sadb = NULL;
1361	int sad_count;
1362
1363	if (!dig || !dig->afmt || !dig->afmt->pin)
1364		return;
1365
1366	offset = dig->afmt->pin->offset;
1367
1368	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1369		if (connector->encoder == encoder) {
1370			amdgpu_connector = to_amdgpu_connector(connector);
1371			break;
1372		}
1373	}
1374
1375	if (!amdgpu_connector) {
1376		DRM_ERROR("Couldn't find encoder's connector\n");
1377		return;
1378	}
1379
1380	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1381	if (sad_count < 0) {
1382		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1383		sad_count = 0;
1384	}
1385
1386	/* program the speaker allocation */
1387	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1388	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1389		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1390	/* set HDMI mode */
1391	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1392	if (sad_count)
1393		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1394	else
1395		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1396	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1397
1398	kfree(sadb);
1399}
1400
1401static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1402{
1403	struct amdgpu_device *adev = encoder->dev->dev_private;
1404	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1405	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1406	u32 offset;
1407	struct drm_connector *connector;
1408	struct amdgpu_connector *amdgpu_connector = NULL;
1409	struct cea_sad *sads;
1410	int i, sad_count;
1411
1412	static const u16 eld_reg_to_type[][2] = {
1413		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1414		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1415		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1416		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1417		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1418		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1419		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1420		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1421		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1422		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1423		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1424		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1425	};
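	/* the table above maps CEA-861 audio format codes to their
	 * Azalia pin descriptor registers; descriptors 8 and 12
	 * (presumably one-bit audio and DST) are intentionally skipped
	 */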
1426
1427	if (!dig || !dig->afmt || !dig->afmt->pin)
1428		return;
1429
1430	offset = dig->afmt->pin->offset;
1431
1432	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1433		if (connector->encoder == encoder) {
1434			amdgpu_connector = to_amdgpu_connector(connector);
1435			break;
1436		}
1437	}
1438
1439	if (!amdgpu_connector) {
1440		DRM_ERROR("Couldn't find encoder's connector\n");
1441		return;
1442	}
1443
1444	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1445	if (sad_count <= 0) {
1446		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1447		return;
1448	}
1449	BUG_ON(!sads);
1450
1451	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1452		u32 value = 0;
1453		u8 stereo_freqs = 0;
1454		int max_channels = -1;
1455		int j;
1456
1457		for (j = 0; j < sad_count; j++) {
1458			struct cea_sad *sad = &sads[j];
1459
1460			if (sad->format == eld_reg_to_type[i][1]) {
1461				if (sad->channels > max_channels) {
1462					value = (sad->channels <<
1463						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1464					        (sad->byte2 <<
1465						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1466					        (sad->freq <<
1467						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1468					max_channels = sad->channels;
1469				}
1470
1471				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1472					stereo_freqs |= sad->freq;
1473				else
1474					break;
1475			}
1476		}
1477
1478		value |= (stereo_freqs <<
1479			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1480
1481		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1482	}
1483
1484	kfree(sads);
1485}
1486
1487static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1488				  struct amdgpu_audio_pin *pin,
1489				  bool enable)
1490{
1491	if (!pin)
1492		return;
1493
1494	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1495		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1496}
1497
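/* per-endpoint register offsets for the seven Azalia audio pins,
 * relative to the first pin at 0x1780
 */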
1498static const u32 pin_offsets[7] =
1499{
1500	(0x1780 - 0x1780),
1501	(0x1786 - 0x1780),
1502	(0x178c - 0x1780),
1503	(0x1792 - 0x1780),
1504	(0x1798 - 0x1780),
1505	(0x179d - 0x1780),
1506	(0x17a4 - 0x1780),
1507};
1508
1509static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1510{
1511	int i;
1512
1513	if (!amdgpu_audio)
1514		return 0;
1515
1516	adev->mode_info.audio.enabled = true;
1517
1518	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1519		adev->mode_info.audio.num_pins = 7;
1520	else if ((adev->asic_type == CHIP_KABINI) ||
1521		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1522		adev->mode_info.audio.num_pins = 3;
1523	else if ((adev->asic_type == CHIP_BONAIRE) ||
1524		 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1525		adev->mode_info.audio.num_pins = 7;
1526	else
1527		adev->mode_info.audio.num_pins = 3;
1528
1529	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1530		adev->mode_info.audio.pin[i].channels = -1;
1531		adev->mode_info.audio.pin[i].rate = -1;
1532		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1533		adev->mode_info.audio.pin[i].status_bits = 0;
1534		adev->mode_info.audio.pin[i].category_code = 0;
1535		adev->mode_info.audio.pin[i].connected = false;
1536		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1537		adev->mode_info.audio.pin[i].id = i;
1538		/* disable audio.  it will be set up later */
1539		/* XXX remove once we switch to ip funcs */
1540		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1541	}
1542
1543	return 0;
1544}
1545
1546static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1547{
1548	int i;
1549
1550	if (!amdgpu_audio)
1551		return;
1552
1553	if (!adev->mode_info.audio.enabled)
1554		return;
1555
1556	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1557		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1558
1559	adev->mode_info.audio.enabled = false;
1560}
1561
1562/*
1563 * update the N and CTS parameters for a given pixel clock rate
1564 */
1565static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1566{
1567	struct drm_device *dev = encoder->dev;
1568	struct amdgpu_device *adev = dev->dev_private;
1569	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1570	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1571	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1572	uint32_t offset = dig->afmt->offset;
1573
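	/* e.g. for a 148.5 MHz pixel clock at 48 kHz audio the HDMI
	 * spec recommends N = 6144, which gives
	 * CTS = f_pixel * N / (128 * f_s)
	 *     = 148500000 * 6144 / (128 * 48000) = 148500
	 */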
1574	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1575	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1576
1577	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1578	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1579
1580	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1581	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1582}
1583
1584/*
1585 * load the packed HDMI AVI InfoFrame into the display registers
1586 */
1587static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1588					       void *buffer, size_t size)
1589{
1590	struct drm_device *dev = encoder->dev;
1591	struct amdgpu_device *adev = dev->dev_private;
1592	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1593	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1594	uint32_t offset = dig->afmt->offset;
1595	uint8_t *frame = buffer + 3;
1596	uint8_t *header = buffer;
1597
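	/* the packed infoframe is a 3 byte header, a checksum and 13
	 * payload bytes; frame[] starts at the checksum, so the writes
	 * below load it four bytes per register, little-endian, with
	 * the version byte from header[1] in the top byte of
	 * AFMT_AVI_INFO3
	 */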
1598	WREG32(mmAFMT_AVI_INFO0 + offset,
1599		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1600	WREG32(mmAFMT_AVI_INFO1 + offset,
1601		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1602	WREG32(mmAFMT_AVI_INFO2 + offset,
1603		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1604	WREG32(mmAFMT_AVI_INFO3 + offset,
1605		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1606}
1607
1608static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1609{
1610	struct drm_device *dev = encoder->dev;
1611	struct amdgpu_device *adev = dev->dev_private;
1612	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1613	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1614	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1615	u32 dto_phase = 24 * 1000;
1616	u32 dto_modulo = clock;
1617
1618	if (!dig || !dig->afmt)
1619		return;
1620
1621	/* XXX two dtos; generally use dto0 for hdmi */
1622	/* Express [24MHz / target pixel clock] as an exact rational
1623	 * number (quotient of two integers).  DCCG_AUDIO_DTOx_PHASE
1624	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1625	 */
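	/* e.g. for a 148.5 MHz pixel clock (mode->clock == 148500):
	 * phase = 24 * 1000 = 24000, module = 148500, and
	 * 24000 / 148500 is exactly 24 MHz / 148.5 MHz
	 */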
1626	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1627	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1628	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1629}
1630
1631/*
1632 * update the info frames with the data from the current display mode
1633 */
1634static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1635				  struct drm_display_mode *mode)
1636{
1637	struct drm_device *dev = encoder->dev;
1638	struct amdgpu_device *adev = dev->dev_private;
1639	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1640	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1641	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1642	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1643	struct hdmi_avi_infoframe frame;
1644	uint32_t offset, val;
1645	ssize_t err;
1646	int bpc = 8;
1647
1648	if (!dig || !dig->afmt)
1649		return;
1650
1651	/* silently bail if the AFMT block has not been enabled */
1652	if (!dig->afmt->enabled)
1653		return;
1654
1655	offset = dig->afmt->offset;
1656
1657	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1658	if (encoder->crtc) {
1659		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1660		bpc = amdgpu_crtc->bpc;
1661	}
1662
1663	/* disable audio prior to setting up hw */
1664	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1665	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1666
1667	dce_v8_0_audio_set_dto(encoder, mode->clock);
1668
1669	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1670	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1671
1672	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1673
1674	val = RREG32(mmHDMI_CONTROL + offset);
1675	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1676	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1677
1678	switch (bpc) {
1679	case 0:
1680	case 6:
1681	case 8:
1682	case 16:
1683	default:
1684		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1685			  connector->name, bpc);
1686		break;
1687	case 10:
1688		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1689		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1690		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1691			  connector->name);
1692		break;
1693	case 12:
1694		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1695		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1696		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1697			  connector->name);
1698		break;
1699	}
1700
1701	WREG32(mmHDMI_CONTROL + offset, val);
1702
1703	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1704	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1705	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1706	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1707
1708	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1709	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1710	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1711
1712	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1713	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1714
1715	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1716	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1717
1718	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1719
1720	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1721	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1722	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1723
1724	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1725	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1726
1727	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1728
1729	if (bpc > 8)
1730		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1731		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1732	else
1733		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1734		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1735		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1736
1737	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1738
1739	WREG32(mmAFMT_60958_0 + offset,
1740	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1741
1742	WREG32(mmAFMT_60958_1 + offset,
1743	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1744
1745	WREG32(mmAFMT_60958_2 + offset,
1746	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1747	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1748	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1749	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1750	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1751	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1752
1753	dce_v8_0_audio_write_speaker_allocation(encoder);
1754
1756	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1757	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1758
1759	dce_v8_0_afmt_audio_select_pin(encoder);
1760	dce_v8_0_audio_write_sad_regs(encoder);
1761	dce_v8_0_audio_write_latency_fields(encoder, mode);
1762
1763	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1764	if (err < 0) {
1765		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1766		return;
1767	}
1768
1769	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1770	if (err < 0) {
1771		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1772		return;
1773	}
1774
1775	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1776
1777	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1778		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1779		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
1780
1781	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1782		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1783		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1784
1785	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1786		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1787
1788	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1789	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1790	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1791	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1792
1793	/* enable audio after setting up hw */
1794	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1795}
1796
1797static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1798{
1799	struct drm_device *dev = encoder->dev;
1800	struct amdgpu_device *adev = dev->dev_private;
1801	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1802	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1803
1804	if (!dig || !dig->afmt)
1805		return;
1806
1807	/* silently ignore redundant enable/disable requests */
1808	if (enable && dig->afmt->enabled)
1809		return;
1810	if (!enable && !dig->afmt->enabled)
1811		return;
1812
1813	if (!enable && dig->afmt->pin) {
1814		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1815		dig->afmt->pin = NULL;
1816	}
1817
1818	dig->afmt->enabled = enable;
1819
1820	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1821		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1822}
1823
1824static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1825{
1826	int i;
1827
1828	for (i = 0; i < adev->mode_info.num_dig; i++)
1829		adev->mode_info.afmt[i] = NULL;
1830
1831	/* DCE8 has audio blocks tied to DIG encoders */
1832	for (i = 0; i < adev->mode_info.num_dig; i++) {
1833		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1834		if (adev->mode_info.afmt[i]) {
1835			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1836			adev->mode_info.afmt[i]->id = i;
1837		} else {
1838			int j;
1839			for (j = 0; j < i; j++) {
1840				kfree(adev->mode_info.afmt[j]);
1841				adev->mode_info.afmt[j] = NULL;
1842			}
1843			return -ENOMEM;
1844		}
1845	}
1846	return 0;
1847}
1848
1849static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1850{
1851	int i;
1852
1853	for (i = 0; i < adev->mode_info.num_dig; i++) {
1854		kfree(adev->mode_info.afmt[i]);
1855		adev->mode_info.afmt[i] = NULL;
1856	}
1857}
1858
1859static const u32 vga_control_regs[6] =
1860{
1861	mmD1VGA_CONTROL,
1862	mmD2VGA_CONTROL,
1863	mmD3VGA_CONTROL,
1864	mmD4VGA_CONTROL,
1865	mmD5VGA_CONTROL,
1866	mmD6VGA_CONTROL,
1867};
1868
1869static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1870{
1871	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1872	struct drm_device *dev = crtc->dev;
1873	struct amdgpu_device *adev = dev->dev_private;
1874	u32 vga_control;
1875
1876	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1877	if (enable)
1878		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1879	else
1880		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1881}
1882
1883static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1884{
1885	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1886	struct drm_device *dev = crtc->dev;
1887	struct amdgpu_device *adev = dev->dev_private;
1888
1889	if (enable)
1890		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1891	else
1892		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1893}
1894
1895static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1896				     struct drm_framebuffer *fb,
1897				     int x, int y, int atomic)
1898{
1899	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1900	struct drm_device *dev = crtc->dev;
1901	struct amdgpu_device *adev = dev->dev_private;
1902	struct amdgpu_framebuffer *amdgpu_fb;
1903	struct drm_framebuffer *target_fb;
1904	struct drm_gem_object *obj;
1905	struct amdgpu_bo *abo;
1906	uint64_t fb_location, tiling_flags;
1907	uint32_t fb_format, fb_pitch_pixels;
1908	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1909	u32 pipe_config;
1910	u32 viewport_w, viewport_h;
1911	int r;
1912	bool bypass_lut = false;
1913	struct drm_format_name_buf format_name;
1914
1915	/* no fb bound */
1916	if (!atomic && !crtc->primary->fb) {
1917		DRM_DEBUG_KMS("No FB bound\n");
1918		return 0;
1919	}
1920
1921	if (atomic) {
1922		amdgpu_fb = to_amdgpu_framebuffer(fb);
1923		target_fb = fb;
1924	} else {
1925		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1926		target_fb = crtc->primary->fb;
1927	}
1928
1929	/* If atomic, assume fb object is pinned & idle & fenced and
1930	 * just update base pointers
1931	 */
1932	obj = amdgpu_fb->obj;
1933	abo = gem_to_amdgpu_bo(obj);
1934	r = amdgpu_bo_reserve(abo, false);
1935	if (unlikely(r != 0))
1936		return r;
1937
1938	if (atomic) {
1939		fb_location = amdgpu_bo_gpu_offset(abo);
1940	} else {
1941		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1942		if (unlikely(r != 0)) {
1943			amdgpu_bo_unreserve(abo);
1944			return -EINVAL;
1945		}
1946	}
1947
1948	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1949	amdgpu_bo_unreserve(abo);
1950
1951	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1952
1953	switch (target_fb->pixel_format) {
1954	case DRM_FORMAT_C8:
1955		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1956			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1957		break;
1958	case DRM_FORMAT_XRGB4444:
1959	case DRM_FORMAT_ARGB4444:
1960		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1961			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1962#ifdef __BIG_ENDIAN
1963		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1964#endif
1965		break;
1966	case DRM_FORMAT_XRGB1555:
1967	case DRM_FORMAT_ARGB1555:
1968		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1969			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1970#ifdef __BIG_ENDIAN
1971		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1972#endif
1973		break;
1974	case DRM_FORMAT_BGRX5551:
1975	case DRM_FORMAT_BGRA5551:
1976		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1977			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1978#ifdef __BIG_ENDIAN
1979		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1980#endif
1981		break;
1982	case DRM_FORMAT_RGB565:
1983		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1984			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1985#ifdef __BIG_ENDIAN
1986		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1987#endif
1988		break;
1989	case DRM_FORMAT_XRGB8888:
1990	case DRM_FORMAT_ARGB8888:
1991		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1992			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1993#ifdef __BIG_ENDIAN
1994		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1995#endif
1996		break;
1997	case DRM_FORMAT_XRGB2101010:
1998	case DRM_FORMAT_ARGB2101010:
1999		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2000			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2001#ifdef __BIG_ENDIAN
2002		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2003#endif
2004		/* fbs of more than 8 bpc need to bypass the hw lut to retain precision */
2005		bypass_lut = true;
2006		break;
2007	case DRM_FORMAT_BGRX1010102:
2008	case DRM_FORMAT_BGRA1010102:
2009		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2010			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2011#ifdef __BIG_ENDIAN
2012		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2013#endif
2014		/* fbs of more than 8 bpc need to bypass the hw lut to retain precision */
2015		bypass_lut = true;
2016		break;
2017	default:
2018		DRM_ERROR("Unsupported screen format %s\n",
2019		          drm_get_format_name(target_fb->pixel_format, &format_name));
2020		return -EINVAL;
2021	}
2022
2023	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2024		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2025
2026		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2027		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2028		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2029		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2030		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2031
2032		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2033		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2034		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2035		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2036		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2037		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2038		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2039	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2040		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2041	}
2042
2043	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2044
2045	dce_v8_0_vga_enable(crtc, false);
2046
2047	/* Make sure surface address is updated at vertical blank rather than
2048	 * horizontal blank
2049	 */
2050	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2051
2052	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2053	       upper_32_bits(fb_location));
2054	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2055	       upper_32_bits(fb_location));
2056	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2057	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2058	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2059	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2060	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2061	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2062
2063	/*
2064	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
2065	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2066	 * retain the full precision throughout the pipeline.
2067	 */
2068	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2069		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2070		 ~LUT_10BIT_BYPASS_EN);
2071
2072	if (bypass_lut)
2073		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2074
2075	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2076	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2077	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2078	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2079	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2080	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2081
2082	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2083	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2084
2085	dce_v8_0_grph_enable(crtc, true);
2086
2087	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2088	       target_fb->height);
2089
2090	x &= ~3;
2091	y &= ~1;
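	/* the masking above keeps the viewport start aligned to four
	 * pixels horizontally and two lines vertically, which the
	 * hardware appears to require
	 */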
2092	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2093	       (x << 16) | y);
2094	viewport_w = crtc->mode.hdisplay;
2095	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2096	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2097	       (viewport_w << 16) | viewport_h);
2098
2099	/* set pageflip to happen anywhere in vblank interval */
2100	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2101
2102	if (!atomic && fb && fb != crtc->primary->fb) {
2103		amdgpu_fb = to_amdgpu_framebuffer(fb);
2104		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2105		r = amdgpu_bo_reserve(abo, false);
2106		if (unlikely(r != 0))
2107			return r;
2108		amdgpu_bo_unpin(abo);
2109		amdgpu_bo_unreserve(abo);
2110	}
2111
2112	/* Bytes per pixel may have changed */
2113	dce_v8_0_bandwidth_update(adev);
2114
2115	return 0;
2116}
2117
2118static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2119				    struct drm_display_mode *mode)
2120{
2121	struct drm_device *dev = crtc->dev;
2122	struct amdgpu_device *adev = dev->dev_private;
2123	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2124
2125	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2126		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2127		       LB_DATA_FORMAT__INTERLEAVE_EN_MASK); /* set the interleave enable bit */
2128	else
2129		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2130}
2131
2132static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2133{
2134	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2135	struct drm_device *dev = crtc->dev;
2136	struct amdgpu_device *adev = dev->dev_private;
2137	int i;
2138
2139	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2140
2141	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2142	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2143		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2144	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2145	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2146	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2147	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2148	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2149	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2150		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2151
2152	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2153
2154	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2155	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2156	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2157
2158	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2159	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2160	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2161
2162	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2163	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2164
2165	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
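	/* each DC_LUT_30_COLOR write below packs one 10-bit R/G/B
	 * triplet into a 30-bit entry; the RW index appears to
	 * auto-increment on every write, so it only needs seeding once
	 */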
2166	for (i = 0; i < 256; i++) {
2167		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2168		       (amdgpu_crtc->lut_r[i] << 20) |
2169		       (amdgpu_crtc->lut_g[i] << 10) |
2170		       (amdgpu_crtc->lut_b[i] << 0));
2171	}
2172
2173	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2174	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2175		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2176		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2177	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2178	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2179		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2180	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2181	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2182		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2183	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2184	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2185		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2186	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2187	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2188	/* XXX this only needs to be programmed once per crtc at startup,
2189	 * not sure where the best place for it is
2190	 */
2191	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2192	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2193}
2194
2195static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2196{
2197	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2198	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2199
2200	switch (amdgpu_encoder->encoder_id) {
2201	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2202		if (dig->linkb)
2203			return 1;
2204		else
2205			return 0;
2207	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2208		if (dig->linkb)
2209			return 3;
2210		else
2211			return 2;
2213	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2214		if (dig->linkb)
2215			return 5;
2216		else
2217			return 4;
2219	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2220		return 6;
2222	default:
2223		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2224		return 0;
2225	}
2226}
2227
2228/**
2229 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2230 *
2231 * @crtc: drm crtc
2232 *
2233 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2234 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2235 * monitors a dedicated PPLL must be used.  If a particular board has
2236 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2237 * as there is no need to program the PLL itself.  If we are not able to
2238 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2239 * avoid messing up an existing monitor.
2240 *
2241 * Asic specific PLL information
2242 *
2243 * DCE 8.x
2244 * KB/ML
2245 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2246 * CI/KV
2247 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2248 *
2249 */
2250static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2251{
2252	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2253	struct drm_device *dev = crtc->dev;
2254	struct amdgpu_device *adev = dev->dev_private;
2255	u32 pll_in_use;
2256	int pll;
2257
2258	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2259		if (adev->clock.dp_extclk)
2260			/* skip PPLL programming if using ext clock */
2261			return ATOM_PPLL_INVALID;
2262		else {
2263			/* use the same PPLL for all DP monitors */
2264			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2265			if (pll != ATOM_PPLL_INVALID)
2266				return pll;
2267		}
2268	} else {
2269		/* use the same PPLL for all monitors with the same clock */
2270		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2271		if (pll != ATOM_PPLL_INVALID)
2272			return pll;
2273	}
2274	/* otherwise, pick one of the plls */
2275	if ((adev->asic_type == CHIP_KABINI) ||
2276	    (adev->asic_type == CHIP_MULLINS)) {
2277		/* KB/ML has PPLL1 and PPLL2 */
2278		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2279		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2280			return ATOM_PPLL2;
2281		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2282			return ATOM_PPLL1;
2283		DRM_ERROR("unable to allocate a PPLL\n");
2284		return ATOM_PPLL_INVALID;
2285	} else {
2286		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2287		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2288		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2289			return ATOM_PPLL2;
2290		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2291			return ATOM_PPLL1;
2292		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2293			return ATOM_PPLL0;
2294		DRM_ERROR("unable to allocate a PPLL\n");
2295		return ATOM_PPLL_INVALID;
2296	}
2297	return ATOM_PPLL_INVALID;
2298}
2299
2300static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2301{
2302	struct amdgpu_device *adev = crtc->dev->dev_private;
2303	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2304	uint32_t cur_lock;
2305
2306	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2307	if (lock)
2308		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2309	else
2310		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2311	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2312}
2313
2314static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2315{
2316	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2317	struct amdgpu_device *adev = crtc->dev->dev_private;
2318
2319	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2320		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2321		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2322}
2323
2324static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2325{
2326	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2327	struct amdgpu_device *adev = crtc->dev->dev_private;
2328
2329	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2330	       upper_32_bits(amdgpu_crtc->cursor_addr));
2331	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2332	       lower_32_bits(amdgpu_crtc->cursor_addr));
2333
2334	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2335		   CUR_CONTROL__CURSOR_EN_MASK |
2336		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2337		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2338}
2339
2340static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2341				       int x, int y)
2342{
2343	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2344	struct amdgpu_device *adev = crtc->dev->dev_private;
2345	int xorigin = 0, yorigin = 0;
2346
2347	amdgpu_crtc->cursor_x = x;
2348	amdgpu_crtc->cursor_y = y;
2349
2350	/* avivo cursors are offset into the total surface */
2351	x += crtc->x;
2352	y += crtc->y;
2353	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2354
2355	if (x < 0) {
2356		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2357		x = 0;
2358	}
2359	if (y < 0) {
2360		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2361		y = 0;
2362	}
2363
2364	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2365	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2366	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2367	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2368
2369	return 0;
2370}
2371
2372static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2373				     int x, int y)
2374{
2375	int ret;
2376
2377	dce_v8_0_lock_cursor(crtc, true);
2378	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2379	dce_v8_0_lock_cursor(crtc, false);
2380
2381	return ret;
2382}
2383
2384static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2385				     struct drm_file *file_priv,
2386				     uint32_t handle,
2387				     uint32_t width,
2388				     uint32_t height,
2389				     int32_t hot_x,
2390				     int32_t hot_y)
2391{
2392	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2393	struct drm_gem_object *obj;
2394	struct amdgpu_bo *aobj;
2395	int ret;
2396
2397	if (!handle) {
2398		/* turn off cursor */
2399		dce_v8_0_hide_cursor(crtc);
2400		obj = NULL;
2401		goto unpin;
2402	}
2403
2404	if ((width > amdgpu_crtc->max_cursor_width) ||
2405	    (height > amdgpu_crtc->max_cursor_height)) {
2406		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2407		return -EINVAL;
2408	}
2409
2410	obj = drm_gem_object_lookup(file_priv, handle);
2411	if (!obj) {
2412		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2413		return -ENOENT;
2414	}
2415
2416	aobj = gem_to_amdgpu_bo(obj);
2417	ret = amdgpu_bo_reserve(aobj, false);
2418	if (ret != 0) {
2419		drm_gem_object_unreference_unlocked(obj);
2420		return ret;
2421	}
2422
2423	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2424	amdgpu_bo_unreserve(aobj);
2425	if (ret) {
2426		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2427		drm_gem_object_unreference_unlocked(obj);
2428		return ret;
2429	}
2430
2431	dce_v8_0_lock_cursor(crtc, true);
2432
2433	if (width != amdgpu_crtc->cursor_width ||
2434	    height != amdgpu_crtc->cursor_height ||
2435	    hot_x != amdgpu_crtc->cursor_hot_x ||
2436	    hot_y != amdgpu_crtc->cursor_hot_y) {
2437		int x, y;
2438
2439		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2440		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2441
2442		dce_v8_0_cursor_move_locked(crtc, x, y);
2443
2444		amdgpu_crtc->cursor_width = width;
2445		amdgpu_crtc->cursor_height = height;
2446		amdgpu_crtc->cursor_hot_x = hot_x;
2447		amdgpu_crtc->cursor_hot_y = hot_y;
2448	}
2449
2450	dce_v8_0_show_cursor(crtc);
2451	dce_v8_0_lock_cursor(crtc, false);
2452
2453unpin:
2454	if (amdgpu_crtc->cursor_bo) {
2455		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2456		ret = amdgpu_bo_reserve(aobj, false);
2457		if (likely(ret == 0)) {
2458			amdgpu_bo_unpin(aobj);
2459			amdgpu_bo_unreserve(aobj);
2460		}
2461		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2462	}
2463
2464	amdgpu_crtc->cursor_bo = obj;
2465	return 0;
2466}
2467
2468static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2469{
2470	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2471
2472	if (amdgpu_crtc->cursor_bo) {
2473		dce_v8_0_lock_cursor(crtc, true);
2474
2475		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2476					    amdgpu_crtc->cursor_y);
2477
2478		dce_v8_0_show_cursor(crtc);
2479
2480		dce_v8_0_lock_cursor(crtc, false);
2481	}
2482}
2483
2484static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2485				   u16 *blue, uint32_t size)
2486{
2487	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2488	int i;
2489
2490	/* userspace palettes are always correct as is */
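	/* drop the low 6 bits to turn the 16-bit-per-channel gamma ramp
	 * from userspace into the 10-bit entries the hardware LUT takes
	 */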
2491	for (i = 0; i < size; i++) {
2492		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2493		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2494		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2495	}
2496	dce_v8_0_crtc_load_lut(crtc);
2497
2498	return 0;
2499}
2500
2501static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2502{
2503	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2504
2505	drm_crtc_cleanup(crtc);
2506	kfree(amdgpu_crtc);
2507}
2508
2509static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2510	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2511	.cursor_move = dce_v8_0_crtc_cursor_move,
2512	.gamma_set = dce_v8_0_crtc_gamma_set,
2513	.set_config = amdgpu_crtc_set_config,
2514	.destroy = dce_v8_0_crtc_destroy,
2515	.page_flip_target = amdgpu_crtc_page_flip_target,
2516};
2517
2518static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2519{
2520	struct drm_device *dev = crtc->dev;
2521	struct amdgpu_device *adev = dev->dev_private;
2522	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2523	unsigned type;
2524
2525	switch (mode) {
2526	case DRM_MODE_DPMS_ON:
2527		amdgpu_crtc->enabled = true;
2528		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2529		dce_v8_0_vga_enable(crtc, true);
2530		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2531		dce_v8_0_vga_enable(crtc, false);
2532		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2533		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2534		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2535		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2536		drm_crtc_vblank_on(crtc);
2537		dce_v8_0_crtc_load_lut(crtc);
2538		break;
2539	case DRM_MODE_DPMS_STANDBY:
2540	case DRM_MODE_DPMS_SUSPEND:
2541	case DRM_MODE_DPMS_OFF:
2542		drm_crtc_vblank_off(crtc);
2543		if (amdgpu_crtc->enabled) {
2544			dce_v8_0_vga_enable(crtc, true);
2545			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2546			dce_v8_0_vga_enable(crtc, false);
2547		}
2548		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2549		amdgpu_crtc->enabled = false;
2550		break;
2551	}
2552	/* adjust pm to dpms */
2553	amdgpu_pm_compute_clocks(adev);
2554}
2555
2556static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2557{
2558	/* disable crtc pair power gating before programming */
2559	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2560	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2561	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2562}
2563
2564static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2565{
2566	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2567	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2568}
2569
2570static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2571{
2572	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2573	struct drm_device *dev = crtc->dev;
2574	struct amdgpu_device *adev = dev->dev_private;
2575	struct amdgpu_atom_ss ss;
2576	int i;
2577
2578	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2579	if (crtc->primary->fb) {
2580		int r;
2581		struct amdgpu_framebuffer *amdgpu_fb;
2582		struct amdgpu_bo *abo;
2583
2584		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2585		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2586		r = amdgpu_bo_reserve(abo, false);
2587		if (unlikely(r))
2588			DRM_ERROR("failed to reserve abo before unpin\n");
2589		else {
2590			amdgpu_bo_unpin(abo);
2591			amdgpu_bo_unreserve(abo);
2592		}
2593	}
2594	/* disable the GRPH */
2595	dce_v8_0_grph_enable(crtc, false);
2596
2597	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2598
2599	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2600		if (adev->mode_info.crtcs[i] &&
2601		    adev->mode_info.crtcs[i]->enabled &&
2602		    i != amdgpu_crtc->crtc_id &&
2603		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2604			/* another crtc is using this pll; don't
2605			 * turn it off
2606			 */
2607			goto done;
2608		}
2609	}
2610
2611	switch (amdgpu_crtc->pll_id) {
2612	case ATOM_PPLL1:
2613	case ATOM_PPLL2:
2614		/* disable the ppll */
2615		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2616                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2617		break;
2618	case ATOM_PPLL0:
2619		/* disable the ppll */
2620		if ((adev->asic_type == CHIP_KAVERI) ||
2621		    (adev->asic_type == CHIP_BONAIRE) ||
2622		    (adev->asic_type == CHIP_HAWAII))
2623			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2624						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2625		break;
2626	default:
2627		break;
2628	}
2629done:
2630	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2631	amdgpu_crtc->adjusted_clock = 0;
2632	amdgpu_crtc->encoder = NULL;
2633	amdgpu_crtc->connector = NULL;
2634}
2635
2636static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2637				  struct drm_display_mode *mode,
2638				  struct drm_display_mode *adjusted_mode,
2639				  int x, int y, struct drm_framebuffer *old_fb)
2640{
2641	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2642
2643	if (!amdgpu_crtc->adjusted_clock)
2644		return -EINVAL;
2645
2646	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2647	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2648	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2649	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2650	amdgpu_atombios_crtc_scaler_setup(crtc);
2651	dce_v8_0_cursor_reset(crtc);
2652	/* update the hw mode for dpm */
2653	amdgpu_crtc->hw_mode = *adjusted_mode;
2654
2655	return 0;
2656}
2657
static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
	.dpms = dce_v8_0_crtc_dpms,
	.mode_fixup = dce_v8_0_crtc_mode_fixup,
	.mode_set = dce_v8_0_crtc_mode_set,
	.mode_set_base = dce_v8_0_crtc_set_base,
	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
	.prepare = dce_v8_0_crtc_prepare,
	.commit = dce_v8_0_crtc_commit,
	.load_lut = dce_v8_0_crtc_load_lut,
	.disable = dce_v8_0_crtc_disable,
};

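/*
 * Allocate and register a single CRTC: gamma size, cursor limits, a
 * linear default LUT, and the register offset used for all per-CRTC
 * register accesses.
 */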
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);

	return 0;
}

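/*
 * early_init: hook up the register accessors and per-ASIC display limits
 * (CRTC/HPD/DIG counts) before any hardware is touched.
 */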
static int dce_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;

	dce_v8_0_set_display_funcs(adev);
	dce_v8_0_set_irq_funcs(adev);

	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6; /* ? */
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

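/*
 * sw_init: register the vblank/vline, pageflip and HPD interrupt sources,
 * set up the KMS mode_config limits, then create CRTCs, connectors and
 * the AFMT/audio state from the VBIOS object tables.
 */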
static int dce_v8_0_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;

	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v8_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v8_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v8_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

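/* sw_fini: undo sw_init and free any BIOS-hardcoded EDID. */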
static int dce_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v8_0_audio_fini(adev);

	dce_v8_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

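/*
 * hw_init: bring up the DIG PHYs and display engine PLL, initialize HPD,
 * disable audio on all pins, and enable the pageflip interrupts.
 */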
static int dce_v8_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v8_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	dce_v8_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v8_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v8_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	dce_v8_0_pageflip_interrupt_fini(adev);

	return 0;
}

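/*
 * Suspend/resume defer to hw_fini/hw_init; resume additionally re-applies
 * the backlight level so panel brightness survives the hardware reinit.
 */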
static int dce_v8_0_suspend(void *handle)
{
	return dce_v8_0_hw_fini(handle);
}

static int dce_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v8_0_hw_init(handle);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v8_0_is_idle(void *handle)
{
	return true;
}

static int dce_v8_0_wait_for_idle(void *handle)
{
	return 0;
}

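/*
 * soft_reset: if the display controller looks hung, pulse the DC bit in
 * SRBM_SOFT_RESET, reading the register back after each write (apparently
 * to post the write before the settle delay).
 */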
static int dce_v8_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v8_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

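/*
 * The two helpers below gate the per-CRTC vblank and vline interrupts by
 * toggling the corresponding mask bit in LB_INTERRUPT_MASK at that CRTC's
 * register offset.
 */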
static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

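/*
 * HPD interrupt enable/disable: flip the INT_EN bit in the per-pad
 * DC_HPDx_INT_CONTROL register, addressed via hpd_offsets[].
 */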
static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

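/*
 * crtc_irq: ack vblank/vline events (src_id 1..6 maps to CRTC 0..5 via
 * src_id - 1) and forward vblanks to DRM when that interrupt is enabled.
 */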
static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

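/*
 * pageflip_irq: clear the GRPH flip-pending status, then complete the
 * submitted flip under the event lock: send the vblank event, drop the
 * vblank reference, and hand buffer unpinning off to the unpin worker.
 */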
static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

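/*
 * mode_set for the primary encoders: record the pixel clock, restore the
 * interleave setting the scaler setup may clear, and bring up AFMT for
 * HDMI modes. The dpms-off call lives here rather than in prepare()
 * because it needs crtc state.
 */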
static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* the scaler setup clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

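/*
 * encoder_add: instantiate a DRM encoder for each VBIOS encoder object,
 * merging duplicate enumerations into one encoder's device bitmask and
 * deriving possible_crtcs from the number of CRTCs on this ASIC.
 */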
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.vblank_wait = &dce_v8_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v8_0_stop_mc_access,
	.resume_mc_access = &dce_v8_0_resume_mc_access,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}

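/* One IP block instance per DCE 8.x revision; all share the same func table. */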
const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};