/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
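
/*
 * Illustrative sketch (not part of the upstream driver): the Azalia
 * endpoint registers are reached indirectly through an index/data pair,
 * which is why the helpers above hold audio_endpt_idx_lock across both
 * accesses - otherwise concurrent callers could interleave index and
 * data writes. A hypothetical read-modify-write built on these helpers
 * (SOME_ENABLE_MASK is a made-up name) would look like:
 *
 *	u32 v = dce_v8_0_audio_endpt_rreg(adev, block_offset, reg);
 *	v |= SOME_ENABLE_MASK;
 *	dce_v8_0_audio_endpt_wreg(adev, block_offset, reg, v);
 */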

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to clean up pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on iMacs and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * It also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		num_crtc = 6;
		break;
	case CHIP_KAVERI:
		num_crtc = 4;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
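
/*
 * Worked example (illustrative only): a 1920-wide mode falls into the
 * "< 2560" branch above (1920 is not < 1920), so LB_MEMORY_CONFIG is
 * set to 2 and the function reports 2560 * 2 = 5120 pixels of line
 * buffer; a disabled controller takes the final branch, allocates no
 * DMIF buffers and returns 0.
 */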

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
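
/*
 * The fixed-point math above reduces to roughly (illustrative numbers,
 * assuming a made-up configuration):
 *
 *	bandwidth [MB/s] ~= (yclk / 1000) * (dram_channels * 4) * 0.7
 *
 * e.g. 4 channels at yclk = 500000 kHz -> 500 * 16 * 0.7 = 5600 MB/s.
 */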

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
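
/*
 * Worked example (illustrative only): line_time above ends up in us
 * after the divide by 1000, so for a 1080p60-like mode (148.5 MHz pixel
 * clock, 2200 clocks per line -> ~14.8 us) with 4 bytes per pixel and
 * vsc = 1 this returns about 1920 * 4 / 14.8 ~= 519 MB/s.
 */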

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
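
/*
 * Background (illustrative only, values from the HDMI spec rather than
 * this driver): audio clock regeneration recovers the audio clock from
 * the TMDS clock as
 *
 *	128 * audio_rate = tmds_clock * N / CTS
 *
 * e.g. for 48 kHz audio on a 74.25 MHz TMDS clock, the recommended
 * N = 6144 gives CTS = 74250 * 6144 / (128 * 48) = 74250.
 */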

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
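
/*
 * Worked example (illustrative only): with a 148500 kHz pixel clock the
 * DTO divides out phase/module = 24000/148500 of the pixel clock, i.e.
 * 148500 * 24000 / 148500 = 24000 kHz - the fixed 24 MHz reference the
 * audio block expects regardless of the display mode.
 */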

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

1575	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1576	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1577	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1578
1579	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1580	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1581
1582	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1583
1584	if (bpc > 8)
1585		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1586		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1587	else
1588		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1589		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1590		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1591
1592	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1593
1594	WREG32(mmAFMT_60958_0 + offset,
1595	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1596
1597	WREG32(mmAFMT_60958_1 + offset,
1598	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1599
1600	WREG32(mmAFMT_60958_2 + offset,
1601	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1602	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1603	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1604	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1605	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1606	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1607
1608	dce_v8_0_audio_write_speaker_allocation(encoder);
1609
1611	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1612	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1613
1614	dce_v8_0_afmt_audio_select_pin(encoder);
1615	dce_v8_0_audio_write_sad_regs(encoder);
1616	dce_v8_0_audio_write_latency_fields(encoder, mode);
1617
1618	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
1619	if (err < 0) {
1620		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1621		return;
1622	}
1623
1624	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1625	if (err < 0) {
1626		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1627		return;
1628	}
1629
1630	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1631
1632	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1633		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1634		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */
1635
1636	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1637		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1638		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1639
1640	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1641		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1642
1643	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1644	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1645	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1646	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1647
1648	/* enable audio after setting up hw */
1649	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1650}
1651
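/**
 * dce_v8_0_afmt_enable - enable or disable the AFMT block
 *
 * @encoder: drm encoder
 * @enable: true to enable, false to disable
 *
 * Tracks the AFMT enable state for the DIG encoder, ignoring redundant
 * requests, and releases the audio pin when the block is disabled.
 */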
1652static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1653{
1654	struct drm_device *dev = encoder->dev;
1655	struct amdgpu_device *adev = dev->dev_private;
1656	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1657	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1658
1659	if (!dig || !dig->afmt)
1660		return;
1661
1662	/* silently ignore redundant enable/disable requests */
1663	if (enable && dig->afmt->enabled)
1664		return;
1665	if (!enable && !dig->afmt->enabled)
1666		return;
1667
1668	if (!enable && dig->afmt->pin) {
1669		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1670		dig->afmt->pin = NULL;
1671	}
1672
1673	dig->afmt->enabled = enable;
1674
1675	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1676		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1677}
1678
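/**
 * dce_v8_0_afmt_init - allocate the AFMT structs
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates one amdgpu_afmt per DIG encoder and records its register
 * offset; on allocation failure everything allocated so far is freed
 * and -ENOMEM is returned.
 */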
1679static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1680{
1681	int i;
1682
1683	for (i = 0; i < adev->mode_info.num_dig; i++)
1684		adev->mode_info.afmt[i] = NULL;
1685
1686	/* DCE8 has audio blocks tied to DIG encoders */
1687	for (i = 0; i < adev->mode_info.num_dig; i++) {
1688		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1689		if (adev->mode_info.afmt[i]) {
1690			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1691			adev->mode_info.afmt[i]->id = i;
1692		} else {
1693			int j;
1694			for (j = 0; j < i; j++) {
1695				kfree(adev->mode_info.afmt[j]);
1696				adev->mode_info.afmt[j] = NULL;
1697			}
1698			return -ENOMEM;
1699		}
1700	}
1701	return 0;
1702}
1703
1704static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1705{
1706	int i;
1707
1708	for (i = 0; i < adev->mode_info.num_dig; i++) {
1709		kfree(adev->mode_info.afmt[i]);
1710		adev->mode_info.afmt[i] = NULL;
1711	}
1712}
1713
1714static const u32 vga_control_regs[6] =
1715{
1716	mmD1VGA_CONTROL,
1717	mmD2VGA_CONTROL,
1718	mmD3VGA_CONTROL,
1719	mmD4VGA_CONTROL,
1720	mmD5VGA_CONTROL,
1721	mmD6VGA_CONTROL,
1722};
1723
1724static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1725{
1726	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1727	struct drm_device *dev = crtc->dev;
1728	struct amdgpu_device *adev = dev->dev_private;
1729	u32 vga_control;
1730
1731	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1732	if (enable)
1733		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1734	else
1735		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1736}
1737
1738static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1739{
1740	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1741	struct drm_device *dev = crtc->dev;
1742	struct amdgpu_device *adev = dev->dev_private;
1743
1744	if (enable)
1745		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1746	else
1747		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1748}
1749
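/**
 * dce_v8_0_crtc_do_set_base - program the scanout surface
 *
 * @crtc: drm crtc
 * @fb: framebuffer to scan out (atomic path), NULL to use crtc->primary->fb
 * @x: x offset into the surface
 * @y: y offset into the surface
 * @atomic: non-zero when called from the atomic kms path
 *
 * Pins the framebuffer BO in VRAM (non-atomic path only), translates the
 * DRM pixel format and tiling flags into GRPH_CONTROL bits, programs the
 * primary/secondary surface addresses, pitch and viewport, unpins the old
 * framebuffer, and recalculates the display bandwidth.
 */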
1750static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1751				     struct drm_framebuffer *fb,
1752				     int x, int y, int atomic)
1753{
1754	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1755	struct drm_device *dev = crtc->dev;
1756	struct amdgpu_device *adev = dev->dev_private;
1757	struct amdgpu_framebuffer *amdgpu_fb;
1758	struct drm_framebuffer *target_fb;
1759	struct drm_gem_object *obj;
1760	struct amdgpu_bo *abo;
1761	uint64_t fb_location, tiling_flags;
1762	uint32_t fb_format, fb_pitch_pixels;
1763	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1764	u32 pipe_config;
1765	u32 viewport_w, viewport_h;
1766	int r;
1767	bool bypass_lut = false;
1768	struct drm_format_name_buf format_name;
1769
1770	/* no fb bound */
1771	if (!atomic && !crtc->primary->fb) {
1772		DRM_DEBUG_KMS("No FB bound\n");
1773		return 0;
1774	}
1775
1776	if (atomic) {
1777		amdgpu_fb = to_amdgpu_framebuffer(fb);
1778		target_fb = fb;
1779	} else {
1780		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1781		target_fb = crtc->primary->fb;
1782	}
1783
1784	/* If atomic, assume fb object is pinned & idle & fenced and
1785	 * just update base pointers
1786	 */
1787	obj = amdgpu_fb->obj;
1788	abo = gem_to_amdgpu_bo(obj);
1789	r = amdgpu_bo_reserve(abo, false);
1790	if (unlikely(r != 0))
1791		return r;
1792
1793	if (atomic) {
1794		fb_location = amdgpu_bo_gpu_offset(abo);
1795	} else {
1796		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1797		if (unlikely(r != 0)) {
1798			amdgpu_bo_unreserve(abo);
1799			return -EINVAL;
1800		}
1801	}
1802
1803	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1804	amdgpu_bo_unreserve(abo);
1805
1806	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1807
1808	switch (target_fb->format->format) {
1809	case DRM_FORMAT_C8:
1810		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1811			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1812		break;
1813	case DRM_FORMAT_XRGB4444:
1814	case DRM_FORMAT_ARGB4444:
1815		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1816			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1817#ifdef __BIG_ENDIAN
1818		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1819#endif
1820		break;
1821	case DRM_FORMAT_XRGB1555:
1822	case DRM_FORMAT_ARGB1555:
1823		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1824			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1825#ifdef __BIG_ENDIAN
1826		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1827#endif
1828		break;
1829	case DRM_FORMAT_BGRX5551:
1830	case DRM_FORMAT_BGRA5551:
1831		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1832			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1833#ifdef __BIG_ENDIAN
1834		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1835#endif
1836		break;
1837	case DRM_FORMAT_RGB565:
1838		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1839			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1840#ifdef __BIG_ENDIAN
1841		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1842#endif
1843		break;
1844	case DRM_FORMAT_XRGB8888:
1845	case DRM_FORMAT_ARGB8888:
1846		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1847			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1848#ifdef __BIG_ENDIAN
1849		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1850#endif
1851		break;
1852	case DRM_FORMAT_XRGB2101010:
1853	case DRM_FORMAT_ARGB2101010:
1854		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1855			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1856#ifdef __BIG_ENDIAN
1857		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1858#endif
1859		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1860		bypass_lut = true;
1861		break;
1862	case DRM_FORMAT_BGRX1010102:
1863	case DRM_FORMAT_BGRA1010102:
1864		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1865			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1866#ifdef __BIG_ENDIAN
1867		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1868#endif
1869		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1870		bypass_lut = true;
1871		break;
1872	default:
1873		DRM_ERROR("Unsupported screen format %s\n",
1874		          drm_get_format_name(target_fb->format->format, &format_name));
1875		return -EINVAL;
1876	}
1877
1878	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1879		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1880
1881		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1882		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1883		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1884		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1885		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1886
1887		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1888		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1889		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1890		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1891		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1892		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1893		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1894	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1895		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1896	}
1897
1898	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1899
1900	dce_v8_0_vga_enable(crtc, false);
1901
1902	/* Make sure surface address is updated at vertical blank rather than
1903	 * horizontal blank
1904	 */
1905	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1906
1907	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1908	       upper_32_bits(fb_location));
1909	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1910	       upper_32_bits(fb_location));
1911	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1912	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1913	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1914	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1915	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1916	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1917
1918	/*
1919	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1920	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1921	 * retain the full precision throughout the pipeline.
1922	 */
1923	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1924		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1925		 ~LUT_10BIT_BYPASS_EN);
1926
1927	if (bypass_lut)
1928		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1929
1930	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1931	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1932	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1933	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1934	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1935	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1936
1937	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1938	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1939
1940	dce_v8_0_grph_enable(crtc, true);
1941
1942	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1943	       target_fb->height);
1944
1945	x &= ~3;
1946	y &= ~1;
1947	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1948	       (x << 16) | y);
1949	viewport_w = crtc->mode.hdisplay;
1950	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1951	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1952	       (viewport_w << 16) | viewport_h);
1953
1954	/* set pageflip to happen anywhere in vblank interval */
1955	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1956
1957	if (!atomic && fb && fb != crtc->primary->fb) {
1958		amdgpu_fb = to_amdgpu_framebuffer(fb);
1959		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1960		r = amdgpu_bo_reserve(abo, true);
1961		if (unlikely(r != 0))
1962			return r;
1963		amdgpu_bo_unpin(abo);
1964		amdgpu_bo_unreserve(abo);
1965	}
1966
1967	/* Bytes per pixel may have changed */
1968	dce_v8_0_bandwidth_update(adev);
1969
1970	return 0;
1971}
1972
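/**
 * dce_v8_0_set_interleave - set interleaved scanout for interlaced modes
 *
 * @crtc: drm crtc
 * @mode: display mode
 *
 * Sets the line buffer INTERLEAVE_EN bit for interlaced modes and
 * clears it otherwise.
 */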
1973static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
1974				    struct drm_display_mode *mode)
1975{
1976	struct drm_device *dev = crtc->dev;
1977	struct amdgpu_device *adev = dev->dev_private;
1978	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1979
1980	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1981		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
1982		       LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
1983	else
1984		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1985}
1986
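/**
 * dce_v8_0_crtc_load_lut - load the gamma LUT for a crtc
 *
 * @crtc: drm crtc
 *
 * Routes the pipe through the legacy gamma LUT, bypassing the CSC,
 * prescale, degamma, gamut remap and regamma blocks, and uploads the
 * 256-entry ramp from crtc->gamma_store as packed 10:10:10 values.
 */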
1987static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
1988{
1989	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1990	struct drm_device *dev = crtc->dev;
1991	struct amdgpu_device *adev = dev->dev_private;
1992	u16 *r, *g, *b;
1993	int i;
1994
1995	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1996
1997	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1998	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1999		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2000	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2001	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2002	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2003	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2004	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2005	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2006		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2007
2008	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2009
2010	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2011	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2012	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2013
2014	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2015	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2016	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2017
2018	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2019	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2020
2021	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2022	r = crtc->gamma_store;
2023	g = r + crtc->gamma_size;
2024	b = g + crtc->gamma_size;
2025	for (i = 0; i < 256; i++) {
2026		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2027		       ((*r++ & 0xffc0) << 14) |
2028		       ((*g++ & 0xffc0) << 4) |
2029		       (*b++ >> 6));
2030	}
2031
2032	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2033	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2034		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2035		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2036	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2037	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2038		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2039	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2040	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2041		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2042	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2043	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2044		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2045	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2046	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2047	/* XXX this only needs to be programmed once per crtc at startup,
2048	 * not sure where the best place for it is
2049	 */
2050	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2051	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2052}
2053
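/**
 * dce_v8_0_pick_dig_encoder - pick a DIG block for a UNIPHY encoder
 *
 * @encoder: drm encoder
 *
 * Maps the UNIPHY instance and link (A/B) to a DIG encoder index (0-6).
 */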
2054static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2055{
2056	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2057	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2058
2059	switch (amdgpu_encoder->encoder_id) {
2060	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2061		if (dig->linkb)
2062			return 1;
2063		else
2064			return 0;
2066	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2067		if (dig->linkb)
2068			return 3;
2069		else
2070			return 2;
2072	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2073		if (dig->linkb)
2074			return 5;
2075		else
2076			return 4;
2078	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2079		return 6;
2081	default:
2082		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2083		return 0;
2084	}
2085}
2086
2087/**
2088 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2089 *
2090 * @crtc: drm crtc
2091 *
2092 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2093 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2094 * monitors a dedicated PPLL must be used.  If a particular board has
2095 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2096 * as there is no need to program the PLL itself.  If we are not able to
2097 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2098 * avoid messing up an existing monitor.
2099 *
2100 * Asic specific PLL information
2101 *
2102 * DCE 8.x
2103 * KB/KV
2104 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2105 * CI
2106 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2107 *
2108 */
2109static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2110{
2111	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2112	struct drm_device *dev = crtc->dev;
2113	struct amdgpu_device *adev = dev->dev_private;
2114	u32 pll_in_use;
2115	int pll;
2116
2117	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2118		if (adev->clock.dp_extclk)
2119			/* skip PPLL programming if using ext clock */
2120			return ATOM_PPLL_INVALID;
2121		else {
2122			/* use the same PPLL for all DP monitors */
2123			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2124			if (pll != ATOM_PPLL_INVALID)
2125				return pll;
2126		}
2127	} else {
2128		/* use the same PPLL for all monitors with the same clock */
2129		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2130		if (pll != ATOM_PPLL_INVALID)
2131			return pll;
2132	}
2133	/* otherwise, pick one of the plls */
2134	if ((adev->asic_type == CHIP_KABINI) ||
2135	    (adev->asic_type == CHIP_MULLINS)) {
2136		/* KB/ML has PPLL1 and PPLL2 */
2137		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2138		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2139			return ATOM_PPLL2;
2140		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2141			return ATOM_PPLL1;
2142		DRM_ERROR("unable to allocate a PPLL\n");
2143		return ATOM_PPLL_INVALID;
2144	} else {
2145		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2146		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2147		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2148			return ATOM_PPLL2;
2149		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2150			return ATOM_PPLL1;
2151		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2152			return ATOM_PPLL0;
2153		DRM_ERROR("unable to allocate a PPLL\n");
2154		return ATOM_PPLL_INVALID;
2155	}
2156	return ATOM_PPLL_INVALID;
2157}
2158
2159static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2160{
2161	struct amdgpu_device *adev = crtc->dev->dev_private;
2162	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2163	uint32_t cur_lock;
2164
2165	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2166	if (lock)
2167		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2168	else
2169		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2170	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2171}
2172
2173static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2174{
2175	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2176	struct amdgpu_device *adev = crtc->dev->dev_private;
2177
2178	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2179		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2180		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2181}
2182
2183static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2184{
2185	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2186	struct amdgpu_device *adev = crtc->dev->dev_private;
2187
2188	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2189	       upper_32_bits(amdgpu_crtc->cursor_addr));
2190	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2191	       lower_32_bits(amdgpu_crtc->cursor_addr));
2192
2193	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2194		   CUR_CONTROL__CURSOR_EN_MASK |
2195		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2196		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2197}
2198
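/**
 * dce_v8_0_cursor_move_locked - move the hardware cursor
 *
 * @crtc: drm crtc
 * @x: new cursor x position, crtc relative
 * @y: new cursor y position, crtc relative
 *
 * Converts negative positions into a cursor hotspot offset and programs
 * the position, hotspot and size registers.  The caller must hold the
 * cursor update lock (dce_v8_0_lock_cursor).
 */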
2199static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2200				       int x, int y)
2201{
2202	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2203	struct amdgpu_device *adev = crtc->dev->dev_private;
2204	int xorigin = 0, yorigin = 0;
2205
2206	amdgpu_crtc->cursor_x = x;
2207	amdgpu_crtc->cursor_y = y;
2208
2209	/* avivo cursor are offset into the total surface */
2210	x += crtc->x;
2211	y += crtc->y;
2212	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2213
2214	if (x < 0) {
2215		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2216		x = 0;
2217	}
2218	if (y < 0) {
2219		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2220		y = 0;
2221	}
2222
2223	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2224	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2225	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2226	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2227
2228	return 0;
2229}
2230
2231static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2232				     int x, int y)
2233{
2234	int ret;
2235
2236	dce_v8_0_lock_cursor(crtc, true);
2237	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2238	dce_v8_0_lock_cursor(crtc, false);
2239
2240	return ret;
2241}
2242
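/**
 * dce_v8_0_crtc_cursor_set2 - set the cursor image for a crtc
 *
 * @crtc: drm crtc
 * @file_priv: drm file for the GEM handle lookup
 * @handle: GEM handle of the cursor BO, 0 to hide the cursor
 * @width: cursor width
 * @height: cursor height
 * @hot_x: cursor hotspot x
 * @hot_y: cursor hotspot y
 *
 * Pins the new cursor BO in VRAM, adjusts the cursor position if the
 * size or hotspot changed, shows the cursor, and unpins and releases
 * the previous cursor BO.
 */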
2243static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2244				     struct drm_file *file_priv,
2245				     uint32_t handle,
2246				     uint32_t width,
2247				     uint32_t height,
2248				     int32_t hot_x,
2249				     int32_t hot_y)
2250{
2251	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2252	struct drm_gem_object *obj;
2253	struct amdgpu_bo *aobj;
2254	int ret;
2255
2256	if (!handle) {
2257		/* turn off cursor */
2258		dce_v8_0_hide_cursor(crtc);
2259		obj = NULL;
2260		goto unpin;
2261	}
2262
2263	if ((width > amdgpu_crtc->max_cursor_width) ||
2264	    (height > amdgpu_crtc->max_cursor_height)) {
2265		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2266		return -EINVAL;
2267	}
2268
2269	obj = drm_gem_object_lookup(file_priv, handle);
2270	if (!obj) {
2271		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2272		return -ENOENT;
2273	}
2274
2275	aobj = gem_to_amdgpu_bo(obj);
2276	ret = amdgpu_bo_reserve(aobj, false);
2277	if (ret != 0) {
2278		drm_gem_object_put_unlocked(obj);
2279		return ret;
2280	}
2281
2282	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2283	amdgpu_bo_unreserve(aobj);
2284	if (ret) {
2285		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2286		drm_gem_object_put_unlocked(obj);
2287		return ret;
2288	}
2289
2290	dce_v8_0_lock_cursor(crtc, true);
2291
2292	if (width != amdgpu_crtc->cursor_width ||
2293	    height != amdgpu_crtc->cursor_height ||
2294	    hot_x != amdgpu_crtc->cursor_hot_x ||
2295	    hot_y != amdgpu_crtc->cursor_hot_y) {
2296		int x, y;
2297
2298		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2299		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2300
2301		dce_v8_0_cursor_move_locked(crtc, x, y);
2302
2303		amdgpu_crtc->cursor_width = width;
2304		amdgpu_crtc->cursor_height = height;
2305		amdgpu_crtc->cursor_hot_x = hot_x;
2306		amdgpu_crtc->cursor_hot_y = hot_y;
2307	}
2308
2309	dce_v8_0_show_cursor(crtc);
2310	dce_v8_0_lock_cursor(crtc, false);
2311
2312unpin:
2313	if (amdgpu_crtc->cursor_bo) {
2314		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2315		ret = amdgpu_bo_reserve(aobj, true);
2316		if (likely(ret == 0)) {
2317			amdgpu_bo_unpin(aobj);
2318			amdgpu_bo_unreserve(aobj);
2319		}
2320		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2321	}
2322
2323	amdgpu_crtc->cursor_bo = obj;
2324	return 0;
2325}
2326
2327static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2328{
2329	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2330
2331	if (amdgpu_crtc->cursor_bo) {
2332		dce_v8_0_lock_cursor(crtc, true);
2333
2334		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2335					    amdgpu_crtc->cursor_y);
2336
2337		dce_v8_0_show_cursor(crtc);
2338
2339		dce_v8_0_lock_cursor(crtc, false);
2340	}
2341}
2342
2343static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2344				   u16 *blue, uint32_t size,
2345				   struct drm_modeset_acquire_ctx *ctx)
2346{
2347	dce_v8_0_crtc_load_lut(crtc);
2348
2349	return 0;
2350}
2351
2352static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2353{
2354	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2355
2356	drm_crtc_cleanup(crtc);
2357	kfree(amdgpu_crtc);
2358}
2359
2360static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2361	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2362	.cursor_move = dce_v8_0_crtc_cursor_move,
2363	.gamma_set = dce_v8_0_crtc_gamma_set,
2364	.set_config = amdgpu_display_crtc_set_config,
2365	.destroy = dce_v8_0_crtc_destroy,
2366	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2367};
2368
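/**
 * dce_v8_0_crtc_dpms - crtc power management
 *
 * @crtc: drm crtc
 * @mode: DRM_MODE_DPMS_* state
 *
 * Enables or blanks the crtc via atombios, keeps the vblank and
 * pageflip interrupt state in sync, and recomputes the power
 * management clocks for the new dpms state.
 */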
2369static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2370{
2371	struct drm_device *dev = crtc->dev;
2372	struct amdgpu_device *adev = dev->dev_private;
2373	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2374	unsigned type;
2375
2376	switch (mode) {
2377	case DRM_MODE_DPMS_ON:
2378		amdgpu_crtc->enabled = true;
2379		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2380		dce_v8_0_vga_enable(crtc, true);
2381		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2382		dce_v8_0_vga_enable(crtc, false);
2383		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2384		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2385						amdgpu_crtc->crtc_id);
2386		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2387		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2388		drm_crtc_vblank_on(crtc);
2389		dce_v8_0_crtc_load_lut(crtc);
2390		break;
2391	case DRM_MODE_DPMS_STANDBY:
2392	case DRM_MODE_DPMS_SUSPEND:
2393	case DRM_MODE_DPMS_OFF:
2394		drm_crtc_vblank_off(crtc);
2395		if (amdgpu_crtc->enabled) {
2396			dce_v8_0_vga_enable(crtc, true);
2397			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2398			dce_v8_0_vga_enable(crtc, false);
2399		}
2400		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2401		amdgpu_crtc->enabled = false;
2402		break;
2403	}
2404	/* adjust pm to dpms */
2405	amdgpu_pm_compute_clocks(adev);
2406}
2407
2408static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2409{
2410	/* disable crtc pair power gating before programming */
2411	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2412	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2413	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2414}
2415
2416static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2417{
2418	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2419	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2420}
2421
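/**
 * dce_v8_0_crtc_disable - fully disable a crtc
 *
 * @crtc: drm crtc
 *
 * Blanks the crtc, unpins the scanout buffer, power gates the crtc
 * pair, and tears down the PPLL unless another enabled crtc still
 * shares it.
 */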
2422static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2423{
2424	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2425	struct drm_device *dev = crtc->dev;
2426	struct amdgpu_device *adev = dev->dev_private;
2427	struct amdgpu_atom_ss ss;
2428	int i;
2429
2430	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2431	if (crtc->primary->fb) {
2432		int r;
2433		struct amdgpu_framebuffer *amdgpu_fb;
2434		struct amdgpu_bo *abo;
2435
2436		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2437		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2438		r = amdgpu_bo_reserve(abo, true);
2439		if (unlikely(r))
2440			DRM_ERROR("failed to reserve abo before unpin\n");
2441		else {
2442			amdgpu_bo_unpin(abo);
2443			amdgpu_bo_unreserve(abo);
2444		}
2445	}
2446	/* disable the GRPH */
2447	dce_v8_0_grph_enable(crtc, false);
2448
2449	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2450
2451	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2452		if (adev->mode_info.crtcs[i] &&
2453		    adev->mode_info.crtcs[i]->enabled &&
2454		    i != amdgpu_crtc->crtc_id &&
2455		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2456			/* one other crtc is using this pll don't turn
2457			 * off the pll
2458			 */
2459			goto done;
2460		}
2461	}
2462
2463	switch (amdgpu_crtc->pll_id) {
2464	case ATOM_PPLL1:
2465	case ATOM_PPLL2:
2466		/* disable the ppll */
2467		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2468                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2469		break;
2470	case ATOM_PPLL0:
2471		/* disable the ppll */
2472		if ((adev->asic_type == CHIP_KAVERI) ||
2473		    (adev->asic_type == CHIP_BONAIRE) ||
2474		    (adev->asic_type == CHIP_HAWAII))
2475			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2476						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2477		break;
2478	default:
2479		break;
2480	}
2481done:
2482	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2483	amdgpu_crtc->adjusted_clock = 0;
2484	amdgpu_crtc->encoder = NULL;
2485	amdgpu_crtc->connector = NULL;
2486}
2487
2488static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2489				  struct drm_display_mode *mode,
2490				  struct drm_display_mode *adjusted_mode,
2491				  int x, int y, struct drm_framebuffer *old_fb)
2492{
2493	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2494
2495	if (!amdgpu_crtc->adjusted_clock)
2496		return -EINVAL;
2497
2498	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2499	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2500	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2501	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2502	amdgpu_atombios_crtc_scaler_setup(crtc);
2503	dce_v8_0_cursor_reset(crtc);
2504	/* update the hw mode for dpm */
2505	amdgpu_crtc->hw_mode = *adjusted_mode;
2506
2507	return 0;
2508}
2509
2510static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2511				     const struct drm_display_mode *mode,
2512				     struct drm_display_mode *adjusted_mode)
2513{
2514	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2515	struct drm_device *dev = crtc->dev;
2516	struct drm_encoder *encoder;
2517
2518	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2519	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2520		if (encoder->crtc == crtc) {
2521			amdgpu_crtc->encoder = encoder;
2522			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2523			break;
2524		}
2525	}
2526	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2527		amdgpu_crtc->encoder = NULL;
2528		amdgpu_crtc->connector = NULL;
2529		return false;
2530	}
2531	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2532		return false;
2533	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2534		return false;
2535	/* pick pll */
2536	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2537	/* if we can't get a PPLL for a non-DP encoder, fail */
2538	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2539	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2540		return false;
2541
2542	return true;
2543}
2544
2545static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2546				  struct drm_framebuffer *old_fb)
2547{
2548	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2549}
2550
2551static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2552					 struct drm_framebuffer *fb,
2553					 int x, int y, enum mode_set_atomic state)
2554{
2555	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2556}
2557
2558static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2559	.dpms = dce_v8_0_crtc_dpms,
2560	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2561	.mode_set = dce_v8_0_crtc_mode_set,
2562	.mode_set_base = dce_v8_0_crtc_set_base,
2563	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2564	.prepare = dce_v8_0_crtc_prepare,
2565	.commit = dce_v8_0_crtc_commit,
2566	.disable = dce_v8_0_crtc_disable,
2567};
2568
2569static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2570{
2571	struct amdgpu_crtc *amdgpu_crtc;
2572
2573	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2574			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2575	if (amdgpu_crtc == NULL)
2576		return -ENOMEM;
2577
2578	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2579
2580	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2581	amdgpu_crtc->crtc_id = index;
2582	adev->mode_info.crtcs[index] = amdgpu_crtc;
2583
2584	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2585	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2586	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2587	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2588
2589	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2590
2591	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2592	amdgpu_crtc->adjusted_clock = 0;
2593	amdgpu_crtc->encoder = NULL;
2594	amdgpu_crtc->connector = NULL;
2595	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2596
2597	return 0;
2598}
2599
2600static int dce_v8_0_early_init(void *handle)
2601{
2602	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2603
2604	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2605	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2606
2607	dce_v8_0_set_display_funcs(adev);
2608
2609	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2610
2611	switch (adev->asic_type) {
2612	case CHIP_BONAIRE:
2613	case CHIP_HAWAII:
2614		adev->mode_info.num_hpd = 6;
2615		adev->mode_info.num_dig = 6;
2616		break;
2617	case CHIP_KAVERI:
2618		adev->mode_info.num_hpd = 6;
2619		adev->mode_info.num_dig = 7;
2620		break;
2621	case CHIP_KABINI:
2622	case CHIP_MULLINS:
2623		adev->mode_info.num_hpd = 6;
2624		adev->mode_info.num_dig = 6; /* ? */
2625		break;
2626	default:
2627		/* FIXME: not supported yet */
2628		return -EINVAL;
2629	}
2630
2631	dce_v8_0_set_irq_funcs(adev);
2632
2633	return 0;
2634}
2635
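/**
 * dce_v8_0_sw_init - software state setup
 *
 * @handle: amdgpu_device pointer, passed as void *
 *
 * Registers the crtc, pageflip and hotplug interrupt sources, sets up
 * the kms mode_config limits and properties, creates the crtcs, and
 * initializes the AFMT and audio blocks.
 */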
2636static int dce_v8_0_sw_init(void *handle)
2637{
2638	int r, i;
2639	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2640
2641	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2642		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2643		if (r)
2644			return r;
2645	}
2646
2647	for (i = 8; i < 20; i += 2) {
2648		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2649		if (r)
2650			return r;
2651	}
2652
2653	/* HPD hotplug */
2654	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2655	if (r)
2656		return r;
2657
2658	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2659
2660	adev->ddev->mode_config.async_page_flip = true;
2661
2662	adev->ddev->mode_config.max_width = 16384;
2663	adev->ddev->mode_config.max_height = 16384;
2664
2665	adev->ddev->mode_config.preferred_depth = 24;
2666	adev->ddev->mode_config.prefer_shadow = 1;
2667
2668	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2669
2670	r = amdgpu_display_modeset_create_props(adev);
2671	if (r)
2672		return r;
2673
2677	/* allocate crtcs */
2678	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2679		r = dce_v8_0_crtc_init(adev, i);
2680		if (r)
2681			return r;
2682	}
2683
2684	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2685		amdgpu_display_print_display_setup(adev->ddev);
2686	else
2687		return -EINVAL;
2688
2689	/* setup afmt */
2690	r = dce_v8_0_afmt_init(adev);
2691	if (r)
2692		return r;
2693
2694	r = dce_v8_0_audio_init(adev);
2695	if (r)
2696		return r;
2697
2698	drm_kms_helper_poll_init(adev->ddev);
2699
2700	adev->mode_info.mode_config_initialized = true;
2701	return 0;
2702}
2703
2704static int dce_v8_0_sw_fini(void *handle)
2705{
2706	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2707
2708	kfree(adev->mode_info.bios_hardcoded_edid);
2709
2710	drm_kms_helper_poll_fini(adev->ddev);
2711
2712	dce_v8_0_audio_fini(adev);
2713
2714	dce_v8_0_afmt_fini(adev);
2715
2716	drm_mode_config_cleanup(adev->ddev);
2717	adev->mode_info.mode_config_initialized = false;
2718
2719	return 0;
2720}
2721
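/**
 * dce_v8_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer, passed as void *
 *
 * Disables the VGA render path, initializes the DIG PHYs and display
 * engine PLL, sets up hpd and the pageflip interrupts, and starts with
 * all audio pins disabled.
 */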
2722static int dce_v8_0_hw_init(void *handle)
2723{
2724	int i;
2725	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2726
2727	/* disable vga render */
2728	dce_v8_0_set_vga_render_state(adev, false);
2729	/* init dig PHYs, disp eng pll */
2730	amdgpu_atombios_encoder_init_dig(adev);
2731	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2732
2733	/* initialize hpd */
2734	dce_v8_0_hpd_init(adev);
2735
2736	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2737		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2738	}
2739
2740	dce_v8_0_pageflip_interrupt_init(adev);
2741
2742	return 0;
2743}
2744
2745static int dce_v8_0_hw_fini(void *handle)
2746{
2747	int i;
2748	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2749
2750	dce_v8_0_hpd_fini(adev);
2751
2752	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2753		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2754	}
2755
2756	dce_v8_0_pageflip_interrupt_fini(adev);
2757
2758	return 0;
2759}
2760
2761static int dce_v8_0_suspend(void *handle)
2762{
2763	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2764
2765	adev->mode_info.bl_level =
2766		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2767
2768	return dce_v8_0_hw_fini(handle);
2769}
2770
2771static int dce_v8_0_resume(void *handle)
2772{
2773	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2774	int ret;
2775
2776	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2777							   adev->mode_info.bl_level);
2778
2779	ret = dce_v8_0_hw_init(handle);
2780
2781	/* turn on the BL */
2782	if (adev->mode_info.bl_encoder) {
2783		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2784								  adev->mode_info.bl_encoder);
2785		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2786						    bl_level);
2787	}
2788
2789	return ret;
2790}
2791
2792static bool dce_v8_0_is_idle(void *handle)
2793{
2794	return true;
2795}
2796
2797static int dce_v8_0_wait_for_idle(void *handle)
2798{
2799	return 0;
2800}
2801
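/**
 * dce_v8_0_soft_reset - soft reset the display engine
 *
 * @handle: amdgpu_device pointer, passed as void *
 *
 * If the display controller appears hung, toggles the DC bit in
 * SRBM_SOFT_RESET with short delays to let things settle.
 */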
2802static int dce_v8_0_soft_reset(void *handle)
2803{
2804	u32 srbm_soft_reset = 0, tmp;
2805	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2806
2807	if (dce_v8_0_is_display_hung(adev))
2808		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2809
2810	if (srbm_soft_reset) {
2811		tmp = RREG32(mmSRBM_SOFT_RESET);
2812		tmp |= srbm_soft_reset;
2813		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2814		WREG32(mmSRBM_SOFT_RESET, tmp);
2815		tmp = RREG32(mmSRBM_SOFT_RESET);
2816
2817		udelay(50);
2818
2819		tmp &= ~srbm_soft_reset;
2820		WREG32(mmSRBM_SOFT_RESET, tmp);
2821		tmp = RREG32(mmSRBM_SOFT_RESET);
2822
2823		/* Wait a little for things to settle down */
2824		udelay(50);
2825	}
2826	return 0;
2827}
2828
2829static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2830						     int crtc,
2831						     enum amdgpu_interrupt_state state)
2832{
2833	u32 reg_block, lb_interrupt_mask;
2834
2835	if (crtc >= adev->mode_info.num_crtc) {
2836		DRM_DEBUG("invalid crtc %d\n", crtc);
2837		return;
2838	}
2839
2840	switch (crtc) {
2841	case 0:
2842		reg_block = CRTC0_REGISTER_OFFSET;
2843		break;
2844	case 1:
2845		reg_block = CRTC1_REGISTER_OFFSET;
2846		break;
2847	case 2:
2848		reg_block = CRTC2_REGISTER_OFFSET;
2849		break;
2850	case 3:
2851		reg_block = CRTC3_REGISTER_OFFSET;
2852		break;
2853	case 4:
2854		reg_block = CRTC4_REGISTER_OFFSET;
2855		break;
2856	case 5:
2857		reg_block = CRTC5_REGISTER_OFFSET;
2858		break;
2859	default:
2860		DRM_DEBUG("invalid crtc %d\n", crtc);
2861		return;
2862	}
2863
2864	switch (state) {
2865	case AMDGPU_IRQ_STATE_DISABLE:
2866		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2867		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2868		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2869		break;
2870	case AMDGPU_IRQ_STATE_ENABLE:
2871		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2872		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2873		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2874		break;
2875	default:
2876		break;
2877	}
2878}
2879
2880static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2881						    int crtc,
2882						    enum amdgpu_interrupt_state state)
2883{
2884	u32 reg_block, lb_interrupt_mask;
2885
2886	if (crtc >= adev->mode_info.num_crtc) {
2887		DRM_DEBUG("invalid crtc %d\n", crtc);
2888		return;
2889	}
2890
2891	switch (crtc) {
2892	case 0:
2893		reg_block = CRTC0_REGISTER_OFFSET;
2894		break;
2895	case 1:
2896		reg_block = CRTC1_REGISTER_OFFSET;
2897		break;
2898	case 2:
2899		reg_block = CRTC2_REGISTER_OFFSET;
2900		break;
2901	case 3:
2902		reg_block = CRTC3_REGISTER_OFFSET;
2903		break;
2904	case 4:
2905		reg_block = CRTC4_REGISTER_OFFSET;
2906		break;
2907	case 5:
2908		reg_block = CRTC5_REGISTER_OFFSET;
2909		break;
2910	default:
2911		DRM_DEBUG("invalid crtc %d\n", crtc);
2912		return;
2913	}
2914
2915	switch (state) {
2916	case AMDGPU_IRQ_STATE_DISABLE:
2917		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2918		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2919		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2920		break;
2921	case AMDGPU_IRQ_STATE_ENABLE:
2922		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2923		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2924		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2925		break;
2926	default:
2927		break;
2928	}
2929}
2930
2931static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2932					    struct amdgpu_irq_src *src,
2933					    unsigned type,
2934					    enum amdgpu_interrupt_state state)
2935{
2936	u32 dc_hpd_int_cntl;
2937
2938	if (type >= adev->mode_info.num_hpd) {
2939		DRM_DEBUG("invalid hpd %d\n", type);
2940		return 0;
2941	}
2942
2943	switch (state) {
2944	case AMDGPU_IRQ_STATE_DISABLE:
2945		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2946		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2947		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2948		break;
2949	case AMDGPU_IRQ_STATE_ENABLE:
2950		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2951		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2952		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2953		break;
2954	default:
2955		break;
2956	}
2957
2958	return 0;
2959}
2960
2961static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2962					     struct amdgpu_irq_src *src,
2963					     unsigned type,
2964					     enum amdgpu_interrupt_state state)
2965{
2966	switch (type) {
2967	case AMDGPU_CRTC_IRQ_VBLANK1:
2968		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2969		break;
2970	case AMDGPU_CRTC_IRQ_VBLANK2:
2971		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2972		break;
2973	case AMDGPU_CRTC_IRQ_VBLANK3:
2974		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2975		break;
2976	case AMDGPU_CRTC_IRQ_VBLANK4:
2977		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2978		break;
2979	case AMDGPU_CRTC_IRQ_VBLANK5:
2980		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2981		break;
2982	case AMDGPU_CRTC_IRQ_VBLANK6:
2983		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2984		break;
2985	case AMDGPU_CRTC_IRQ_VLINE1:
2986		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
2987		break;
2988	case AMDGPU_CRTC_IRQ_VLINE2:
2989		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
2990		break;
2991	case AMDGPU_CRTC_IRQ_VLINE3:
2992		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
2993		break;
2994	case AMDGPU_CRTC_IRQ_VLINE4:
2995		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
2996		break;
2997	case AMDGPU_CRTC_IRQ_VLINE5:
2998		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
2999		break;
3000	case AMDGPU_CRTC_IRQ_VLINE6:
3001		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3002		break;
3003	default:
3004		break;
3005	}
3006	return 0;
3007}
3008
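/**
 * dce_v8_0_crtc_irq - crtc interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Acks vblank and vline interrupts for the crtc identified by the
 * source id and forwards vblank events to the drm core.
 */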
3009static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3010			     struct amdgpu_irq_src *source,
3011			     struct amdgpu_iv_entry *entry)
3012{
3013	unsigned crtc = entry->src_id - 1;
3014	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3015	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3016								    crtc);
3017
3018	switch (entry->src_data[0]) {
3019	case 0: /* vblank */
3020		if (disp_int & interrupt_status_offsets[crtc].vblank)
3021			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3022		else
3023			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3024
3025		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3026			drm_handle_vblank(adev->ddev, crtc);
3027		}
3028		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3029		break;
3030	case 1: /* vline */
3031		if (disp_int & interrupt_status_offsets[crtc].vline)
3032			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3033		else
3034			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3035
3036		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3037		break;
3038	default:
3039		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3040		break;
3041	}
3042
3043	return 0;
3044}
3045
3046static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3047						 struct amdgpu_irq_src *src,
3048						 unsigned type,
3049						 enum amdgpu_interrupt_state state)
3050{
3051	u32 reg;
3052
3053	if (type >= adev->mode_info.num_crtc) {
3054		DRM_ERROR("invalid pageflip crtc %d\n", type);
3055		return -EINVAL;
3056	}
3057
3058	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3059	if (state == AMDGPU_IRQ_STATE_DISABLE)
3060		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3061		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3062	else
3063		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3064		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3065
3066	return 0;
3067}
3068
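/**
 * dce_v8_0_pageflip_irq - pageflip interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Acks the pageflip interrupt, completes the pending flip work,
 * sends the vblank event to userspace, and schedules the unpin work.
 */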
3069static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3070				struct amdgpu_irq_src *source,
3071				struct amdgpu_iv_entry *entry)
3072{
3073	unsigned long flags;
3074	unsigned crtc_id;
3075	struct amdgpu_crtc *amdgpu_crtc;
3076	struct amdgpu_flip_work *works;
3077
3078	crtc_id = (entry->src_id - 8) >> 1;
3079	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3080
3081	if (crtc_id >= adev->mode_info.num_crtc) {
3082		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3083		return -EINVAL;
3084	}
3085
3086	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3087	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3088		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3089		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3090
3091	/* the IRQ can fire during early init, before the crtc is set up */
3092	if (amdgpu_crtc == NULL)
3093		return 0;
3094
3095	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3096	works = amdgpu_crtc->pflip_works;
3097	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3098		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3099						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3100						amdgpu_crtc->pflip_status,
3101						AMDGPU_FLIP_SUBMITTED);
3102		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3103		return 0;
3104	}
3105
3106	/* page flip completed. clean up */
3107	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3108	amdgpu_crtc->pflip_works = NULL;
3109
3110	/* wake up userspace */
3111	if (works->event)
3112		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3113
3114	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3115
3116	drm_crtc_vblank_put(&amdgpu_crtc->base);
3117	schedule_work(&works->unpin_work);
3118
3119	return 0;
3120}
3121
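/**
 * dce_v8_0_hpd_irq - hotplug interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Acks the HPD interrupt and schedules the hotplug work to re-probe
 * the connectors.
 */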
3122static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3123			    struct amdgpu_irq_src *source,
3124			    struct amdgpu_iv_entry *entry)
3125{
3126	uint32_t disp_int, mask, tmp;
3127	unsigned hpd;
3128
3129	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3130		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3131		return 0;
3132	}
3133
3134	hpd = entry->src_data[0];
3135	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3136	mask = interrupt_status_offsets[hpd].hpd;
3137
3138	if (disp_int & mask) {
3139		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3140		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3141		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3142		schedule_work(&adev->hotplug_work);
3143		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3144	}
3145
3146	return 0;
3147
3148}
3149
3150static int dce_v8_0_set_clockgating_state(void *handle,
3151					  enum amd_clockgating_state state)
3152{
3153	return 0;
3154}
3155
3156static int dce_v8_0_set_powergating_state(void *handle,
3157					  enum amd_powergating_state state)
3158{
3159	return 0;
3160}
3161
3162static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3163	.name = "dce_v8_0",
3164	.early_init = dce_v8_0_early_init,
3165	.late_init = NULL,
3166	.sw_init = dce_v8_0_sw_init,
3167	.sw_fini = dce_v8_0_sw_fini,
3168	.hw_init = dce_v8_0_hw_init,
3169	.hw_fini = dce_v8_0_hw_fini,
3170	.suspend = dce_v8_0_suspend,
3171	.resume = dce_v8_0_resume,
3172	.is_idle = dce_v8_0_is_idle,
3173	.wait_for_idle = dce_v8_0_wait_for_idle,
3174	.soft_reset = dce_v8_0_soft_reset,
3175	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3176	.set_powergating_state = dce_v8_0_set_powergating_state,
3177};
3178
3179static void
3180dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3181			  struct drm_display_mode *mode,
3182			  struct drm_display_mode *adjusted_mode)
3183{
3184	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3185
3186	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3187
3188	/* need to call this here rather than in prepare() since we need some crtc info */
3189	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3190
3191	/* set scaler clears this on some chips */
3192	dce_v8_0_set_interleave(encoder->crtc, mode);
3193
3194	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3195		dce_v8_0_afmt_enable(encoder, true);
3196		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3197	}
3198}
3199
3200static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3201{
3202	struct amdgpu_device *adev = encoder->dev->dev_private;
3203	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3204	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3205
3206	if ((amdgpu_encoder->active_device &
3207	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3208	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3209	     ENCODER_OBJECT_ID_NONE)) {
3210		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3211		if (dig) {
3212			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3213			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3214				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3215		}
3216	}
3217
3218	amdgpu_atombios_scratch_regs_lock(adev, true);
3219
3220	if (connector) {
3221		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3222
3223		/* select the clock/data port if it uses a router */
3224		if (amdgpu_connector->router.cd_valid)
3225			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3226
3227		/* turn eDP panel on for mode set */
3228		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3229			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3230							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3231	}
3232
3233	/* this is needed for the pll/ss setup to work correctly in some cases */
3234	amdgpu_atombios_encoder_set_crtc_source(encoder);
3235	/* set up the FMT blocks */
3236	dce_v8_0_program_fmt(encoder);
3237}
3238
3239static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3240{
3241	struct drm_device *dev = encoder->dev;
3242	struct amdgpu_device *adev = dev->dev_private;
3243
3244	/* need to call this here as we need the crtc set up */
3245	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3246	amdgpu_atombios_scratch_regs_lock(adev, false);
3247}
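/*
 * Editorial note (not in the original source): with the legacy DRM helper
 * library these hooks run in a fixed order during a modeset -- prepare()
 * turns the output off and claims resources, mode_set() programs it, and
 * commit() turns it back on.  A hedged sketch of that sequence; the function
 * below is a hypothetical illustration, not part of the driver:
 */
static void dce_v8_0_modeset_order_sketch(struct drm_encoder *encoder,
					  struct drm_display_mode *mode,
					  struct drm_display_mode *adjusted)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;

	funcs->prepare(encoder);		  /* dpms off, pick a dig encoder */
	funcs->mode_set(encoder, mode, adjusted); /* program the encoder */
	funcs->commit(encoder);			  /* dpms on, unlock scratch regs */
}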
3248
3249static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3250{
3251	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3252	struct amdgpu_encoder_atom_dig *dig;
3253
3254	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3255
3256	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3257		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3258			dce_v8_0_afmt_enable(encoder, false);
3259		dig = amdgpu_encoder->enc_priv;
3260		dig->dig_encoder = -1;
3261	}
3262	amdgpu_encoder->active_device = 0;
3263}
3264
3265/* these are handled by the primary encoders */
3266static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3267{
3268
3269}
3270
3271static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3272{
3273
3274}
3275
3276static void
3277dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3278		      struct drm_display_mode *mode,
3279		      struct drm_display_mode *adjusted_mode)
3280{
3281
3282}
3283
3284static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3285{
3286
3287}
3288
3289static void
3290dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3291{
3292
3293}
3294
3295static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3296	.dpms = dce_v8_0_ext_dpms,
3297	.prepare = dce_v8_0_ext_prepare,
3298	.mode_set = dce_v8_0_ext_mode_set,
3299	.commit = dce_v8_0_ext_commit,
3300	.disable = dce_v8_0_ext_disable,
3301	/* no detect for TMDS/LVDS yet */
3302};
3303
3304static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3305	.dpms = amdgpu_atombios_encoder_dpms,
3306	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3307	.prepare = dce_v8_0_encoder_prepare,
3308	.mode_set = dce_v8_0_encoder_mode_set,
3309	.commit = dce_v8_0_encoder_commit,
3310	.disable = dce_v8_0_encoder_disable,
3311	.detect = amdgpu_atombios_encoder_dig_detect,
3312};
3313
3314static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3315	.dpms = amdgpu_atombios_encoder_dpms,
3316	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3317	.prepare = dce_v8_0_encoder_prepare,
3318	.mode_set = dce_v8_0_encoder_mode_set,
3319	.commit = dce_v8_0_encoder_commit,
3320	.detect = amdgpu_atombios_encoder_dac_detect,
3321};
3322
3323static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3324{
3325	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3326	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3327		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3328	kfree(amdgpu_encoder->enc_priv);
3329	drm_encoder_cleanup(encoder);
3330	kfree(amdgpu_encoder);
3331}
3332
3333static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3334	.destroy = dce_v8_0_encoder_destroy,
3335};
3336
3337static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3338				 uint32_t encoder_enum,
3339				 uint32_t supported_device,
3340				 u16 caps)
3341{
3342	struct drm_device *dev = adev->ddev;
3343	struct drm_encoder *encoder;
3344	struct amdgpu_encoder *amdgpu_encoder;
3345
3346	/* see if we already added it */
3347	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3348		amdgpu_encoder = to_amdgpu_encoder(encoder);
3349		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3350			amdgpu_encoder->devices |= supported_device;
3351			return;
3352		}
3353
3354	}
3355
3356	/* add a new one */
3357	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3358	if (!amdgpu_encoder)
3359		return;
3360
3361	encoder = &amdgpu_encoder->base;
3362	switch (adev->mode_info.num_crtc) {
3363	case 1:
3364		encoder->possible_crtcs = 0x1;
3365		break;
3366	case 2:
3367	default:
3368		encoder->possible_crtcs = 0x3;
3369		break;
3370	case 4:
3371		encoder->possible_crtcs = 0xf;
3372		break;
3373	case 6:
3374		encoder->possible_crtcs = 0x3f;
3375		break;
3376	}
3377
3378	amdgpu_encoder->enc_priv = NULL;
3379
3380	amdgpu_encoder->encoder_enum = encoder_enum;
3381	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3382	amdgpu_encoder->devices = supported_device;
3383	amdgpu_encoder->rmx_type = RMX_OFF;
3384	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3385	amdgpu_encoder->is_ext_encoder = false;
3386	amdgpu_encoder->caps = caps;
3387
3388	switch (amdgpu_encoder->encoder_id) {
3389	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3390	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3391		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3392				 DRM_MODE_ENCODER_DAC, NULL);
3393		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3394		break;
3395	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3396	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3397	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3398	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3399	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3400		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3401			amdgpu_encoder->rmx_type = RMX_FULL;
3402			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3403					 DRM_MODE_ENCODER_LVDS, NULL);
3404			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3405		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3406			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3407					 DRM_MODE_ENCODER_DAC, NULL);
3408			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3409		} else {
3410			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3411					 DRM_MODE_ENCODER_TMDS, NULL);
3412			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3413		}
3414		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3415		break;
3416	case ENCODER_OBJECT_ID_SI170B:
3417	case ENCODER_OBJECT_ID_CH7303:
3418	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3419	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3420	case ENCODER_OBJECT_ID_TITFP513:
3421	case ENCODER_OBJECT_ID_VT1623:
3422	case ENCODER_OBJECT_ID_HDMI_SI1930:
3423	case ENCODER_OBJECT_ID_TRAVIS:
3424	case ENCODER_OBJECT_ID_NUTMEG:
3425		/* these are handled by the primary encoders */
3426		amdgpu_encoder->is_ext_encoder = true;
3427		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3428			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3429					 DRM_MODE_ENCODER_LVDS, NULL);
3430		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3431			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3432					 DRM_MODE_ENCODER_DAC, NULL);
3433		else
3434			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3435					 DRM_MODE_ENCODER_TMDS, NULL);
3436		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3437		break;
3438	}
3439}
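/*
 * Editorial note (not in the original source): possible_crtcs is a bitmask
 * in which bit i allows the encoder to drive CRTC i, so for the CRTC counts
 * DCE 8.x actually reports (1, 2, 4 or 6) the switch above is equivalent to
 * the closed form below.  A hedged sketch, not part of the driver:
 */
static u32 dce_v8_0_possible_crtcs_sketch(int num_crtc)
{
	return (1u << num_crtc) - 1;	/* e.g. 6 CRTCs -> 0x3f */
}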
3440
3441static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3442	.bandwidth_update = &dce_v8_0_bandwidth_update,
3443	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3444	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3445	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3446	.hpd_sense = &dce_v8_0_hpd_sense,
3447	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3448	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3449	.page_flip = &dce_v8_0_page_flip,
3450	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3451	.add_encoder = &dce_v8_0_encoder_add,
3452	.add_connector = &amdgpu_connector_add,
3453};
3454
3455static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3456{
3457	if (adev->mode_info.funcs == NULL)
3458		adev->mode_info.funcs = &dce_v8_0_display_funcs;
3459}
3460
3461static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3462	.set = dce_v8_0_set_crtc_interrupt_state,
3463	.process = dce_v8_0_crtc_irq,
3464};
3465
3466static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3467	.set = dce_v8_0_set_pageflip_interrupt_state,
3468	.process = dce_v8_0_pageflip_irq,
3469};
3470
3471static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3472	.set = dce_v8_0_set_hpd_interrupt_state,
3473	.process = dce_v8_0_hpd_irq,
3474};
3475
3476static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3477{
3478	if (adev->mode_info.num_crtc > 0)
3479		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3480	else
3481		adev->crtc_irq.num_types = 0;
3482	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3483
3484	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3485	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3486
3487	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3488	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3489}
3490
3491const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3492{
3493	.type = AMD_IP_BLOCK_TYPE_DCE,
3494	.major = 8,
3495	.minor = 0,
3496	.rev = 0,
3497	.funcs = &dce_v8_0_ip_funcs,
3498};
3499
3500const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3501{
3502	.type = AMD_IP_BLOCK_TYPE_DCE,
3503	.major = 8,
3504	.minor = 1,
3505	.rev = 0,
3506	.funcs = &dce_v8_0_ip_funcs,
3507};
3508
3509const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3510{
3511	.type = AMD_IP_BLOCK_TYPE_DCE,
3512	.major = 8,
3513	.minor = 2,
3514	.rev = 0,
3515	.funcs = &dce_v8_0_ip_funcs,
3516};
3517
3518const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3519{
3520	.type = AMD_IP_BLOCK_TYPE_DCE,
3521	.major = 8,
3522	.minor = 3,
3523	.rev = 0,
3524	.funcs = &dce_v8_0_ip_funcs,
3525};
3526
3527const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3528{
3529	.type = AMD_IP_BLOCK_TYPE_DCE,
3530	.major = 8,
3531	.minor = 5,
3532	.rev = 0,
3533	.funcs = &dce_v8_0_ip_funcs,
3534};
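/*
 * Editorial note (not in the original source): these amdgpu_ip_block_version
 * tables are what the ASIC setup code registers at init time.  A hedged
 * sketch of that registration, assuming the per-chip mapping used by cik.c
 * (the authoritative selection lives there, keyed on adev->asic_type):
 */
static void cik_dce_ip_block_add_sketch(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
		break;
	case CHIP_HAWAII:
		amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
		break;
	case CHIP_KAVERI:
		amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
		break;
	default:
		amdgpu_device_ip_block_add(adev, &dce_v8_0_ip_block);
		break;
	}
}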
v6.2
  23
  24#include <drm/drm_fb_helper.h>
  25#include <drm/drm_fourcc.h>
  26#include <drm/drm_vblank.h>
  27
  28#include "amdgpu.h"
  29#include "amdgpu_pm.h"
  30#include "amdgpu_i2c.h"
  31#include "cikd.h"
  32#include "atom.h"
  33#include "amdgpu_atombios.h"
  34#include "atombios_crtc.h"
  35#include "atombios_encoders.h"
  36#include "amdgpu_pll.h"
  37#include "amdgpu_connectors.h"
  38#include "amdgpu_display.h"
  39#include "dce_v8_0.h"
  40
  41#include "dce/dce_8_0_d.h"
  42#include "dce/dce_8_0_sh_mask.h"
  43
  44#include "gca/gfx_7_2_enum.h"
  45
  46#include "gmc/gmc_7_1_d.h"
  47#include "gmc/gmc_7_1_sh_mask.h"
  48
  49#include "oss/oss_2_0_d.h"
  50#include "oss/oss_2_0_sh_mask.h"
  51
  52static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  53static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  54
  55static const u32 crtc_offsets[6] =
  56{
  57	CRTC0_REGISTER_OFFSET,
  58	CRTC1_REGISTER_OFFSET,
  59	CRTC2_REGISTER_OFFSET,
  60	CRTC3_REGISTER_OFFSET,
  61	CRTC4_REGISTER_OFFSET,
  62	CRTC5_REGISTER_OFFSET
  63};
  64
  65static const u32 hpd_offsets[] =
  66{
  67	HPD0_REGISTER_OFFSET,
  68	HPD1_REGISTER_OFFSET,
  69	HPD2_REGISTER_OFFSET,
  70	HPD3_REGISTER_OFFSET,
  71	HPD4_REGISTER_OFFSET,
  72	HPD5_REGISTER_OFFSET
  73};
  74
  75static const uint32_t dig_offsets[] = {
  76	CRTC0_REGISTER_OFFSET,
  77	CRTC1_REGISTER_OFFSET,
  78	CRTC2_REGISTER_OFFSET,
  79	CRTC3_REGISTER_OFFSET,
  80	CRTC4_REGISTER_OFFSET,
  81	CRTC5_REGISTER_OFFSET,
  82	(0x13830 - 0x7030) >> 2,
  83};
  84
  85static const struct {
  86	uint32_t	reg;
  87	uint32_t	vblank;
  88	uint32_t	vline;
  89	uint32_t	hpd;
  90
  91} interrupt_status_offsets[6] = { {
  92	.reg = mmDISP_INTERRUPT_STATUS,
  93	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 116}, {
 117	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 118	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 119	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 120	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 121} };
 122
 123static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 124				     u32 block_offset, u32 reg)
 125{
 126	unsigned long flags;
 127	u32 r;
 128
 129	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 130	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 131	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 132	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 133
 134	return r;
 135}
 136
 137static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 138				      u32 block_offset, u32 reg, u32 v)
 139{
 140	unsigned long flags;
 141
 142	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 143	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 144	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 145	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 146}
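/*
 * Editorial note (not in the original source): the two helpers above use an
 * indexed (index/data pair) access scheme -- the write to the ENDPOINT_INDEX
 * register selects an internal word, and the following access to the
 * ENDPOINT_DATA register reads or writes it.  The spinlock must cover both
 * MMIO operations so concurrent callers cannot interleave their index and
 * data accesses.
 */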
 147
 148static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 149{
 150	if (crtc >= adev->mode_info.num_crtc)
 151		return 0;
 152	else
 153		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 154}
 155
 156static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 157{
 158	unsigned i;
 159
 160	/* Enable pflip interrupts */
 161	for (i = 0; i < adev->mode_info.num_crtc; i++)
 162		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 163}
 164
 165static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 166{
 167	unsigned i;
 168
 169	/* Disable pflip interrupts */
 170	for (i = 0; i < adev->mode_info.num_crtc; i++)
 171		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 172}
 173
 174/**
 175 * dce_v8_0_page_flip - pageflip callback.
 176 *
 177 * @adev: amdgpu_device pointer
 178 * @crtc_id: crtc to cleanup pageflip on
 179 * @crtc_base: new address of the crtc (GPU MC address)
 180 * @async: asynchronous flip
 181 *
 182 * Triggers the actual pageflip by updating the primary
 183 * surface base address.
 184 */
 185static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 186			       int crtc_id, u64 crtc_base, bool async)
 187{
 188	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 189	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 190
 191	/* flip at hsync for async, default is vsync */
 192	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 193	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 194	/* update pitch */
 195	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
 196	       fb->pitches[0] / fb->format->cpp[0]);
 197	/* update the primary scanout addresses */
 198	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 199	       upper_32_bits(crtc_base));
 200	/* writing to the low address triggers the update */
 201	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 202	       lower_32_bits(crtc_base));
 203	/* post the write */
 204	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 205}
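/*
 * Editorial note (not in the original source): the write ordering above is
 * load-bearing -- per the inline comments, the new surface address only
 * takes effect when the low SURFACE_ADDRESS register is written, so the
 * high half must land first, and the trailing read merely posts the write.
 */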
 206
 207static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 208					u32 *vbl, u32 *position)
 209{
 210	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 211		return -EINVAL;
 212
 213	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 214	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 215
 216	return 0;
 217}
 218
 219/**
 220 * dce_v8_0_hpd_sense - hpd sense callback.
 221 *
 222 * @adev: amdgpu_device pointer
 223 * @hpd: hpd (hotplug detect) pin
 224 *
 225 * Checks if a digital monitor is connected (evergreen+).
 226 * Returns true if connected, false if not connected.
 227 */
 228static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 229			       enum amdgpu_hpd_id hpd)
 230{
 231	bool connected = false;
 232
 233	if (hpd >= adev->mode_info.num_hpd)
 234		return connected;
 235
 236	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 237	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 238		connected = true;
 239
 240	return connected;
 241}
 242
 243/**
 244 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 245 *
 246 * @adev: amdgpu_device pointer
 247 * @hpd: hpd (hotplug detect) pin
 248 *
 249 * Set the polarity of the hpd pin (evergreen+).
 250 */
 251static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 252				      enum amdgpu_hpd_id hpd)
 253{
 254	u32 tmp;
 255	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 256
 257	if (hpd >= adev->mode_info.num_hpd)
 258		return;
 259
 260	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 261	if (connected)
 262		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 263	else
 264		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 265	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 266}
 267
 268/**
 269 * dce_v8_0_hpd_init - hpd setup callback.
 270 *
 271 * @adev: amdgpu_device pointer
 272 *
 274 * Set up the hpd pins used by the card (evergreen+).
 274 * Enable the pin, set the polarity, and enable the hpd interrupts.
 275 */
 276static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 277{
 278	struct drm_device *dev = adev_to_drm(adev);
 279	struct drm_connector *connector;
 280	struct drm_connector_list_iter iter;
 281	u32 tmp;
 282
 283	drm_connector_list_iter_begin(dev, &iter);
 284	drm_for_each_connector_iter(connector, &iter) {
 285		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 286
 287		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 288			continue;
 289
 290		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 291		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 292		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 293
 294		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 295		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 296			/* don't try to enable hpd on eDP or LVDS; this avoids breaking
 297			 * the aux dp channel on imac and helps (but does not completely
 298			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143.
 299			 * It also avoids interrupt storms during dpms.
 300			 */
 301			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 302			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 303			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 304			continue;
 305		}
 306
 307		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 308		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 309	}
 310	drm_connector_list_iter_end(&iter);
 311}
 312
 313/**
 314 * dce_v8_0_hpd_fini - hpd tear down callback.
 315 *
 316 * @adev: amdgpu_device pointer
 317 *
 318 * Tear down the hpd pins used by the card (evergreen+).
 319 * Disable the hpd interrupts.
 320 */
 321static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 322{
 323	struct drm_device *dev = adev_to_drm(adev);
 324	struct drm_connector *connector;
 325	struct drm_connector_list_iter iter;
 326	u32 tmp;
 327
 328	drm_connector_list_iter_begin(dev, &iter);
 329	drm_for_each_connector_iter(connector, &iter) {
 330		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 331
 332		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 333			continue;
 334
 335		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 336		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 337		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 338
 339		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 340	}
 341	drm_connector_list_iter_end(&iter);
 342}
 343
 344static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 345{
 346	return mmDC_GPIO_HPD_A;
 347}
 348
 349static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 350{
 351	u32 crtc_hung = 0;
 352	u32 crtc_status[6];
 353	u32 i, j, tmp;
 354
 355	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 356		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
 357			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 358			crtc_hung |= (1 << i);
 359		}
 360	}
 361
 362	for (j = 0; j < 10; j++) {
 363		for (i = 0; i < adev->mode_info.num_crtc; i++) {
 364			if (crtc_hung & (1 << i)) {
 365				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 366				if (tmp != crtc_status[i])
 367					crtc_hung &= ~(1 << i);
 368			}
 369		}
 370		if (crtc_hung == 0)
 371			return false;
 372		udelay(100);
 373	}
 374
 375	return true;
 376}
 377
 378static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 379					  bool render)
 380{
 381	u32 tmp;
 382
 383	/* Lock out access through the VGA aperture */
 384	tmp = RREG32(mmVGA_HDP_CONTROL);
 385	if (render)
 386		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
 387	else
 388		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 389	WREG32(mmVGA_HDP_CONTROL, tmp);
 390
 391	/* disable VGA render */
 392	tmp = RREG32(mmVGA_RENDER_CONTROL);
 393	if (render)
 394		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
 395	else
 396		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 397	WREG32(mmVGA_RENDER_CONTROL, tmp);
 398}
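/*
 * Editorial note (not in the original source): REG_SET_FIELD() is the
 * driver's read-modify-write helper for one named bitfield.  A hedged sketch
 * of its effect, using the hypothetical names SOME_REG__SOME_FIELD_MASK and
 * SOME_REG__SOME_FIELD__SHIFT (see the amdgpu headers for the real macro):
 *
 *   tmp = (tmp & ~SOME_REG__SOME_FIELD_MASK) |
 *         ((val << SOME_REG__SOME_FIELD__SHIFT) & SOME_REG__SOME_FIELD_MASK);
 */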
 399
 400static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 401{
 402	int num_crtc = 0;
 403
 404	switch (adev->asic_type) {
 405	case CHIP_BONAIRE:
 406	case CHIP_HAWAII:
 407		num_crtc = 6;
 408		break;
 409	case CHIP_KAVERI:
 410		num_crtc = 4;
 411		break;
 412	case CHIP_KABINI:
 413	case CHIP_MULLINS:
 414		num_crtc = 2;
 415		break;
 416	default:
 417		num_crtc = 0;
 418	}
 419	return num_crtc;
 420}
 421
 422void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 423{
 424	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
 425	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 426		u32 tmp;
 427		int crtc_enabled, i;
 428
 429		dce_v8_0_set_vga_render_state(adev, false);
 430
 431		/* Disable CRTCs */
 432		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
 433			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 434									 CRTC_CONTROL, CRTC_MASTER_EN);
 435			if (crtc_enabled) {
 436				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 437				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 438				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 439				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 440				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 441			}
 442		}
 443	}
 444}
 445
 446static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 447{
 448	struct drm_device *dev = encoder->dev;
 449	struct amdgpu_device *adev = drm_to_adev(dev);
 450	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 451	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 452	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 453	int bpc = 0;
 454	u32 tmp = 0;
 455	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 456
 457	if (connector) {
 458		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 459		bpc = amdgpu_connector_get_monitor_bpc(connector);
 460		dither = amdgpu_connector->dither;
 461	}
 462
 463	/* LVDS/eDP FMT is set up by atom */
 464	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 465		return;
 466
 467	/* not needed for analog */
 468	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
 469	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
 470		return;
 471
 472	if (bpc == 0)
 473		return;
 474
 475	switch (bpc) {
 476	case 6:
 477		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 478			/* XXX sort out optimal dither settings */
 479			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 480				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 481				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 482				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 483		else
 484			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 485			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 486		break;
 487	case 8:
 488		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 489			/* XXX sort out optimal dither settings */
 490			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 491				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 492				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 493				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 494				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 495		else
 496			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 497			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 498		break;
 499	case 10:
 500		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 501			/* XXX sort out optimal dither settings */
 502			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 503				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 504				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 505				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 506				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 507		else
 508			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 509			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 510		break;
 511	default:
 512		/* not needed */
 513		break;
 514	}
 515
 516	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 517}
 518
 519
 520/* display watermark setup */
 521/**
 522 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 523 *
 524 * @adev: amdgpu_device pointer
 525 * @amdgpu_crtc: the selected display controller
 526 * @mode: the current display mode on the selected display
 527 * controller
 528 *
 529 * Setup up the line buffer allocation for
 530 * the selected display controller (CIK).
 531 * Returns the line buffer size in pixels.
 532 */
 533static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
 534				       struct amdgpu_crtc *amdgpu_crtc,
 535				       struct drm_display_mode *mode)
 536{
 537	u32 tmp, buffer_alloc, i;
 538	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 539	/*
 540	 * Line Buffer Setup
 541	 * There are 6 line buffers, one for each display controller.
 542	 * There are 3 partitions per LB. Select the number of partitions
 543	 * to enable based on the display width.  For display widths larger
 544	 * than 4096, you need to use 2 display controllers and combine
 545	 * them using the stereo blender.
 546	 */
 547	if (amdgpu_crtc->base.enabled && mode) {
 548		if (mode->crtc_hdisplay < 1920) {
 549			tmp = 1;
 550			buffer_alloc = 2;
 551		} else if (mode->crtc_hdisplay < 2560) {
 552			tmp = 2;
 553			buffer_alloc = 2;
 554		} else if (mode->crtc_hdisplay < 4096) {
 555			tmp = 0;
 556			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 557		} else {
 558			DRM_DEBUG_KMS("Mode too big for LB!\n");
 559			tmp = 0;
 560			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 561		}
 562	} else {
 563		tmp = 1;
 564		buffer_alloc = 0;
 565	}
 566
 567	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
 568	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
 569	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
 570
 571	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
 572	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 573	for (i = 0; i < adev->usec_timeout; i++) {
 574		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
 575		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
 576			break;
 577		udelay(1);
 578	}
 579
 580	if (amdgpu_crtc->base.enabled && mode) {
 581		switch (tmp) {
 582		case 0:
 583		default:
 584			return 4096 * 2;
 585		case 1:
 586			return 1920 * 2;
 587		case 2:
 588			return 2560 * 2;
 589		}
 590	}
 591
 592	/* controller not enabled, so no lb used */
 593	return 0;
 594}
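/*
 * Editorial note (not in the original source): a worked example, assuming a
 * single enabled 1920x1080 head.  crtc_hdisplay == 1920 is not < 1920, so it
 * falls into the "< 2560" bucket: LB_MEMORY_CONFIG is written with 2, two
 * DMIF buffers are requested, and the function returns 2560 * 2 = 5120
 * pixels of line buffer to the watermark code below.
 */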
 595
 596/**
 597 * cik_get_number_of_dram_channels - get the number of dram channels
 598 *
 599 * @adev: amdgpu_device pointer
 600 *
 601 * Look up the number of video ram channels (CIK).
 602 * Used for display watermark bandwidth calculations
 603 * Returns the number of dram channels
 604 */
 605static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 606{
 607	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 608
 609	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 610	case 0:
 611	default:
 612		return 1;
 613	case 1:
 614		return 2;
 615	case 2:
 616		return 4;
 617	case 3:
 618		return 8;
 619	case 4:
 620		return 3;
 621	case 5:
 622		return 6;
 623	case 6:
 624		return 10;
 625	case 7:
 626		return 12;
 627	case 8:
 628		return 16;
 629	}
 630}
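/*
 * Editorial note (not in the original source): NOOFCHAN is an encoded field
 * rather than a raw count -- for example an encoding of 3 means 8 channels
 * while 4 means 3 channels -- which is why the lookup above must stay a
 * switch and cannot be reduced to a shift.
 */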
 631
 632struct dce8_wm_params {
 633	u32 dram_channels; /* number of dram channels */
 634	u32 yclk;          /* bandwidth per dram data pin in kHz */
 635	u32 sclk;          /* engine clock in kHz */
 636	u32 disp_clk;      /* display clock in kHz */
 637	u32 src_width;     /* viewport width */
 638	u32 active_time;   /* active display time in ns */
 639	u32 blank_time;    /* blank time in ns */
 640	bool interlaced;    /* mode is interlaced */
 641	fixed20_12 vsc;    /* vertical scale ratio */
 642	u32 num_heads;     /* number of active crtcs */
 643	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 644	u32 lb_size;       /* line buffer allocated to pipe */
 645	u32 vtaps;         /* vertical scaler taps */
 646};
 647
 648/**
 649 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 650 *
 651 * @wm: watermark calculation data
 652 *
 653 * Calculate the raw dram bandwidth (CIK).
 654 * Used for display watermark bandwidth calculations
 655 * Returns the dram bandwidth in MBytes/s
 656 */
 657static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
 658{
 659	/* Calculate raw DRAM Bandwidth */
 660	fixed20_12 dram_efficiency; /* 0.7 */
 661	fixed20_12 yclk, dram_channels, bandwidth;
 662	fixed20_12 a;
 663
 664	a.full = dfixed_const(1000);
 665	yclk.full = dfixed_const(wm->yclk);
 666	yclk.full = dfixed_div(yclk, a);
 667	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 668	a.full = dfixed_const(10);
 669	dram_efficiency.full = dfixed_const(7);
 670	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 671	bandwidth.full = dfixed_mul(dram_channels, yclk);
 672	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 673
 674	return dfixed_trunc(bandwidth);
 675}
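/*
 * Editorial note (not in the original source): a plain-integer sketch of the
 * 20.12 fixed-point arithmetic above, with a worked example.  Hypothetical
 * helper, not part of the driver:
 */
static u32 dce_v8_0_dram_bandwidth_sketch(struct dce8_wm_params *wm)
{
	/* yclk is in kHz, each channel moves 4 bytes/cycle, efficiency ~0.7;
	 * e.g. yclk = 1000000 and 2 channels -> 1000 * 8 * 7 / 10 = 5600 MB/s.
	 */
	return (wm->yclk / 1000) * (wm->dram_channels * 4) * 7 / 10;
}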
 676
 677/**
 678 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 679 *
 680 * @wm: watermark calculation data
 681 *
 682 * Calculate the dram bandwidth used for display (CIK).
 683 * Used for display watermark bandwidth calculations
 684 * Returns the dram bandwidth for display in MBytes/s
 685 */
 686static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 687{
 688	/* Calculate DRAM Bandwidth and the part allocated to display. */
 689	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 690	fixed20_12 yclk, dram_channels, bandwidth;
 691	fixed20_12 a;
 692
 693	a.full = dfixed_const(1000);
 694	yclk.full = dfixed_const(wm->yclk);
 695	yclk.full = dfixed_div(yclk, a);
 696	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 697	a.full = dfixed_const(10);
 698	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 699	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 700	bandwidth.full = dfixed_mul(dram_channels, yclk);
 701	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 702
 703	return dfixed_trunc(bandwidth);
 704}
 705
 706/**
 707 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 708 *
 709 * @wm: watermark calculation data
 710 *
 711 * Calculate the data return bandwidth used for display (CIK).
 712 * Used for display watermark bandwidth calculations
 713 * Returns the data return bandwidth in MBytes/s
 714 */
 715static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 716{
 717	/* Calculate the display Data return Bandwidth */
 718	fixed20_12 return_efficiency; /* 0.8 */
 719	fixed20_12 sclk, bandwidth;
 720	fixed20_12 a;
 721
 722	a.full = dfixed_const(1000);
 723	sclk.full = dfixed_const(wm->sclk);
 724	sclk.full = dfixed_div(sclk, a);
 725	a.full = dfixed_const(10);
 726	return_efficiency.full = dfixed_const(8);
 727	return_efficiency.full = dfixed_div(return_efficiency, a);
 728	a.full = dfixed_const(32);
 729	bandwidth.full = dfixed_mul(a, sclk);
 730	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 731
 732	return dfixed_trunc(bandwidth);
 733}
 734
 735/**
 736 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 737 *
 738 * @wm: watermark calculation data
 739 *
 740 * Calculate the dmif bandwidth used for display (CIK).
 741 * Used for display watermark bandwidth calculations
 742 * Returns the dmif bandwidth in MBytes/s
 743 */
 744static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 745{
 746	/* Calculate the DMIF Request Bandwidth */
 747	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 748	fixed20_12 disp_clk, bandwidth;
 749	fixed20_12 a, b;
 750
 751	a.full = dfixed_const(1000);
 752	disp_clk.full = dfixed_const(wm->disp_clk);
 753	disp_clk.full = dfixed_div(disp_clk, a);
 754	a.full = dfixed_const(32);
 755	b.full = dfixed_mul(a, disp_clk);
 756
 757	a.full = dfixed_const(10);
 758	disp_clk_request_efficiency.full = dfixed_const(8);
 759	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 760
 761	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 762
 763	return dfixed_trunc(bandwidth);
 764}
 765
 766/**
 767 * dce_v8_0_available_bandwidth - get the min available bandwidth
 768 *
 769 * @wm: watermark calculation data
 770 *
 771 * Calculate the min available bandwidth used for display (CIK).
 772 * Used for display watermark bandwidth calculations
 773 * Returns the min available bandwidth in MBytes/s
 774 */
 775static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 776{
 777	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
 778	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 779	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 780	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 781
 782	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 783}
 784
 785/**
 786 * dce_v8_0_average_bandwidth - get the average available bandwidth
 787 *
 788 * @wm: watermark calculation data
 789 *
 790 * Calculate the average available bandwidth used for display (CIK).
 791 * Used for display watermark bandwidth calculations
 792 * Returns the average available bandwidth in MBytes/s
 793 */
 794static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 795{
 796	/* Calculate the display mode Average Bandwidth
 797	 * DisplayMode should contain the source and destination dimensions,
 798	 * timing, etc.
 799	 */
 800	fixed20_12 bpp;
 801	fixed20_12 line_time;
 802	fixed20_12 src_width;
 803	fixed20_12 bandwidth;
 804	fixed20_12 a;
 805
 806	a.full = dfixed_const(1000);
 807	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 808	line_time.full = dfixed_div(line_time, a);
 809	bpp.full = dfixed_const(wm->bytes_per_pixel);
 810	src_width.full = dfixed_const(wm->src_width);
 811	bandwidth.full = dfixed_mul(src_width, bpp);
 812	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 813	bandwidth.full = dfixed_div(bandwidth, line_time);
 814
 815	return dfixed_trunc(bandwidth);
 816}
 817
 818/**
 819 * dce_v8_0_latency_watermark - get the latency watermark
 820 *
 821 * @wm: watermark calculation data
 822 *
 823 * Calculate the latency watermark (CIK).
 824 * Used for display watermark bandwidth calculations
 825 * Returns the latency watermark in ns
 826 */
 827static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 828{
 829	/* First calculate the latency in ns */
 830	u32 mc_latency = 2000; /* 2000 ns. */
 831	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
 832	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 833	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 834	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 835	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 836		(wm->num_heads * cursor_line_pair_return_time);
 837	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 838	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 839	u32 tmp, dmif_size = 12288;
 840	fixed20_12 a, b, c;
 841
 842	if (wm->num_heads == 0)
 843		return 0;
 844
 845	a.full = dfixed_const(2);
 846	b.full = dfixed_const(1);
 847	if ((wm->vsc.full > a.full) ||
 848	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 849	    (wm->vtaps >= 5) ||
 850	    ((wm->vsc.full >= a.full) && wm->interlaced))
 851		max_src_lines_per_dst_line = 4;
 852	else
 853		max_src_lines_per_dst_line = 2;
 854
 855	a.full = dfixed_const(available_bandwidth);
 856	b.full = dfixed_const(wm->num_heads);
 857	a.full = dfixed_div(a, b);
 858	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
 859	tmp = min(dfixed_trunc(a), tmp);
 860
 861	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 862
 863	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 864	b.full = dfixed_const(1000);
 865	c.full = dfixed_const(lb_fill_bw);
 866	b.full = dfixed_div(c, b);
 867	a.full = dfixed_div(a, b);
 868	line_fill_time = dfixed_trunc(a);
 869
 870	if (line_fill_time < wm->active_time)
 871		return latency;
 872	else
 873		return latency + (line_fill_time - wm->active_time);
 874
 875}
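/*
 * Editorial note (not in the original source): a worked example of the
 * latency math above, assuming one head, 5600 MBytes/s of available
 * bandwidth and a 148500 kHz display clock:
 *   worst_chunk_return_time      = 512 * 8 * 1000 / 5600 ~= 731 ns
 *   cursor_line_pair_return_time = 128 * 4 * 1000 / 5600 ~=  91 ns
 *   dc_latency                   = 40000000 / 148500     ~= 269 ns
 * so latency = 2000 + (2 * 731 + 1 * 91) + 269 ~= 3822 ns before the
 * line-fill-time correction at the end of the function.
 */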
 876
 877/**
 878 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 879 * average and available dram bandwidth
 880 *
 881 * @wm: watermark calculation data
 882 *
 883 * Check if the display average bandwidth fits in the display
 884 * dram bandwidth (CIK).
 885 * Used for display watermark bandwidth calculations
 886 * Returns true if the display fits, false if not.
 887 */
 888static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 889{
 890	if (dce_v8_0_average_bandwidth(wm) <=
 891	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 892		return true;
 893	else
 894		return false;
 895}
 896
 897/**
 898 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 899 * average and available bandwidth
 900 *
 901 * @wm: watermark calculation data
 902 *
 903 * Check if the display average bandwidth fits in the display
 904 * available bandwidth (CIK).
 905 * Used for display watermark bandwidth calculations
 906 * Returns true if the display fits, false if not.
 907 */
 908static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
 909{
 910	if (dce_v8_0_average_bandwidth(wm) <=
 911	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
 912		return true;
 913	else
 914		return false;
 915}
 916
 917/**
 918 * dce_v8_0_check_latency_hiding - check latency hiding
 919 *
 920 * @wm: watermark calculation data
 921 *
 922 * Check latency hiding (CIK).
 923 * Used for display watermark bandwidth calculations
 924 * Returns true if the display fits, false if not.
 925 */
 926static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
 927{
 928	u32 lb_partitions = wm->lb_size / wm->src_width;
 929	u32 line_time = wm->active_time + wm->blank_time;
 930	u32 latency_tolerant_lines;
 931	u32 latency_hiding;
 932	fixed20_12 a;
 933
 934	a.full = dfixed_const(1);
 935	if (wm->vsc.full > a.full)
 936		latency_tolerant_lines = 1;
 937	else {
 938		if (lb_partitions <= (wm->vtaps + 1))
 939			latency_tolerant_lines = 1;
 940		else
 941			latency_tolerant_lines = 2;
 942	}
 943
 944	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 945
 946	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
 947		return true;
 948	else
 949		return false;
 950}
 951
 952/**
 953 * dce_v8_0_program_watermarks - program display watermarks
 954 *
 955 * @adev: amdgpu_device pointer
 956 * @amdgpu_crtc: the selected display controller
 957 * @lb_size: line buffer size
 958 * @num_heads: number of display controllers in use
 959 *
 960 * Calculate and program the display watermarks for the
 961 * selected display controller (CIK).
 962 */
 963static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 964					struct amdgpu_crtc *amdgpu_crtc,
 965					u32 lb_size, u32 num_heads)
 966{
 967	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 968	struct dce8_wm_params wm_low, wm_high;
 969	u32 active_time;
 970	u32 line_time = 0;
 971	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 972	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 973
 974	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 975		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
 976					    (u32)mode->clock);
 977		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
 978					  (u32)mode->clock);
 979		line_time = min(line_time, (u32)65535);
 980
 981		/* watermark for high clocks */
 982		if (adev->pm.dpm_enabled) {
 983			wm_high.yclk =
 984				amdgpu_dpm_get_mclk(adev, false) * 10;
 985			wm_high.sclk =
 986				amdgpu_dpm_get_sclk(adev, false) * 10;
 987		} else {
 988			wm_high.yclk = adev->pm.current_mclk * 10;
 989			wm_high.sclk = adev->pm.current_sclk * 10;
 990		}
 991
 992		wm_high.disp_clk = mode->clock;
 993		wm_high.src_width = mode->crtc_hdisplay;
 994		wm_high.active_time = active_time;
 995		wm_high.blank_time = line_time - wm_high.active_time;
 996		wm_high.interlaced = false;
 997		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 998			wm_high.interlaced = true;
 999		wm_high.vsc = amdgpu_crtc->vsc;
1000		wm_high.vtaps = 1;
1001		if (amdgpu_crtc->rmx_type != RMX_OFF)
1002			wm_high.vtaps = 2;
1003		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1004		wm_high.lb_size = lb_size;
1005		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1006		wm_high.num_heads = num_heads;
1007
1008		/* set for high clocks */
1009		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1010
1011		/* possibly force display priority to high */
1012		/* should really do this at mode validation time... */
1013		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1014		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1015		    !dce_v8_0_check_latency_hiding(&wm_high) ||
1016		    (adev->mode_info.disp_priority == 2)) {
1017			DRM_DEBUG_KMS("force priority to high\n");
1018		}
1019
1020		/* watermark for low clocks */
1021		if (adev->pm.dpm_enabled) {
1022			wm_low.yclk =
1023				amdgpu_dpm_get_mclk(adev, true) * 10;
1024			wm_low.sclk =
1025				amdgpu_dpm_get_sclk(adev, true) * 10;
1026		} else {
1027			wm_low.yclk = adev->pm.current_mclk * 10;
1028			wm_low.sclk = adev->pm.current_sclk * 10;
1029		}
1030
1031		wm_low.disp_clk = mode->clock;
1032		wm_low.src_width = mode->crtc_hdisplay;
1033		wm_low.active_time = active_time;
1034		wm_low.blank_time = line_time - wm_low.active_time;
1035		wm_low.interlaced = false;
1036		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1037			wm_low.interlaced = true;
1038		wm_low.vsc = amdgpu_crtc->vsc;
1039		wm_low.vtaps = 1;
1040		if (amdgpu_crtc->rmx_type != RMX_OFF)
1041			wm_low.vtaps = 2;
1042		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1043		wm_low.lb_size = lb_size;
1044		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1045		wm_low.num_heads = num_heads;
1046
1047		/* set for low clocks */
1048		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1049
1050		/* possibly force display priority to high */
1051		/* should really do this at mode validation time... */
1052		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1053		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1054		    !dce_v8_0_check_latency_hiding(&wm_low) ||
1055		    (adev->mode_info.disp_priority == 2)) {
1056			DRM_DEBUG_KMS("force priority to high\n");
1057		}
1058		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1059	}
1060
1061	/* select wm A */
1062	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1063	tmp = wm_mask;
1064	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1065	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1066	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1067	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1068	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1069		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1070	/* select wm B */
1071	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1072	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1073	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1074	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1075	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1076	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1077		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1078	/* restore original selection */
1079	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1080
1081	/* save values for DPM */
1082	amdgpu_crtc->line_time = line_time;
1083	amdgpu_crtc->wm_high = latency_watermark_a;
1084	amdgpu_crtc->wm_low = latency_watermark_b;
1085	/* Save number of lines the linebuffer leads before the scanout */
1086	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1087}
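/*
 * Editorial note (not in the original source): watermark set A is programmed
 * for the high (performance) clocks and set B for the low (power-saving)
 * clocks; the wm_high/wm_low and line_time values saved above are consumed
 * later by the DPM code when it switches between those levels.
 */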
1088
1089/**
1090 * dce_v8_0_bandwidth_update - program display watermarks
1091 *
1092 * @adev: amdgpu_device pointer
1093 *
1094 * Calculate and program the display watermarks and line
1095 * buffer allocation (CIK).
1096 */
1097static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1098{
1099	struct drm_display_mode *mode = NULL;
1100	u32 num_heads = 0, lb_size;
1101	int i;
1102
1103	amdgpu_display_update_priority(adev);
1104
1105	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1106		if (adev->mode_info.crtcs[i]->base.enabled)
1107			num_heads++;
1108	}
1109	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1110		mode = &adev->mode_info.crtcs[i]->base.mode;
1111		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1112		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1113					    lb_size, num_heads);
1114	}
1115}
1116
1117static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1118{
1119	int i;
1120	u32 offset, tmp;
1121
1122	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1123		offset = adev->mode_info.audio.pin[i].offset;
1124		tmp = RREG32_AUDIO_ENDPT(offset,
1125					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1126		if (((tmp &
1127		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1128		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1129			adev->mode_info.audio.pin[i].connected = false;
1130		else
1131			adev->mode_info.audio.pin[i].connected = true;
1132	}
1133}
1134
1135static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1136{
1137	int i;
1138
1139	dce_v8_0_audio_get_connected_pins(adev);
1140
1141	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1142		if (adev->mode_info.audio.pin[i].connected)
1143			return &adev->mode_info.audio.pin[i];
1144	}
1145	DRM_ERROR("No connected audio pins found!\n");
1146	return NULL;
1147}
1148
1149static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1150{
1151	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1152	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1153	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1154	u32 offset;
1155
1156	if (!dig || !dig->afmt || !dig->afmt->pin)
1157		return;
1158
1159	offset = dig->afmt->offset;
1160
1161	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1162	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1163}
1164
1165static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1166						struct drm_display_mode *mode)
1167{
1168	struct drm_device *dev = encoder->dev;
1169	struct amdgpu_device *adev = drm_to_adev(dev);
1170	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1171	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1172	struct drm_connector *connector;
1173	struct drm_connector_list_iter iter;
1174	struct amdgpu_connector *amdgpu_connector = NULL;
1175	u32 tmp = 0, offset;
1176
1177	if (!dig || !dig->afmt || !dig->afmt->pin)
1178		return;
1179
1180	offset = dig->afmt->pin->offset;
1181
1182	drm_connector_list_iter_begin(dev, &iter);
1183	drm_for_each_connector_iter(connector, &iter) {
1184		if (connector->encoder == encoder) {
1185			amdgpu_connector = to_amdgpu_connector(connector);
1186			break;
1187		}
1188	}
1189	drm_connector_list_iter_end(&iter);
1190
1191	if (!amdgpu_connector) {
1192		DRM_ERROR("Couldn't find encoder's connector\n");
1193		return;
1194	}
1195
1196	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1197		if (connector->latency_present[1])
1198			tmp =
1199			(connector->video_latency[1] <<
1200			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1201			(connector->audio_latency[1] <<
1202			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1203		else
1204			tmp =
1205			(0 <<
1206			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1207			(0 <<
1208			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1209	} else {
1210		if (connector->latency_present[0])
1211			tmp =
1212			(connector->video_latency[0] <<
1213			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1214			(connector->audio_latency[0] <<
1215			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1216		else
1217			tmp =
1218			(0 <<
1219			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1220			(0 <<
1221			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1222
1223	}
1224	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1225}
1226
1227static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1228{
1229	struct drm_device *dev = encoder->dev;
1230	struct amdgpu_device *adev = drm_to_adev(dev);
1231	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1232	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1233	struct drm_connector *connector;
1234	struct drm_connector_list_iter iter;
1235	struct amdgpu_connector *amdgpu_connector = NULL;
1236	u32 offset, tmp;
1237	u8 *sadb = NULL;
1238	int sad_count;
1239
1240	if (!dig || !dig->afmt || !dig->afmt->pin)
1241		return;
1242
1243	offset = dig->afmt->pin->offset;
1244
1245	drm_connector_list_iter_begin(dev, &iter);
1246	drm_for_each_connector_iter(connector, &iter) {
1247		if (connector->encoder == encoder) {
1248			amdgpu_connector = to_amdgpu_connector(connector);
1249			break;
1250		}
1251	}
1252	drm_connector_list_iter_end(&iter);
1253
1254	if (!amdgpu_connector) {
1255		DRM_ERROR("Couldn't find encoder's connector\n");
1256		return;
1257	}
1258
1259	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1260	if (sad_count < 0) {
1261		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1262		sad_count = 0;
1263	}
1264
1265	/* program the speaker allocation */
1266	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1267	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1268		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1269	/* set HDMI mode */
1270	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1271	if (sad_count)
1272		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1273	else
1274		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1275	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1276
1277	kfree(sadb);
1278}
1279
1280static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1281{
1282	struct drm_device *dev = encoder->dev;
1283	struct amdgpu_device *adev = drm_to_adev(dev);
1284	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1285	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1286	u32 offset;
1287	struct drm_connector *connector;
1288	struct drm_connector_list_iter iter;
1289	struct amdgpu_connector *amdgpu_connector = NULL;
1290	struct cea_sad *sads;
1291	int i, sad_count;
1292
1293	static const u16 eld_reg_to_type[][2] = {
1294		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1295		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1296		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1297		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1298		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1299		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1300		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1301		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1302		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1303		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1304		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1305		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1306	};
1307
1308	if (!dig || !dig->afmt || !dig->afmt->pin)
1309		return;
1310
1311	offset = dig->afmt->pin->offset;
1312
1313	drm_connector_list_iter_begin(dev, &iter);
1314	drm_for_each_connector_iter(connector, &iter) {
1315		if (connector->encoder == encoder) {
1316			amdgpu_connector = to_amdgpu_connector(connector);
1317			break;
1318		}
1319	}
1320	drm_connector_list_iter_end(&iter);
1321
1322	if (!amdgpu_connector) {
1323		DRM_ERROR("Couldn't find encoder's connector\n");
1324		return;
1325	}
1326
1327	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1328	if (sad_count < 0)
1329		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1330	if (sad_count <= 0)
1331		return;
 
1332	BUG_ON(!sads);
1333
1334	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1335		u32 value = 0;
1336		u8 stereo_freqs = 0;
1337		int max_channels = -1;
1338		int j;
1339
1340		for (j = 0; j < sad_count; j++) {
1341			struct cea_sad *sad = &sads[j];
1342
1343			if (sad->format == eld_reg_to_type[i][1]) {
1344				if (sad->channels > max_channels) {
1345					value = (sad->channels <<
1346						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1347					        (sad->byte2 <<
1348						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1349					        (sad->freq <<
1350						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1351					max_channels = sad->channels;
1352				}
1353
1354				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1355					stereo_freqs |= sad->freq;
1356				else
1357					break;
1358			}
1359		}
1360
1361		value |= (stereo_freqs <<
1362			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1363
1364		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1365	}
1366
1367	kfree(sads);
1368}
1369
1370static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1371				  struct amdgpu_audio_pin *pin,
1372				  bool enable)
1373{
1374	if (!pin)
1375		return;
1376
1377	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1378		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1379}
1380
1381static const u32 pin_offsets[7] =
1382{
1383	(0x1780 - 0x1780),
1384	(0x1786 - 0x1780),
1385	(0x178c - 0x1780),
1386	(0x1792 - 0x1780),
1387	(0x1798 - 0x1780),
1388	(0x179d - 0x1780),
1389	(0x17a4 - 0x1780),
1390};
1391
1392static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1393{
1394	int i;
1395
1396	if (!amdgpu_audio)
1397		return 0;
1398
1399	adev->mode_info.audio.enabled = true;
1400
1401	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1402		adev->mode_info.audio.num_pins = 7;
1403	else if ((adev->asic_type == CHIP_KABINI) ||
1404		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1405		adev->mode_info.audio.num_pins = 3;
1406	else if ((adev->asic_type == CHIP_BONAIRE) ||
1407		 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1408		adev->mode_info.audio.num_pins = 7;
1409	else
1410		adev->mode_info.audio.num_pins = 3;
1411
1412	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1413		adev->mode_info.audio.pin[i].channels = -1;
1414		adev->mode_info.audio.pin[i].rate = -1;
1415		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1416		adev->mode_info.audio.pin[i].status_bits = 0;
1417		adev->mode_info.audio.pin[i].category_code = 0;
1418		adev->mode_info.audio.pin[i].connected = false;
1419		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1420		adev->mode_info.audio.pin[i].id = i;
1421		/* disable audio.  it will be set up later */
1422		/* XXX remove once we switch to ip funcs */
1423		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1424	}
1425
1426	return 0;
1427}
1428
1429static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1430{
1431	int i;
1432
1433	if (!amdgpu_audio)
1434		return;
1435
1436	if (!adev->mode_info.audio.enabled)
1437		return;
1438
1439	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1440		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1441
1442	adev->mode_info.audio.enabled = false;
1443}
1444
1445/*
1446 * update the N and CTS parameters for a given pixel clock rate
1447 */
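/*
 * Added note (not in the original source): per the HDMI spec the sink
 * regenerates the audio clock from the TMDS clock as
 * 128 * audio_rate = tmds_clock * N / CTS, so an (N, CTS) pair is
 * programmed per base rate; e.g. for 48 kHz audio on a 148.5 MHz TMDS
 * clock the recommended N of 6144 gives CTS = 148500.
 */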
1448static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1449{
1450	struct drm_device *dev = encoder->dev;
1451	struct amdgpu_device *adev = drm_to_adev(dev);
1452	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1453	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1454	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1455	uint32_t offset = dig->afmt->offset;
1456
1457	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1458	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1459
1460	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1461	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1462
1463	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1464	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1465}
1466
1467/*
1468 * build a HDMI Video Info Frame
1469 */
1470static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1471					       void *buffer, size_t size)
1472{
1473	struct drm_device *dev = encoder->dev;
1474	struct amdgpu_device *adev = drm_to_adev(dev);
1475	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1476	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1477	uint32_t offset = dig->afmt->offset;
1478	uint8_t *frame = buffer + 3;
1479	uint8_t *header = buffer;
1480
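	/* Added note (assumption about the packed layout): buffer + 3 skips
	 * the 3-byte infoframe header, so frame[0] is the checksum byte
	 * followed by the AVI payload; AFMT_AVI_INFO3 additionally carries
	 * the header version byte in its top byte.
	 */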
1481	WREG32(mmAFMT_AVI_INFO0 + offset,
1482		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1483	WREG32(mmAFMT_AVI_INFO1 + offset,
1484		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1485	WREG32(mmAFMT_AVI_INFO2 + offset,
1486		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1487	WREG32(mmAFMT_AVI_INFO3 + offset,
1488		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1489}
1490
1491static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1492{
1493	struct drm_device *dev = encoder->dev;
1494	struct amdgpu_device *adev = drm_to_adev(dev);
1495	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1496	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1497	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1498	u32 dto_phase = 24 * 1000;
1499	u32 dto_modulo = clock;
1500
1501	if (!dig || !dig->afmt)
1502		return;
1503
1504	/* XXX two dtos; generally use dto0 for hdmi */
1505	/* Express [24MHz / target pixel clock] as an exact rational
1506	 * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
1507	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1508	 */
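	/*
	 * Worked example (added, not in the original source): for a
	 * 148500 kHz (148.5 MHz) pixel clock this programs PHASE = 24000
	 * and MODULE = 148500, i.e. the exact ratio
	 * 24000 / 148500 = 24 MHz / 148.5 MHz.
	 */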
1509	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1510	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1511	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1512}
1513
1514/*
1515 * update the info frames with the data from the current display mode
1516 */
1517static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1518				  struct drm_display_mode *mode)
1519{
1520	struct drm_device *dev = encoder->dev;
1521	struct amdgpu_device *adev = drm_to_adev(dev);
1522	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1523	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1524	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1525	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1526	struct hdmi_avi_infoframe frame;
1527	uint32_t offset, val;
1528	ssize_t err;
1529	int bpc = 8;
1530
1531	if (!dig || !dig->afmt)
1532		return;
1533
1534	/* Silent, r600_hdmi_enable will raise WARN for us */
1535	if (!dig->afmt->enabled)
1536		return;
1537
1538	offset = dig->afmt->offset;
1539
1540	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1541	if (encoder->crtc) {
1542		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1543		bpc = amdgpu_crtc->bpc;
1544	}
1545
1546	/* disable audio prior to setting up hw */
1547	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1548	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1549
1550	dce_v8_0_audio_set_dto(encoder, mode->clock);
1551
1552	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1553	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1554
1555	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1556
1557	val = RREG32(mmHDMI_CONTROL + offset);
1558	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1559	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1560
1561	switch (bpc) {
1562	case 0:
1563	case 6:
1564	case 8:
1565	case 16:
1566	default:
1567		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1568			  connector->name, bpc);
1569		break;
1570	case 10:
1571		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1572		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1573		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1574			  connector->name);
1575		break;
1576	case 12:
1577		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1578		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1579		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1580			  connector->name);
1581		break;
1582	}
1583
1584	WREG32(mmHDMI_CONTROL + offset, val);
1585
1586	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1587	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1588	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1589	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1590
1591	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1592	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1593	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1594
1595	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1596	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1597
1598	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1599	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1600
1601	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1602
1603	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1604	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1605	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1606
1607	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1608	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1609
1610	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1611
1612	if (bpc > 8)
1613		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1614		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1615	else
1616		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1617		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1618		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1619
1620	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1621
1622	WREG32(mmAFMT_60958_0 + offset,
1623	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1624
1625	WREG32(mmAFMT_60958_1 + offset,
1626	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1627
1628	WREG32(mmAFMT_60958_2 + offset,
1629	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1630	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1631	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1632	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1633	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1634	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1635
1636	dce_v8_0_audio_write_speaker_allocation(encoder);
1637
1638
1639	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1640	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1641
1642	dce_v8_0_afmt_audio_select_pin(encoder);
1643	dce_v8_0_audio_write_sad_regs(encoder);
1644	dce_v8_0_audio_write_latency_fields(encoder, mode);
1645
1646	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1647	if (err < 0) {
1648		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1649		return;
1650	}
1651
1652	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1653	if (err < 0) {
1654		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1655		return;
1656	}
1657
1658	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1659
1660	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1661		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1662		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for avi info values to be updated */
1663
1664	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1665		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1666		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1667
1668	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1669		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1670
1671	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1672	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1673	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1674	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1675
1676	/* enable audio after setting up hw */
1677	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1678}
1679
1680static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1681{
1682	struct drm_device *dev = encoder->dev;
1683	struct amdgpu_device *adev = drm_to_adev(dev);
1684	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1685	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1686
1687	if (!dig || !dig->afmt)
1688		return;
1689
1690	/* Silent, r600_hdmi_enable will raise WARN for us */
1691	if (enable && dig->afmt->enabled)
1692		return;
1693	if (!enable && !dig->afmt->enabled)
1694		return;
1695
1696	if (!enable && dig->afmt->pin) {
1697		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1698		dig->afmt->pin = NULL;
1699	}
1700
1701	dig->afmt->enabled = enable;
1702
1703	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1704		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1705}
1706
1707static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1708{
1709	int i;
1710
1711	for (i = 0; i < adev->mode_info.num_dig; i++)
1712		adev->mode_info.afmt[i] = NULL;
1713
1714	/* DCE8 has audio blocks tied to DIG encoders */
1715	for (i = 0; i < adev->mode_info.num_dig; i++) {
1716		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1717		if (adev->mode_info.afmt[i]) {
1718			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1719			adev->mode_info.afmt[i]->id = i;
1720		} else {
1721			int j;
1722			for (j = 0; j < i; j++) {
1723				kfree(adev->mode_info.afmt[j]);
1724				adev->mode_info.afmt[j] = NULL;
1725			}
1726			return -ENOMEM;
1727		}
1728	}
1729	return 0;
1730}
1731
1732static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1733{
1734	int i;
1735
1736	for (i = 0; i < adev->mode_info.num_dig; i++) {
1737		kfree(adev->mode_info.afmt[i]);
1738		adev->mode_info.afmt[i] = NULL;
1739	}
1740}
1741
1742static const u32 vga_control_regs[6] =
1743{
1744	mmD1VGA_CONTROL,
1745	mmD2VGA_CONTROL,
1746	mmD3VGA_CONTROL,
1747	mmD4VGA_CONTROL,
1748	mmD5VGA_CONTROL,
1749	mmD6VGA_CONTROL,
1750};
1751
1752static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1753{
1754	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1755	struct drm_device *dev = crtc->dev;
1756	struct amdgpu_device *adev = drm_to_adev(dev);
1757	u32 vga_control;
1758
1759	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1760	if (enable)
1761		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1762	else
1763		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1764}
1765
1766static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1767{
1768	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1769	struct drm_device *dev = crtc->dev;
1770	struct amdgpu_device *adev = drm_to_adev(dev);
1771
1772	if (enable)
1773		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1774	else
1775		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1776}
1777
1778static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1779				     struct drm_framebuffer *fb,
1780				     int x, int y, int atomic)
1781{
1782	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1783	struct drm_device *dev = crtc->dev;
1784	struct amdgpu_device *adev = drm_to_adev(dev);
 
1785	struct drm_framebuffer *target_fb;
1786	struct drm_gem_object *obj;
1787	struct amdgpu_bo *abo;
1788	uint64_t fb_location, tiling_flags;
1789	uint32_t fb_format, fb_pitch_pixels;
1790	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1791	u32 pipe_config;
1792	u32 viewport_w, viewport_h;
1793	int r;
1794	bool bypass_lut = false;
 
1795
1796	/* no fb bound */
1797	if (!atomic && !crtc->primary->fb) {
1798		DRM_DEBUG_KMS("No FB bound\n");
1799		return 0;
1800	}
1801
1802	if (atomic)
 
1803		target_fb = fb;
1804	else
 
1805		target_fb = crtc->primary->fb;
 
1806
1807	/* If atomic, assume fb object is pinned & idle & fenced and
1808	 * just update base pointers
1809	 */
1810	obj = target_fb->obj[0];
1811	abo = gem_to_amdgpu_bo(obj);
1812	r = amdgpu_bo_reserve(abo, false);
1813	if (unlikely(r != 0))
1814		return r;
1815
1816	if (!atomic) {
1817		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 
 
1818		if (unlikely(r != 0)) {
1819			amdgpu_bo_unreserve(abo);
1820			return -EINVAL;
1821		}
1822	}
1823	fb_location = amdgpu_bo_gpu_offset(abo);
1824
1825	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1826	amdgpu_bo_unreserve(abo);
1827
1828	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1829
1830	switch (target_fb->format->format) {
1831	case DRM_FORMAT_C8:
1832		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1833			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1834		break;
1835	case DRM_FORMAT_XRGB4444:
1836	case DRM_FORMAT_ARGB4444:
1837		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1838			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1839#ifdef __BIG_ENDIAN
1840		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1841#endif
1842		break;
1843	case DRM_FORMAT_XRGB1555:
1844	case DRM_FORMAT_ARGB1555:
1845		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1846			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1847#ifdef __BIG_ENDIAN
1848		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1849#endif
1850		break;
1851	case DRM_FORMAT_BGRX5551:
1852	case DRM_FORMAT_BGRA5551:
1853		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1854			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1855#ifdef __BIG_ENDIAN
1856		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1857#endif
1858		break;
1859	case DRM_FORMAT_RGB565:
1860		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1861			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1862#ifdef __BIG_ENDIAN
1863		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1864#endif
1865		break;
1866	case DRM_FORMAT_XRGB8888:
1867	case DRM_FORMAT_ARGB8888:
1868		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1869			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1870#ifdef __BIG_ENDIAN
1871		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1872#endif
1873		break;
1874	case DRM_FORMAT_XRGB2101010:
1875	case DRM_FORMAT_ARGB2101010:
1876		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1877			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1878#ifdef __BIG_ENDIAN
1879		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1880#endif
1881		/* fbs with greater than 8 bpc need to bypass the hw lut to retain precision */
1882		bypass_lut = true;
1883		break;
1884	case DRM_FORMAT_BGRX1010102:
1885	case DRM_FORMAT_BGRA1010102:
1886		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1887			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1888#ifdef __BIG_ENDIAN
1889		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1890#endif
1891		/* fbs with greater than 8 bpc need to bypass the hw lut to retain precision */
1892		bypass_lut = true;
1893		break;
1894	case DRM_FORMAT_XBGR8888:
1895	case DRM_FORMAT_ABGR8888:
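		/* Added note: ABGR is realized by programming the ARGB8888
		 * format and swapping the red and blue channels through the
		 * GRPH crossbar below.
		 */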
1896		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1897		             (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1898		fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1899		           (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1900#ifdef __BIG_ENDIAN
1901		fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1902#endif
1903		break;
1904	default:
1905		DRM_ERROR("Unsupported screen format %p4cc\n",
1906			  &target_fb->format->format);
1907		return -EINVAL;
1908	}
1909
1910	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1911		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1912
1913		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1914		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1915		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1916		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1917		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1918
1919		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1920		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1921		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1922		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1923		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1924		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1925		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1926	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1927		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1928	}
1929
1930	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1931
1932	dce_v8_0_vga_enable(crtc, false);
1933
1934	/* Make sure surface address is updated at vertical blank rather than
1935	 * horizontal blank
1936	 */
1937	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1938
1939	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1940	       upper_32_bits(fb_location));
1941	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1942	       upper_32_bits(fb_location));
1943	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1944	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1945	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1946	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1947	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1948	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1949
1950	/*
1951	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1952	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1953	 * retain the full precision throughout the pipeline.
1954	 */
1955	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1956		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1957		 ~LUT_10BIT_BYPASS_EN);
1958
1959	if (bypass_lut)
1960		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1961
1962	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1963	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1964	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1965	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1966	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1967	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1968
1969	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1970	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1971
1972	dce_v8_0_grph_enable(crtc, true);
1973
1974	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1975	       target_fb->height);
1976
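	/* Added note (assumption): the hw appears to require the viewport
	 * start to be 4-pixel aligned horizontally and 2-line aligned
	 * vertically, hence the masking below.
	 */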
1977	x &= ~3;
1978	y &= ~1;
1979	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1980	       (x << 16) | y);
1981	viewport_w = crtc->mode.hdisplay;
1982	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1983	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1984	       (viewport_w << 16) | viewport_h);
1985
1986	/* set pageflip to happen anywhere in vblank interval */
1987	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1988
1989	if (!atomic && fb && fb != crtc->primary->fb) {
1990		abo = gem_to_amdgpu_bo(fb->obj[0]);
 
1991		r = amdgpu_bo_reserve(abo, true);
1992		if (unlikely(r != 0))
1993			return r;
1994		amdgpu_bo_unpin(abo);
1995		amdgpu_bo_unreserve(abo);
1996	}
1997
1998	/* Bytes per pixel may have changed */
1999	dce_v8_0_bandwidth_update(adev);
2000
2001	return 0;
2002}
2003
2004static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2005				    struct drm_display_mode *mode)
2006{
2007	struct drm_device *dev = crtc->dev;
2008	struct amdgpu_device *adev = drm_to_adev(dev);
2009	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2010
2011	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2012		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2013		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2014	else
2015		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2016}
2017
2018static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2019{
2020	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2021	struct drm_device *dev = crtc->dev;
2022	struct amdgpu_device *adev = drm_to_adev(dev);
2023	u16 *r, *g, *b;
2024	int i;
2025
2026	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2027
2028	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2029	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2030		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2031	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2032	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2033	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2034	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2035	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2036	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2037		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2038
2039	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2040
2041	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2042	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2043	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2044
2045	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2046	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2047	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2048
2049	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2050	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2051
2052	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2053	r = crtc->gamma_store;
2054	g = r + crtc->gamma_size;
2055	b = g + crtc->gamma_size;
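	/*
	 * Added note: each DC_LUT_30_COLOR write packs one 10:10:10 LUT
	 * entry, taking the top 10 bits of each 16-bit gamma value:
	 * red in bits 29:20, green in 19:10, blue in 9:0.
	 */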
2056	for (i = 0; i < 256; i++) {
2057		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2058		       ((*r++ & 0xffc0) << 14) |
2059		       ((*g++ & 0xffc0) << 4) |
2060		       (*b++ >> 6));
2061	}
2062
2063	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2064	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2065		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2066		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2067	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2068	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2069		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2070	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2071	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2072		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2073	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2074	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2075		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2076	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2077	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2078	/* XXX this only needs to be programmed once per crtc at startup,
2079	 * not sure where the best place for it is
2080	 */
2081	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2082	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2083}
2084
2085static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2086{
2087	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2088	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2089
2090	switch (amdgpu_encoder->encoder_id) {
2091	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2092		if (dig->linkb)
2093			return 1;
2094		else
2095			return 0;
 
2096	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2097		if (dig->linkb)
2098			return 3;
2099		else
2100			return 2;
 
2101	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2102		if (dig->linkb)
2103			return 5;
2104		else
2105			return 4;
 
2106	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2107		return 6;
 
2108	default:
2109		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2110		return 0;
2111	}
2112}
2113
2114/**
2115 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2116 *
2117 * @crtc: drm crtc
2118 *
2119 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2120 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2121 * monitors a dedicated PPLL must be used.  If a particular board has
2122 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2123 * as there is no need to program the PLL itself.  If we are not able to
2124 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2125 * avoid messing up an existing monitor.
2126 *
2127 * Asic specific PLL information
2128 *
2129 * DCE 8.x
2130 * KB/ML
2131 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2132 * CI/KV
2133 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2134 *
2135 */
2136static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2137{
2138	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2139	struct drm_device *dev = crtc->dev;
2140	struct amdgpu_device *adev = drm_to_adev(dev);
2141	u32 pll_in_use;
2142	int pll;
2143
2144	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2145		if (adev->clock.dp_extclk)
2146			/* skip PPLL programming if using ext clock */
2147			return ATOM_PPLL_INVALID;
2148		else {
2149			/* use the same PPLL for all DP monitors */
2150			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2151			if (pll != ATOM_PPLL_INVALID)
2152				return pll;
2153		}
2154	} else {
2155		/* use the same PPLL for all monitors with the same clock */
2156		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2157		if (pll != ATOM_PPLL_INVALID)
2158			return pll;
2159	}
2160	/* otherwise, pick one of the plls */
2161	if ((adev->asic_type == CHIP_KABINI) ||
2162	    (adev->asic_type == CHIP_MULLINS)) {
2163		/* KB/ML has PPLL1 and PPLL2 */
2164		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2165		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2166			return ATOM_PPLL2;
2167		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2168			return ATOM_PPLL1;
2169		DRM_ERROR("unable to allocate a PPLL\n");
2170		return ATOM_PPLL_INVALID;
2171	} else {
2172		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2173		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2174		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2175			return ATOM_PPLL2;
2176		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2177			return ATOM_PPLL1;
2178		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2179			return ATOM_PPLL0;
2180		DRM_ERROR("unable to allocate a PPLL\n");
2181		return ATOM_PPLL_INVALID;
2182	}
2183	return ATOM_PPLL_INVALID;
2184}
2185
2186static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2187{
2188	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2189	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2190	uint32_t cur_lock;
2191
2192	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2193	if (lock)
2194		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2195	else
2196		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2197	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2198}
2199
2200static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2201{
2202	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2203	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2204
2205	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2206	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2207	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2208}
2209
2210static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2211{
2212	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2213	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2214
2215	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2216	       upper_32_bits(amdgpu_crtc->cursor_addr));
2217	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2218	       lower_32_bits(amdgpu_crtc->cursor_addr));
2219
2220	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2221	       CUR_CONTROL__CURSOR_EN_MASK |
2222	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2223	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2224}
2225
2226static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2227				       int x, int y)
2228{
2229	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2230	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2231	int xorigin = 0, yorigin = 0;
2232
2233	amdgpu_crtc->cursor_x = x;
2234	amdgpu_crtc->cursor_y = y;
2235
2236	/* avivo cursors are offset into the total surface */
2237	x += crtc->x;
2238	y += crtc->y;
2239	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2240
2241	if (x < 0) {
2242		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2243		x = 0;
2244	}
2245	if (y < 0) {
2246		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2247		y = 0;
2248	}
2249
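	/* Added note: when the cursor hangs off the top/left edge, the
	 * programmed position is clamped to 0 and the overhang is absorbed
	 * by shifting the hot spot instead.
	 */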
2250	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2251	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2252	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2253	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2254
2255	return 0;
2256}
2257
2258static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2259				     int x, int y)
2260{
2261	int ret;
2262
2263	dce_v8_0_lock_cursor(crtc, true);
2264	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2265	dce_v8_0_lock_cursor(crtc, false);
2266
2267	return ret;
2268}
2269
2270static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2271				     struct drm_file *file_priv,
2272				     uint32_t handle,
2273				     uint32_t width,
2274				     uint32_t height,
2275				     int32_t hot_x,
2276				     int32_t hot_y)
2277{
2278	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2279	struct drm_gem_object *obj;
2280	struct amdgpu_bo *aobj;
2281	int ret;
2282
2283	if (!handle) {
2284		/* turn off cursor */
2285		dce_v8_0_hide_cursor(crtc);
2286		obj = NULL;
2287		goto unpin;
2288	}
2289
2290	if ((width > amdgpu_crtc->max_cursor_width) ||
2291	    (height > amdgpu_crtc->max_cursor_height)) {
2292		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2293		return -EINVAL;
2294	}
2295
2296	obj = drm_gem_object_lookup(file_priv, handle);
2297	if (!obj) {
2298		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2299		return -ENOENT;
2300	}
2301
2302	aobj = gem_to_amdgpu_bo(obj);
2303	ret = amdgpu_bo_reserve(aobj, false);
2304	if (ret != 0) {
2305		drm_gem_object_put(obj);
2306		return ret;
2307	}
2308
2309	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2310	amdgpu_bo_unreserve(aobj);
2311	if (ret) {
2312		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2313		drm_gem_object_put(obj);
2314		return ret;
2315	}
2316	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2317
2318	dce_v8_0_lock_cursor(crtc, true);
2319
2320	if (width != amdgpu_crtc->cursor_width ||
2321	    height != amdgpu_crtc->cursor_height ||
2322	    hot_x != amdgpu_crtc->cursor_hot_x ||
2323	    hot_y != amdgpu_crtc->cursor_hot_y) {
2324		int x, y;
2325
2326		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2327		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2328
2329		dce_v8_0_cursor_move_locked(crtc, x, y);
2330
2331		amdgpu_crtc->cursor_width = width;
2332		amdgpu_crtc->cursor_height = height;
2333		amdgpu_crtc->cursor_hot_x = hot_x;
2334		amdgpu_crtc->cursor_hot_y = hot_y;
2335	}
2336
2337	dce_v8_0_show_cursor(crtc);
2338	dce_v8_0_lock_cursor(crtc, false);
2339
2340unpin:
2341	if (amdgpu_crtc->cursor_bo) {
2342		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2343		ret = amdgpu_bo_reserve(aobj, true);
2344		if (likely(ret == 0)) {
2345			amdgpu_bo_unpin(aobj);
2346			amdgpu_bo_unreserve(aobj);
2347		}
2348		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2349	}
2350
2351	amdgpu_crtc->cursor_bo = obj;
2352	return 0;
2353}
2354
2355static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2356{
2357	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2358
2359	if (amdgpu_crtc->cursor_bo) {
2360		dce_v8_0_lock_cursor(crtc, true);
2361
2362		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2363					    amdgpu_crtc->cursor_y);
2364
2365		dce_v8_0_show_cursor(crtc);
2366
2367		dce_v8_0_lock_cursor(crtc, false);
2368	}
2369}
2370
2371static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2372				   u16 *blue, uint32_t size,
2373				   struct drm_modeset_acquire_ctx *ctx)
2374{
2375	dce_v8_0_crtc_load_lut(crtc);
2376
2377	return 0;
2378}
2379
2380static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2381{
2382	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2383
2384	drm_crtc_cleanup(crtc);
2385	kfree(amdgpu_crtc);
2386}
2387
2388static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2389	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2390	.cursor_move = dce_v8_0_crtc_cursor_move,
2391	.gamma_set = dce_v8_0_crtc_gamma_set,
2392	.set_config = amdgpu_display_crtc_set_config,
2393	.destroy = dce_v8_0_crtc_destroy,
2394	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2395	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2396	.enable_vblank = amdgpu_enable_vblank_kms,
2397	.disable_vblank = amdgpu_disable_vblank_kms,
2398	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2399};
2400
2401static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2402{
2403	struct drm_device *dev = crtc->dev;
2404	struct amdgpu_device *adev = drm_to_adev(dev);
2405	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2406	unsigned type;
2407
2408	switch (mode) {
2409	case DRM_MODE_DPMS_ON:
2410		amdgpu_crtc->enabled = true;
2411		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2412		dce_v8_0_vga_enable(crtc, true);
2413		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2414		dce_v8_0_vga_enable(crtc, false);
2415		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2416		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2417						amdgpu_crtc->crtc_id);
2418		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2419		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2420		drm_crtc_vblank_on(crtc);
2421		dce_v8_0_crtc_load_lut(crtc);
2422		break;
2423	case DRM_MODE_DPMS_STANDBY:
2424	case DRM_MODE_DPMS_SUSPEND:
2425	case DRM_MODE_DPMS_OFF:
2426		drm_crtc_vblank_off(crtc);
2427		if (amdgpu_crtc->enabled) {
2428			dce_v8_0_vga_enable(crtc, true);
2429			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2430			dce_v8_0_vga_enable(crtc, false);
2431		}
2432		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2433		amdgpu_crtc->enabled = false;
2434		break;
2435	}
2436	/* adjust pm to dpms */
2437	amdgpu_dpm_compute_clocks(adev);
2438}
2439
2440static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2441{
2442	/* disable crtc pair power gating before programming */
2443	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2444	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2445	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2446}
2447
2448static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2449{
2450	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2451	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2452}
2453
2454static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2455{
2456	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2457	struct drm_device *dev = crtc->dev;
2458	struct amdgpu_device *adev = drm_to_adev(dev);
2459	struct amdgpu_atom_ss ss;
2460	int i;
2461
2462	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2463	if (crtc->primary->fb) {
2464		int r;
 
2465		struct amdgpu_bo *abo;
2466
2467		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 
2468		r = amdgpu_bo_reserve(abo, true);
2469		if (unlikely(r))
2470			DRM_ERROR("failed to reserve abo before unpin\n");
2471		else {
2472			amdgpu_bo_unpin(abo);
2473			amdgpu_bo_unreserve(abo);
2474		}
2475	}
2476	/* disable the GRPH */
2477	dce_v8_0_grph_enable(crtc, false);
2478
2479	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2480
2481	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2482		if (adev->mode_info.crtcs[i] &&
2483		    adev->mode_info.crtcs[i]->enabled &&
2484		    i != amdgpu_crtc->crtc_id &&
2485		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2486			/* one other crtc is using this pll, don't turn
2487			 * off the pll
2488			 */
2489			goto done;
2490		}
2491	}
2492
2493	switch (amdgpu_crtc->pll_id) {
2494	case ATOM_PPLL1:
2495	case ATOM_PPLL2:
2496		/* disable the ppll */
2497		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2498						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2499		break;
2500	case ATOM_PPLL0:
2501		/* disable the ppll */
2502		if ((adev->asic_type == CHIP_KAVERI) ||
2503		    (adev->asic_type == CHIP_BONAIRE) ||
2504		    (adev->asic_type == CHIP_HAWAII))
2505			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2506						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2507		break;
2508	default:
2509		break;
2510	}
2511done:
2512	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2513	amdgpu_crtc->adjusted_clock = 0;
2514	amdgpu_crtc->encoder = NULL;
2515	amdgpu_crtc->connector = NULL;
2516}
2517
2518static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2519				  struct drm_display_mode *mode,
2520				  struct drm_display_mode *adjusted_mode,
2521				  int x, int y, struct drm_framebuffer *old_fb)
2522{
2523	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2524
2525	if (!amdgpu_crtc->adjusted_clock)
2526		return -EINVAL;
2527
2528	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2529	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2530	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2531	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2532	amdgpu_atombios_crtc_scaler_setup(crtc);
2533	dce_v8_0_cursor_reset(crtc);
2534	/* update the hw version for dpm */
2535	amdgpu_crtc->hw_mode = *adjusted_mode;
2536
2537	return 0;
2538}
2539
2540static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2541				     const struct drm_display_mode *mode,
2542				     struct drm_display_mode *adjusted_mode)
2543{
2544	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2545	struct drm_device *dev = crtc->dev;
2546	struct drm_encoder *encoder;
2547
2548	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2549	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2550		if (encoder->crtc == crtc) {
2551			amdgpu_crtc->encoder = encoder;
2552			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2553			break;
2554		}
2555	}
2556	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2557		amdgpu_crtc->encoder = NULL;
2558		amdgpu_crtc->connector = NULL;
2559		return false;
2560	}
2561	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2562		return false;
2563	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2564		return false;
2565	/* pick pll */
2566	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2567	/* if we can't get a PPLL for a non-DP encoder, fail */
2568	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2569	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2570		return false;
2571
2572	return true;
2573}
2574
2575static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2576				  struct drm_framebuffer *old_fb)
2577{
2578	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2579}
2580
2581static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2582					 struct drm_framebuffer *fb,
2583					 int x, int y, enum mode_set_atomic state)
2584{
2585	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2586}
2587
2588static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2589	.dpms = dce_v8_0_crtc_dpms,
2590	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2591	.mode_set = dce_v8_0_crtc_mode_set,
2592	.mode_set_base = dce_v8_0_crtc_set_base,
2593	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2594	.prepare = dce_v8_0_crtc_prepare,
2595	.commit = dce_v8_0_crtc_commit,
2596	.disable = dce_v8_0_crtc_disable,
2597	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2598};
2599
2600static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2601{
2602	struct amdgpu_crtc *amdgpu_crtc;
2603
2604	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2605			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2606	if (amdgpu_crtc == NULL)
2607		return -ENOMEM;
2608
2609	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2610
2611	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2612	amdgpu_crtc->crtc_id = index;
2613	adev->mode_info.crtcs[index] = amdgpu_crtc;
2614
2615	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2616	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2617	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2618	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2619
2620	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2621
2622	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2623	amdgpu_crtc->adjusted_clock = 0;
2624	amdgpu_crtc->encoder = NULL;
2625	amdgpu_crtc->connector = NULL;
2626	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2627
2628	return 0;
2629}
2630
2631static int dce_v8_0_early_init(void *handle)
2632{
2633	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2634
2635	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2636	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2637
2638	dce_v8_0_set_display_funcs(adev);
2639
2640	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2641
2642	switch (adev->asic_type) {
2643	case CHIP_BONAIRE:
2644	case CHIP_HAWAII:
2645		adev->mode_info.num_hpd = 6;
2646		adev->mode_info.num_dig = 6;
2647		break;
2648	case CHIP_KAVERI:
2649		adev->mode_info.num_hpd = 6;
2650		adev->mode_info.num_dig = 7;
2651		break;
2652	case CHIP_KABINI:
2653	case CHIP_MULLINS:
2654		adev->mode_info.num_hpd = 6;
2655		adev->mode_info.num_dig = 6; /* ? */
2656		break;
2657	default:
2658		/* FIXME: not supported yet */
2659		return -EINVAL;
2660	}
2661
2662	dce_v8_0_set_irq_funcs(adev);
2663
2664	return 0;
2665}
2666
2667static int dce_v8_0_sw_init(void *handle)
2668{
2669	int r, i;
2670	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2671
2672	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2673		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2674		if (r)
2675			return r;
2676	}
2677
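	/* Added note (assumption): the even legacy src_ids 8, 10, ... 18 are
	 * the per-CRTC page-flip interrupts on CIK, matching the vblank
	 * src_ids 1..6 registered above.
	 */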
2678	for (i = 8; i < 20; i += 2) {
2679		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2680		if (r)
2681			return r;
2682	}
2683
2684	/* HPD hotplug */
2685	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2686	if (r)
2687		return r;
2688
2689	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2690
2691	adev_to_drm(adev)->mode_config.async_page_flip = true;
2692
2693	adev_to_drm(adev)->mode_config.max_width = 16384;
2694	adev_to_drm(adev)->mode_config.max_height = 16384;
2695
2696	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2697	if (adev->asic_type == CHIP_HAWAII)
2698		/* disable prefer shadow for now due to hibernation issues */
2699		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
2700	else
2701		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2702
2703	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2704
2705	r = amdgpu_display_modeset_create_props(adev);
2706	if (r)
2707		return r;
2708
2712	/* allocate crtcs */
2713	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2714		r = dce_v8_0_crtc_init(adev, i);
2715		if (r)
2716			return r;
2717	}
2718
2719	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2720		amdgpu_display_print_display_setup(adev_to_drm(adev));
2721	else
2722		return -EINVAL;
2723
2724	/* setup afmt */
2725	r = dce_v8_0_afmt_init(adev);
2726	if (r)
2727		return r;
2728
2729	r = dce_v8_0_audio_init(adev);
2730	if (r)
2731		return r;
2732
2733	/* Disable vblank IRQs aggressively for power-saving */
2734	/* XXX: can this be enabled for DC? */
2735	adev_to_drm(adev)->vblank_disable_immediate = true;
2736
2737	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2738	if (r)
2739		return r;
2740
2741	/* Pre-DCE11 */
2742	INIT_WORK(&adev->hotplug_work,
2743		  amdgpu_display_hotplug_work_func);
2744
2745	drm_kms_helper_poll_init(adev_to_drm(adev));
2746
2747	adev->mode_info.mode_config_initialized = true;
2748	return 0;
2749}
2750
2751static int dce_v8_0_sw_fini(void *handle)
2752{
2753	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2754
2755	kfree(adev->mode_info.bios_hardcoded_edid);
2756
2757	drm_kms_helper_poll_fini(adev_to_drm(adev));
2758
2759	dce_v8_0_audio_fini(adev);
2760
2761	dce_v8_0_afmt_fini(adev);
2762
2763	drm_mode_config_cleanup(adev_to_drm(adev));
2764	adev->mode_info.mode_config_initialized = false;
2765
2766	return 0;
2767}
2768
2769static int dce_v8_0_hw_init(void *handle)
2770{
2771	int i;
2772	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2773
2774	/* disable vga render */
2775	dce_v8_0_set_vga_render_state(adev, false);
2776	/* init dig PHYs, disp eng pll */
2777	amdgpu_atombios_encoder_init_dig(adev);
2778	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2779
2780	/* initialize hpd */
2781	dce_v8_0_hpd_init(adev);
2782
2783	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2784		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2785	}
2786
2787	dce_v8_0_pageflip_interrupt_init(adev);
2788
2789	return 0;
2790}
2791
2792static int dce_v8_0_hw_fini(void *handle)
2793{
2794	int i;
2795	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2796
2797	dce_v8_0_hpd_fini(adev);
2798
2799	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2800		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2801	}
2802
2803	dce_v8_0_pageflip_interrupt_fini(adev);
2804
2805	flush_work(&adev->hotplug_work);
2806
2807	return 0;
2808}
2809
2810static int dce_v8_0_suspend(void *handle)
2811{
2812	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2813	int r;
2814
2815	r = amdgpu_display_suspend_helper(adev);
2816	if (r)
2817		return r;
2818
2819	adev->mode_info.bl_level =
2820		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2821
2822	return dce_v8_0_hw_fini(handle);
2823}
2824
2825static int dce_v8_0_resume(void *handle)
2826{
2827	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2828	int ret;
2829
2830	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2831							   adev->mode_info.bl_level);
2832
2833	ret = dce_v8_0_hw_init(handle);
2834
2835	/* turn on the BL */
2836	if (adev->mode_info.bl_encoder) {
2837		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2838								  adev->mode_info.bl_encoder);
2839		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2840						    bl_level);
2841	}
2842	if (ret)
2843		return ret;
2844
2845	return amdgpu_display_resume_helper(adev);
2846}
2847
2848static bool dce_v8_0_is_idle(void *handle)
2849{
2850	return true;
2851}
2852
2853static int dce_v8_0_wait_for_idle(void *handle)
2854{
2855	return 0;
2856}
2857
2858static int dce_v8_0_soft_reset(void *handle)
2859{
2860	u32 srbm_soft_reset = 0, tmp;
2861	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2862
2863	if (dce_v8_0_is_display_hung(adev))
2864		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2865
2866	if (srbm_soft_reset) {
2867		tmp = RREG32(mmSRBM_SOFT_RESET);
2868		tmp |= srbm_soft_reset;
2869		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2870		WREG32(mmSRBM_SOFT_RESET, tmp);
2871		tmp = RREG32(mmSRBM_SOFT_RESET);
2872
2873		udelay(50);
2874
2875		tmp &= ~srbm_soft_reset;
2876		WREG32(mmSRBM_SOFT_RESET, tmp);
2877		tmp = RREG32(mmSRBM_SOFT_RESET);
2878
2879		/* Wait a little for things to settle down */
2880		udelay(50);
2881	}
2882	return 0;
2883}
2884
2885static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2886						     int crtc,
2887						     enum amdgpu_interrupt_state state)
2888{
2889	u32 reg_block, lb_interrupt_mask;
2890
2891	if (crtc >= adev->mode_info.num_crtc) {
2892		DRM_DEBUG("invalid crtc %d\n", crtc);
2893		return;
2894	}
2895
2896	switch (crtc) {
2897	case 0:
2898		reg_block = CRTC0_REGISTER_OFFSET;
2899		break;
2900	case 1:
2901		reg_block = CRTC1_REGISTER_OFFSET;
2902		break;
2903	case 2:
2904		reg_block = CRTC2_REGISTER_OFFSET;
2905		break;
2906	case 3:
2907		reg_block = CRTC3_REGISTER_OFFSET;
2908		break;
2909	case 4:
2910		reg_block = CRTC4_REGISTER_OFFSET;
2911		break;
2912	case 5:
2913		reg_block = CRTC5_REGISTER_OFFSET;
2914		break;
2915	default:
2916		DRM_DEBUG("invalid crtc %d\n", crtc);
2917		return;
2918	}
2919
2920	switch (state) {
2921	case AMDGPU_IRQ_STATE_DISABLE:
2922		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2923		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2924		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2925		break;
2926	case AMDGPU_IRQ_STATE_ENABLE:
2927		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2928		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2929		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2930		break;
2931	default:
2932		break;
2933	}
2934}
2935
2936static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2937						    int crtc,
2938						    enum amdgpu_interrupt_state state)
2939{
2940	u32 reg_block, lb_interrupt_mask;
2941
2942	if (crtc >= adev->mode_info.num_crtc) {
2943		DRM_DEBUG("invalid crtc %d\n", crtc);
2944		return;
2945	}
2946
2947	switch (crtc) {
2948	case 0:
2949		reg_block = CRTC0_REGISTER_OFFSET;
2950		break;
2951	case 1:
2952		reg_block = CRTC1_REGISTER_OFFSET;
2953		break;
2954	case 2:
2955		reg_block = CRTC2_REGISTER_OFFSET;
2956		break;
2957	case 3:
2958		reg_block = CRTC3_REGISTER_OFFSET;
2959		break;
2960	case 4:
2961		reg_block = CRTC4_REGISTER_OFFSET;
2962		break;
2963	case 5:
2964		reg_block = CRTC5_REGISTER_OFFSET;
2965		break;
2966	default:
2967		DRM_DEBUG("invalid crtc %d\n", crtc);
2968		return;
2969	}
2970
2971	switch (state) {
2972	case AMDGPU_IRQ_STATE_DISABLE:
2973		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2974		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2975		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2976		break;
2977	case AMDGPU_IRQ_STATE_ENABLE:
2978		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2979		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2980		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2981		break;
2982	default:
2983		break;
2984	}
2985}
2986
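/* enable or disable the hotplug detect interrupt for a single HPD pin */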
2987static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2988					    struct amdgpu_irq_src *src,
2989					    unsigned type,
2990					    enum amdgpu_interrupt_state state)
2991{
2992	u32 dc_hpd_int_cntl;
2993
2994	if (type >= adev->mode_info.num_hpd) {
2995		DRM_DEBUG("invalid hdp %d\n", type);
2996		return 0;
2997	}
2998
2999	switch (state) {
3000	case AMDGPU_IRQ_STATE_DISABLE:
3001		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3002		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3003		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3004		break;
3005	case AMDGPU_IRQ_STATE_ENABLE:
3006		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3007		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3008		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3009		break;
3010	default:
3011		break;
3012	}
3013
3014	return 0;
3015}
3016
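/* fan out per-type vblank/vline interrupt state changes to the CRTC helpers */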
3017static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3018					     struct amdgpu_irq_src *src,
3019					     unsigned type,
3020					     enum amdgpu_interrupt_state state)
3021{
3022	switch (type) {
3023	case AMDGPU_CRTC_IRQ_VBLANK1:
3024		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3025		break;
3026	case AMDGPU_CRTC_IRQ_VBLANK2:
3027		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3028		break;
3029	case AMDGPU_CRTC_IRQ_VBLANK3:
3030		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3031		break;
3032	case AMDGPU_CRTC_IRQ_VBLANK4:
3033		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3034		break;
3035	case AMDGPU_CRTC_IRQ_VBLANK5:
3036		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3037		break;
3038	case AMDGPU_CRTC_IRQ_VBLANK6:
3039		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3040		break;
3041	case AMDGPU_CRTC_IRQ_VLINE1:
3042		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3043		break;
3044	case AMDGPU_CRTC_IRQ_VLINE2:
3045		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3046		break;
3047	case AMDGPU_CRTC_IRQ_VLINE3:
3048		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3049		break;
3050	case AMDGPU_CRTC_IRQ_VLINE4:
3051		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3052		break;
3053	case AMDGPU_CRTC_IRQ_VLINE5:
3054		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3055		break;
3056	case AMDGPU_CRTC_IRQ_VLINE6:
3057		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3058		break;
3059	default:
3060		break;
3061	}
3062	return 0;
3063}
3064
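/* CRTC interrupt handler: ack the status bit and forward vblank events to DRM */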
3065static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3066			     struct amdgpu_irq_src *source,
3067			     struct amdgpu_iv_entry *entry)
3068{
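	/* CRTC interrupt source ids start at 1 (D1..D6) */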
3069	unsigned crtc = entry->src_id - 1;
3070	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3071	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3072								    crtc);
3073
3074	switch (entry->src_data[0]) {
3075	case 0: /* vblank */
3076		if (disp_int & interrupt_status_offsets[crtc].vblank)
3077			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3078		else
3079			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3080
3081		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3082			drm_handle_vblank(adev_to_drm(adev), crtc);
3083		}
3084		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3085		break;
3086	case 1: /* vline */
3087		if (disp_int & interrupt_status_offsets[crtc].vline)
3088			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3089		else
3090			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3091
3092		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3093		break;
3094	default:
3095		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3096		break;
3097	}
3098
3099	return 0;
3100}
3101
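/* mask or unmask the graphics pageflip interrupt for one CRTC */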
3102static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3103						 struct amdgpu_irq_src *src,
3104						 unsigned type,
3105						 enum amdgpu_interrupt_state state)
3106{
3107	u32 reg;
3108
3109	if (type >= adev->mode_info.num_crtc) {
3110		DRM_ERROR("invalid pageflip crtc %d\n", type);
3111		return -EINVAL;
3112	}
3113
3114	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3115	if (state == AMDGPU_IRQ_STATE_DISABLE)
3116		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3117		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3118	else
3119		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3120		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3121
3122	return 0;
3123}
3124
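/* pageflip completion handler: ack the interrupt, send the queued event and unpin the old framebuffer */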
3125static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3126				struct amdgpu_irq_src *source,
3127				struct amdgpu_iv_entry *entry)
3128{
3129	unsigned long flags;
3130	unsigned crtc_id;
3131	struct amdgpu_crtc *amdgpu_crtc;
3132	struct amdgpu_flip_work *works;
3133
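	/* pageflip sources come in pairs per display: ids 8,10,... map to crtc 0,1,... */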
3134	crtc_id = (entry->src_id - 8) >> 1;
3135
3136	if (crtc_id >= adev->mode_info.num_crtc) {
3137		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3138		return -EINVAL;
3139	}
3140	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3141
3142	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3143	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3144		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3145		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3146
3147	/* the IRQ can fire while the crtc is still being initialized */
3148	if (!amdgpu_crtc)
3149		return 0;
3150
3151	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3152	works = amdgpu_crtc->pflip_works;
3153	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3154		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d)\n",
3155				 amdgpu_crtc->pflip_status,
3156				 AMDGPU_FLIP_SUBMITTED);
3158		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3159		return 0;
3160	}
3161
3162	/* page flip completed. clean up */
3163	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3164	amdgpu_crtc->pflip_works = NULL;
3165
3166	/* wake up userspace */
3167	if (works->event)
3168		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3169
3170	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3171
3172	drm_crtc_vblank_put(&amdgpu_crtc->base);
3173	schedule_work(&works->unpin_work);
3174
3175	return 0;
3176}
3177
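/* hotplug interrupt handler: ack the HPD interrupt and schedule the hotplug worker */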
3178static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3179			    struct amdgpu_irq_src *source,
3180			    struct amdgpu_iv_entry *entry)
3181{
3182	uint32_t disp_int, mask, tmp;
3183	unsigned hpd;
3184
3185	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3186		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3187		return 0;
3188	}
3189
3190	hpd = entry->src_data[0];
3191	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3192	mask = interrupt_status_offsets[hpd].hpd;
3193
3194	if (disp_int & mask) {
3195		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3196		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3197		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3198		schedule_work(&adev->hotplug_work);
3199		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3200	}
3201
3202	return 0;
3204}
3205
3206static int dce_v8_0_set_clockgating_state(void *handle,
3207					  enum amd_clockgating_state state)
3208{
3209	return 0;
3210}
3211
3212static int dce_v8_0_set_powergating_state(void *handle,
3213					  enum amd_powergating_state state)
3214{
3215	return 0;
3216}
3217
3218static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3219	.name = "dce_v8_0",
3220	.early_init = dce_v8_0_early_init,
3221	.late_init = NULL,
3222	.sw_init = dce_v8_0_sw_init,
3223	.sw_fini = dce_v8_0_sw_fini,
3224	.hw_init = dce_v8_0_hw_init,
3225	.hw_fini = dce_v8_0_hw_fini,
3226	.suspend = dce_v8_0_suspend,
3227	.resume = dce_v8_0_resume,
3228	.is_idle = dce_v8_0_is_idle,
3229	.wait_for_idle = dce_v8_0_wait_for_idle,
3230	.soft_reset = dce_v8_0_soft_reset,
3231	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3232	.set_powergating_state = dce_v8_0_set_powergating_state,
3233};
3234
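/* program the encoder for the adjusted mode; HDMI audio/AFMT setup also happens here */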
3235static void
3236dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3237			  struct drm_display_mode *mode,
3238			  struct drm_display_mode *adjusted_mode)
3239{
3240	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3241
3242	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3243
3244	/* need to call this here rather than in prepare() since we need some crtc info */
3245	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3246
3247	/* setting the scaler clears this on some chips, so program it here */
3248	dce_v8_0_set_interleave(encoder->crtc, mode);
3249
3250	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3251		dce_v8_0_afmt_enable(encoder, true);
3252		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3253	}
3254}
3255
3256static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3257{
3258	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3259	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3260	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3261
3262	if ((amdgpu_encoder->active_device &
3263	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3264	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3265	     ENCODER_OBJECT_ID_NONE)) {
3266		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3267		if (dig) {
3268			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3269			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3270				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3271		}
3272	}
3273
3274	amdgpu_atombios_scratch_regs_lock(adev, true);
3275
3276	if (connector) {
3277		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3278
3279		/* select the clock/data port if it uses a router */
3280		if (amdgpu_connector->router.cd_valid)
3281			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3282
3283		/* turn eDP panel on for mode set */
3284		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3285			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3286							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3287	}
3288
3289	/* this is needed for the pll/ss setup to work correctly in some cases */
3290	amdgpu_atombios_encoder_set_crtc_source(encoder);
3291	/* set up the FMT blocks */
3292	dce_v8_0_program_fmt(encoder);
3293}
3294
3295static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3296{
3297	struct drm_device *dev = encoder->dev;
3298	struct amdgpu_device *adev = drm_to_adev(dev);
3299
3300	/* need to call this here as we need the crtc set up */
3301	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3302	amdgpu_atombios_scratch_regs_lock(adev, false);
3303}
3304
3305static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3306{
3307	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3308	struct amdgpu_encoder_atom_dig *dig;
3309
3310	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3311
3312	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3313		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3314			dce_v8_0_afmt_enable(encoder, false);
3315		dig = amdgpu_encoder->enc_priv;
3316		dig->dig_encoder = -1;
3317	}
3318	amdgpu_encoder->active_device = 0;
3319}
3320
3321/* these are handled by the primary encoders */
3322static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3323{
3324
3325}
3326
3327static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3328{
3329
3330}
3331
3332static void
3333dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3334		      struct drm_display_mode *mode,
3335		      struct drm_display_mode *adjusted_mode)
3336{
3337
3338}
3339
3340static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3341{
3342
3343}
3344
3345static void
3346dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3347{
3348
3349}
3350
3351static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3352	.dpms = dce_v8_0_ext_dpms,
3353	.prepare = dce_v8_0_ext_prepare,
3354	.mode_set = dce_v8_0_ext_mode_set,
3355	.commit = dce_v8_0_ext_commit,
3356	.disable = dce_v8_0_ext_disable,
3357	/* no detect for TMDS/LVDS yet */
3358};
3359
3360static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3361	.dpms = amdgpu_atombios_encoder_dpms,
3362	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3363	.prepare = dce_v8_0_encoder_prepare,
3364	.mode_set = dce_v8_0_encoder_mode_set,
3365	.commit = dce_v8_0_encoder_commit,
3366	.disable = dce_v8_0_encoder_disable,
3367	.detect = amdgpu_atombios_encoder_dig_detect,
3368};
3369
3370static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3371	.dpms = amdgpu_atombios_encoder_dpms,
3372	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3373	.prepare = dce_v8_0_encoder_prepare,
3374	.mode_set = dce_v8_0_encoder_mode_set,
3375	.commit = dce_v8_0_encoder_commit,
3376	.detect = amdgpu_atombios_encoder_dac_detect,
3377};
3378
3379static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3380{
3381	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3382	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3383		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3384	kfree(amdgpu_encoder->enc_priv);
3385	drm_encoder_cleanup(encoder);
3386	kfree(amdgpu_encoder);
3387}
3388
3389static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3390	.destroy = dce_v8_0_encoder_destroy,
3391};
3392
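/* create a DRM encoder for an ATOM BIOS encoder object, or extend an existing one with the new supported device */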
3393static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3394				 uint32_t encoder_enum,
3395				 uint32_t supported_device,
3396				 u16 caps)
3397{
3398	struct drm_device *dev = adev_to_drm(adev);
3399	struct drm_encoder *encoder;
3400	struct amdgpu_encoder *amdgpu_encoder;
3401
3402	/* see if we already added it */
3403	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3404		amdgpu_encoder = to_amdgpu_encoder(encoder);
3405		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3406			amdgpu_encoder->devices |= supported_device;
3407			return;
3408		}
3410	}
3411
3412	/* add a new one */
3413	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3414	if (!amdgpu_encoder)
3415		return;
3416
3417	encoder = &amdgpu_encoder->base;
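	/* each set bit in possible_crtcs marks a CRTC this encoder can drive */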
3418	switch (adev->mode_info.num_crtc) {
3419	case 1:
3420		encoder->possible_crtcs = 0x1;
3421		break;
3422	case 2:
3423	default:
3424		encoder->possible_crtcs = 0x3;
3425		break;
3426	case 4:
3427		encoder->possible_crtcs = 0xf;
3428		break;
3429	case 6:
3430		encoder->possible_crtcs = 0x3f;
3431		break;
3432	}
3433
3434	amdgpu_encoder->enc_priv = NULL;
3435
3436	amdgpu_encoder->encoder_enum = encoder_enum;
3437	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3438	amdgpu_encoder->devices = supported_device;
3439	amdgpu_encoder->rmx_type = RMX_OFF;
3440	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3441	amdgpu_encoder->is_ext_encoder = false;
3442	amdgpu_encoder->caps = caps;
3443
3444	switch (amdgpu_encoder->encoder_id) {
3445	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3446	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3447		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3448				 DRM_MODE_ENCODER_DAC, NULL);
3449		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3450		break;
3451	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3452	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3453	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3454	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3455	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3456		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3457			amdgpu_encoder->rmx_type = RMX_FULL;
3458			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3459					 DRM_MODE_ENCODER_LVDS, NULL);
3460			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3461		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3462			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3463					 DRM_MODE_ENCODER_DAC, NULL);
3464			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3465		} else {
3466			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3467					 DRM_MODE_ENCODER_TMDS, NULL);
3468			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3469		}
3470		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3471		break;
3472	case ENCODER_OBJECT_ID_SI170B:
3473	case ENCODER_OBJECT_ID_CH7303:
3474	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3475	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3476	case ENCODER_OBJECT_ID_TITFP513:
3477	case ENCODER_OBJECT_ID_VT1623:
3478	case ENCODER_OBJECT_ID_HDMI_SI1930:
3479	case ENCODER_OBJECT_ID_TRAVIS:
3480	case ENCODER_OBJECT_ID_NUTMEG:
3481		/* these are handled by the primary encoders */
3482		amdgpu_encoder->is_ext_encoder = true;
3483		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3484			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3485					 DRM_MODE_ENCODER_LVDS, NULL);
3486		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3487			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3488					 DRM_MODE_ENCODER_DAC, NULL);
3489		else
3490			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3491					 DRM_MODE_ENCODER_TMDS, NULL);
3492		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3493		break;
3494	}
3495}
3496
3497static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3498	.bandwidth_update = &dce_v8_0_bandwidth_update,
3499	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3500	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3501	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3502	.hpd_sense = &dce_v8_0_hpd_sense,
3503	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3504	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3505	.page_flip = &dce_v8_0_page_flip,
3506	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3507	.add_encoder = &dce_v8_0_encoder_add,
3508	.add_connector = &amdgpu_connector_add,
3509};
3510
3511static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3512{
3513	adev->mode_info.funcs = &dce_v8_0_display_funcs;
3514}
3515
3516static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3517	.set = dce_v8_0_set_crtc_interrupt_state,
3518	.process = dce_v8_0_crtc_irq,
3519};
3520
3521static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3522	.set = dce_v8_0_set_pageflip_interrupt_state,
3523	.process = dce_v8_0_pageflip_irq,
3524};
3525
3526static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3527	.set = dce_v8_0_set_hpd_interrupt_state,
3528	.process = dce_v8_0_hpd_irq,
3529};
3530
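/* hook up the CRTC, pageflip and hotplug interrupt sources */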
3531static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3532{
3533	if (adev->mode_info.num_crtc > 0)
3534		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3535	else
3536		adev->crtc_irq.num_types = 0;
3537	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3538
3539	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3540	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3541
3542	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3543	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3544}
3545
3546const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3547{
3548	.type = AMD_IP_BLOCK_TYPE_DCE,
3549	.major = 8,
3550	.minor = 0,
3551	.rev = 0,
3552	.funcs = &dce_v8_0_ip_funcs,
3553};
3554
3555const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3556{
3557	.type = AMD_IP_BLOCK_TYPE_DCE,
3558	.major = 8,
3559	.minor = 1,
3560	.rev = 0,
3561	.funcs = &dce_v8_0_ip_funcs,
3562};
3563
3564const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3565{
3566	.type = AMD_IP_BLOCK_TYPE_DCE,
3567	.major = 8,
3568	.minor = 2,
3569	.rev = 0,
3570	.funcs = &dce_v8_0_ip_funcs,
3571};
3572
3573const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3574{
3575	.type = AMD_IP_BLOCK_TYPE_DCE,
3576	.major = 8,
3577	.minor = 3,
3578	.rev = 0,
3579	.funcs = &dce_v8_0_ip_funcs,
3580};
3581
3582const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3583{
3584	.type = AMD_IP_BLOCK_TYPE_DCE,
3585	.major = 8,
3586	.minor = 5,
3587	.rev = 0,
3588	.funcs = &dce_v8_0_ip_funcs,
3589};