   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <drm/drm_edid.h>
  25#include <drm/drm_fourcc.h>
  26#include <drm/drm_modeset_helper.h>
  27#include <drm/drm_modeset_helper_vtables.h>
  28#include <drm/drm_vblank.h>
  29
  30#include "amdgpu.h"
  31#include "amdgpu_pm.h"
  32#include "amdgpu_i2c.h"
  33#include "cikd.h"
  34#include "atom.h"
  35#include "amdgpu_atombios.h"
  36#include "atombios_crtc.h"
  37#include "atombios_encoders.h"
  38#include "amdgpu_pll.h"
  39#include "amdgpu_connectors.h"
  40#include "amdgpu_display.h"
  41#include "dce_v8_0.h"
  42
  43#include "dce/dce_8_0_d.h"
  44#include "dce/dce_8_0_sh_mask.h"
  45
  46#include "gca/gfx_7_2_enum.h"
  47
  48#include "gmc/gmc_7_1_d.h"
  49#include "gmc/gmc_7_1_sh_mask.h"
  50
  51#include "oss/oss_2_0_d.h"
  52#include "oss/oss_2_0_sh_mask.h"
  53
  54static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  55static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  56
   57static const u32 crtc_offsets[6] = {
  58	CRTC0_REGISTER_OFFSET,
  59	CRTC1_REGISTER_OFFSET,
  60	CRTC2_REGISTER_OFFSET,
  61	CRTC3_REGISTER_OFFSET,
  62	CRTC4_REGISTER_OFFSET,
  63	CRTC5_REGISTER_OFFSET
  64};
  65
   66static const u32 hpd_offsets[] = {
  67	HPD0_REGISTER_OFFSET,
  68	HPD1_REGISTER_OFFSET,
  69	HPD2_REGISTER_OFFSET,
  70	HPD3_REGISTER_OFFSET,
  71	HPD4_REGISTER_OFFSET,
  72	HPD5_REGISTER_OFFSET
  73};
  74
  75static const uint32_t dig_offsets[] = {
  76	CRTC0_REGISTER_OFFSET,
  77	CRTC1_REGISTER_OFFSET,
  78	CRTC2_REGISTER_OFFSET,
  79	CRTC3_REGISTER_OFFSET,
  80	CRTC4_REGISTER_OFFSET,
  81	CRTC5_REGISTER_OFFSET,
  82	(0x13830 - 0x7030) >> 2,
  83};
  84
  85static const struct {
  86	uint32_t	reg;
  87	uint32_t	vblank;
  88	uint32_t	vline;
  89	uint32_t	hpd;
  90
  91} interrupt_status_offsets[6] = { {
  92	.reg = mmDISP_INTERRUPT_STATUS,
  93	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 116}, {
 117	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 118	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 119	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 120	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 121} };
 122
 123static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 124				     u32 block_offset, u32 reg)
 125{
 126	unsigned long flags;
 127	u32 r;
 128
 129	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 130	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 131	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 132	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 133
 134	return r;
 135}
 136
 137static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 138				      u32 block_offset, u32 reg, u32 v)
 139{
 140	unsigned long flags;
 141
 142	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 143	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 144	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 145	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 146}
  147
 148static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 149{
 150	if (crtc >= adev->mode_info.num_crtc)
 151		return 0;
 152	else
 153		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 154}
 155
 156static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 157{
 158	unsigned i;
 159
 160	/* Enable pflip interrupts */
 161	for (i = 0; i < adev->mode_info.num_crtc; i++)
 162		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 163}
 164
 165static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 166{
 167	unsigned i;
 168
 169	/* Disable pflip interrupts */
 170	for (i = 0; i < adev->mode_info.num_crtc; i++)
 171		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 172}
 173
 174/**
 175 * dce_v8_0_page_flip - pageflip callback.
 176 *
 177 * @adev: amdgpu_device pointer
 178 * @crtc_id: crtc to cleanup pageflip on
 179 * @crtc_base: new address of the crtc (GPU MC address)
 180 * @async: asynchronous flip
 181 *
 182 * Triggers the actual pageflip by updating the primary
 183 * surface base address.
 184 */
 185static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 186			       int crtc_id, u64 crtc_base, bool async)
 187{
 188	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 189	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 190
 191	/* flip at hsync for async, default is vsync */
 192	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 193	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 194	/* update pitch */
 195	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
 196	       fb->pitches[0] / fb->format->cpp[0]);
 197	/* update the primary scanout addresses */
 198	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 199	       upper_32_bits(crtc_base));
 200	/* writing to the low address triggers the update */
 201	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 202	       lower_32_bits(crtc_base));
 203	/* post the write */
 204	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 205}
 206
 207static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 208					u32 *vbl, u32 *position)
 209{
 210	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 211		return -EINVAL;
 212
 213	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 214	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 215
 216	return 0;
 217}
 218
 219/**
 220 * dce_v8_0_hpd_sense - hpd sense callback.
 221 *
 222 * @adev: amdgpu_device pointer
 223 * @hpd: hpd (hotplug detect) pin
 224 *
 225 * Checks if a digital monitor is connected (evergreen+).
 226 * Returns true if connected, false if not connected.
 227 */
 228static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 229			       enum amdgpu_hpd_id hpd)
 230{
 231	bool connected = false;
 232
 233	if (hpd >= adev->mode_info.num_hpd)
 234		return connected;
 235
 236	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 237	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 238		connected = true;
 239
 240	return connected;
 241}
 242
 243/**
 244 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 245 *
 246 * @adev: amdgpu_device pointer
 247 * @hpd: hpd (hotplug detect) pin
 248 *
 249 * Set the polarity of the hpd pin (evergreen+).
 250 */
 251static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 252				      enum amdgpu_hpd_id hpd)
 253{
 254	u32 tmp;
 255	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 256
 257	if (hpd >= adev->mode_info.num_hpd)
 258		return;
 259
 260	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 261	if (connected)
 262		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 263	else
 264		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 265	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 266}
 267
 268static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
 269				 int hpd)
 270{
 271	u32 tmp;
 272
 273	if (hpd >= adev->mode_info.num_hpd) {
  274		DRM_DEBUG("invalid hpd %d\n", hpd);
 275		return;
 276	}
 277
 278	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 279	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
 280	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 281}
 282
 283/**
 284 * dce_v8_0_hpd_init - hpd setup callback.
 285 *
 286 * @adev: amdgpu_device pointer
 287 *
 288 * Setup the hpd pins used by the card (evergreen+).
 289 * Enable the pin, set the polarity, and enable the hpd interrupts.
 290 */
 291static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 292{
 293	struct drm_device *dev = adev_to_drm(adev);
 294	struct drm_connector *connector;
 295	struct drm_connector_list_iter iter;
 296	u32 tmp;
 297
 298	drm_connector_list_iter_begin(dev, &iter);
 299	drm_for_each_connector_iter(connector, &iter) {
 300		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 301
 302		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 303			continue;
 304
 305		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 306		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 307		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 308
 309		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 310		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
  311			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
  312			 * aux dp channel on imac and to help (but not completely fix)
  313			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
  314			 * also avoids interrupt storms during dpms.
  315			 */
 316			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 317			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 318			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 319			continue;
 320		}
 321
 322		dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
 323		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 324		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 325	}
 326	drm_connector_list_iter_end(&iter);
 327}
 328
 329/**
 330 * dce_v8_0_hpd_fini - hpd tear down callback.
 331 *
 332 * @adev: amdgpu_device pointer
 333 *
 334 * Tear down the hpd pins used by the card (evergreen+).
 335 * Disable the hpd interrupts.
 336 */
 337static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 338{
 339	struct drm_device *dev = adev_to_drm(adev);
 340	struct drm_connector *connector;
 341	struct drm_connector_list_iter iter;
 342	u32 tmp;
 343
 344	drm_connector_list_iter_begin(dev, &iter);
 345	drm_for_each_connector_iter(connector, &iter) {
 346		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 347
 348		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 349			continue;
 350
 351		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 352		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 353		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 354
 355		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 356	}
 357	drm_connector_list_iter_end(&iter);
 358}
 359
 360static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 361{
 362	return mmDC_GPIO_HPD_A;
 363}
 364
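/*
 * A CRTC is considered hung when its HV counter stops advancing: the helper
 * below samples the counter of every enabled CRTC, then re-reads it up to
 * ten times at 100 us intervals and clears the "hung" bit for any CRTC
 * whose counter has moved.
 */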
 365static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 366{
 367	u32 crtc_hung = 0;
 368	u32 crtc_status[6];
 369	u32 i, j, tmp;
 370
 371	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 372		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
 373			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 374			crtc_hung |= (1 << i);
 375		}
 376	}
 377
 378	for (j = 0; j < 10; j++) {
 379		for (i = 0; i < adev->mode_info.num_crtc; i++) {
 380			if (crtc_hung & (1 << i)) {
 381				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 382				if (tmp != crtc_status[i])
 383					crtc_hung &= ~(1 << i);
 384			}
 385		}
 386		if (crtc_hung == 0)
 387			return false;
 388		udelay(100);
 389	}
 390
 391	return true;
 392}
  393
 394static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 395					  bool render)
 396{
 397	u32 tmp;
 398
 399	/* Lockout access through VGA aperture*/
 400	tmp = RREG32(mmVGA_HDP_CONTROL);
 401	if (render)
 402		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
 403	else
 404		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 405	WREG32(mmVGA_HDP_CONTROL, tmp);
 406
 407	/* disable VGA render */
 408	tmp = RREG32(mmVGA_RENDER_CONTROL);
 409	if (render)
 410		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
 411	else
 412		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 413	WREG32(mmVGA_RENDER_CONTROL, tmp);
 414}
 415
 416static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 417{
 418	int num_crtc = 0;
 419
 420	switch (adev->asic_type) {
 421	case CHIP_BONAIRE:
 422	case CHIP_HAWAII:
 423		num_crtc = 6;
 424		break;
 425	case CHIP_KAVERI:
 426		num_crtc = 4;
 427		break;
 428	case CHIP_KABINI:
 429	case CHIP_MULLINS:
 430		num_crtc = 2;
 431		break;
 432	default:
 433		num_crtc = 0;
 434	}
 435	return num_crtc;
 436}
 437
 438void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 439{
  440	/* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
 441	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 442		u32 tmp;
 443		int crtc_enabled, i;
 444
 445		dce_v8_0_set_vga_render_state(adev, false);
 446
  447		/* Disable the CRTCs */
 448		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
 449			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 450									 CRTC_CONTROL, CRTC_MASTER_EN);
 451			if (crtc_enabled) {
 452				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 453				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 454				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 455				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 456				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 457			}
 458		}
 459	}
 460}
 461
 462static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 463{
 464	struct drm_device *dev = encoder->dev;
 465	struct amdgpu_device *adev = drm_to_adev(dev);
 466	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 467	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 468	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 469	int bpc = 0;
 470	u32 tmp = 0;
 471	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 472
 473	if (connector) {
 474		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 475		bpc = amdgpu_connector_get_monitor_bpc(connector);
 476		dither = amdgpu_connector->dither;
 477	}
 478
 479	/* LVDS/eDP FMT is set up by atom */
 480	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 481		return;
 482
 483	/* not needed for analog */
 484	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
 485	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
 486		return;
 487
 488	if (bpc == 0)
 489		return;
 490
 491	switch (bpc) {
 492	case 6:
 493		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 494			/* XXX sort out optimal dither settings */
 495			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 496				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 497				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 498				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 499		else
 500			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 501			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 502		break;
 503	case 8:
 504		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 505			/* XXX sort out optimal dither settings */
 506			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 507				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 508				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 509				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 510				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 511		else
 512			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 513			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 514		break;
 515	case 10:
 516		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 517			/* XXX sort out optimal dither settings */
 518			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 519				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 520				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 521				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 522				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 523		else
 524			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 525			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 526		break;
 527	default:
 528		/* not needed */
 529		break;
 530	}
 531
 532	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 533}
 534
 535
 536/* display watermark setup */
 537/**
 538 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 539 *
 540 * @adev: amdgpu_device pointer
 541 * @amdgpu_crtc: the selected display controller
 542 * @mode: the current display mode on the selected display
 543 * controller
 544 *
  545 * Set up the line buffer allocation for
 546 * the selected display controller (CIK).
 547 * Returns the line buffer size in pixels.
 548 */
 549static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
 550				       struct amdgpu_crtc *amdgpu_crtc,
 551				       struct drm_display_mode *mode)
 552{
 553	u32 tmp, buffer_alloc, i;
 554	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 555	/*
 556	 * Line Buffer Setup
  557	 * There are 6 line buffers, one for each display controller.
  558	 * There are 3 partitions per LB. Select the number of partitions
  559	 * to enable based on the display width.  For display widths larger
  560	 * than 4096, you need to use 2 display controllers and combine
 561	 * them using the stereo blender.
 562	 */
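	/*
	 * Illustrative example (derived from the cases below, not part of the
	 * original comment): a 1920 pixel wide mode falls into the second
	 * bucket, so LB_MEMORY_CONFIG is programmed to 2 and 2560 * 2 pixels
	 * of line buffer are reported back to the caller.
	 */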
 563	if (amdgpu_crtc->base.enabled && mode) {
 564		if (mode->crtc_hdisplay < 1920) {
 565			tmp = 1;
 566			buffer_alloc = 2;
 567		} else if (mode->crtc_hdisplay < 2560) {
 568			tmp = 2;
 569			buffer_alloc = 2;
 570		} else if (mode->crtc_hdisplay < 4096) {
 571			tmp = 0;
 572			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 573		} else {
 574			DRM_DEBUG_KMS("Mode too big for LB!\n");
 575			tmp = 0;
 576			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 577		}
 578	} else {
 579		tmp = 1;
 580		buffer_alloc = 0;
 581	}
 582
 583	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
 584	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
 585	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
 586
 587	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
 588	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 589	for (i = 0; i < adev->usec_timeout; i++) {
 590		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
 591		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
 592			break;
 593		udelay(1);
 594	}
 595
 596	if (amdgpu_crtc->base.enabled && mode) {
 597		switch (tmp) {
 598		case 0:
 599		default:
 600			return 4096 * 2;
 601		case 1:
 602			return 1920 * 2;
 603		case 2:
 604			return 2560 * 2;
 605		}
 606	}
 607
 608	/* controller not enabled, so no lb used */
 609	return 0;
 610}
 611
 612/**
 613 * cik_get_number_of_dram_channels - get the number of dram channels
 614 *
 615 * @adev: amdgpu_device pointer
 616 *
 617 * Look up the number of video ram channels (CIK).
 618 * Used for display watermark bandwidth calculations
 619 * Returns the number of dram channels
 620 */
 621static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 622{
 623	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 624
 625	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 626	case 0:
 627	default:
 628		return 1;
 629	case 1:
 630		return 2;
 631	case 2:
 632		return 4;
 633	case 3:
 634		return 8;
 635	case 4:
 636		return 3;
 637	case 5:
 638		return 6;
 639	case 6:
 640		return 10;
 641	case 7:
 642		return 12;
 643	case 8:
 644		return 16;
 645	}
 646}
 647
 648struct dce8_wm_params {
 649	u32 dram_channels; /* number of dram channels */
 650	u32 yclk;          /* bandwidth per dram data pin in kHz */
 651	u32 sclk;          /* engine clock in kHz */
 652	u32 disp_clk;      /* display clock in kHz */
 653	u32 src_width;     /* viewport width */
 654	u32 active_time;   /* active display time in ns */
 655	u32 blank_time;    /* blank time in ns */
 656	bool interlaced;    /* mode is interlaced */
 657	fixed20_12 vsc;    /* vertical scale ratio */
 658	u32 num_heads;     /* number of active crtcs */
 659	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 660	u32 lb_size;       /* line buffer allocated to pipe */
 661	u32 vtaps;         /* vertical scaler taps */
 662};
 663
 664/**
 665 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 666 *
 667 * @wm: watermark calculation data
 668 *
 669 * Calculate the raw dram bandwidth (CIK).
 670 * Used for display watermark bandwidth calculations
 671 * Returns the dram bandwidth in MBytes/s
 672 */
 673static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
 674{
 675	/* Calculate raw DRAM Bandwidth */
 676	fixed20_12 dram_efficiency; /* 0.7 */
 677	fixed20_12 yclk, dram_channels, bandwidth;
 678	fixed20_12 a;
 679
 680	a.full = dfixed_const(1000);
 681	yclk.full = dfixed_const(wm->yclk);
 682	yclk.full = dfixed_div(yclk, a);
 683	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 684	a.full = dfixed_const(10);
 685	dram_efficiency.full = dfixed_const(7);
 686	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 687	bandwidth.full = dfixed_mul(dram_channels, yclk);
 688	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 689
 690	return dfixed_trunc(bandwidth);
 691}
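
/*
 * Worked example for the formula above (illustrative numbers, not from the
 * original source): with wm->yclk = 1000000 kHz and wm->dram_channels = 2,
 * the raw bandwidth is (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s.
 */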
 692
 693/**
 694 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 695 *
 696 * @wm: watermark calculation data
 697 *
 698 * Calculate the dram bandwidth used for display (CIK).
 699 * Used for display watermark bandwidth calculations
 700 * Returns the dram bandwidth for display in MBytes/s
 701 */
 702static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 703{
 704	/* Calculate DRAM Bandwidth and the part allocated to display. */
 705	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 706	fixed20_12 yclk, dram_channels, bandwidth;
 707	fixed20_12 a;
 708
 709	a.full = dfixed_const(1000);
 710	yclk.full = dfixed_const(wm->yclk);
 711	yclk.full = dfixed_div(yclk, a);
 712	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 713	a.full = dfixed_const(10);
  714	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 715	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 716	bandwidth.full = dfixed_mul(dram_channels, yclk);
 717	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 718
 719	return dfixed_trunc(bandwidth);
 720}
 721
 722/**
 723 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 724 *
 725 * @wm: watermark calculation data
 726 *
 727 * Calculate the data return bandwidth used for display (CIK).
 728 * Used for display watermark bandwidth calculations
 729 * Returns the data return bandwidth in MBytes/s
 730 */
 731static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 732{
 733	/* Calculate the display Data return Bandwidth */
 734	fixed20_12 return_efficiency; /* 0.8 */
 735	fixed20_12 sclk, bandwidth;
 736	fixed20_12 a;
 737
 738	a.full = dfixed_const(1000);
 739	sclk.full = dfixed_const(wm->sclk);
 740	sclk.full = dfixed_div(sclk, a);
 741	a.full = dfixed_const(10);
 742	return_efficiency.full = dfixed_const(8);
 743	return_efficiency.full = dfixed_div(return_efficiency, a);
 744	a.full = dfixed_const(32);
 745	bandwidth.full = dfixed_mul(a, sclk);
 746	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 747
 748	return dfixed_trunc(bandwidth);
 749}
 750
 751/**
 752 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 753 *
 754 * @wm: watermark calculation data
 755 *
 756 * Calculate the dmif bandwidth used for display (CIK).
 757 * Used for display watermark bandwidth calculations
 758 * Returns the dmif bandwidth in MBytes/s
 759 */
 760static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 761{
 762	/* Calculate the DMIF Request Bandwidth */
 763	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 764	fixed20_12 disp_clk, bandwidth;
 765	fixed20_12 a, b;
 766
 767	a.full = dfixed_const(1000);
 768	disp_clk.full = dfixed_const(wm->disp_clk);
 769	disp_clk.full = dfixed_div(disp_clk, a);
 770	a.full = dfixed_const(32);
 771	b.full = dfixed_mul(a, disp_clk);
 772
 773	a.full = dfixed_const(10);
 774	disp_clk_request_efficiency.full = dfixed_const(8);
 775	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 776
 777	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 778
 779	return dfixed_trunc(bandwidth);
 780}
 781
 782/**
 783 * dce_v8_0_available_bandwidth - get the min available bandwidth
 784 *
 785 * @wm: watermark calculation data
 786 *
 787 * Calculate the min available bandwidth used for display (CIK).
 788 * Used for display watermark bandwidth calculations
 789 * Returns the min available bandwidth in MBytes/s
 790 */
 791static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 792{
  793	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
 794	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 795	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 796	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 797
 798	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 799}
 800
 801/**
 802 * dce_v8_0_average_bandwidth - get the average available bandwidth
 803 *
 804 * @wm: watermark calculation data
 805 *
 806 * Calculate the average available bandwidth used for display (CIK).
 807 * Used for display watermark bandwidth calculations
 808 * Returns the average available bandwidth in MBytes/s
 809 */
 810static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 811{
 812	/* Calculate the display mode Average Bandwidth
 813	 * DisplayMode should contain the source and destination dimensions,
 814	 * timing, etc.
 815	 */
 816	fixed20_12 bpp;
 817	fixed20_12 line_time;
 818	fixed20_12 src_width;
 819	fixed20_12 bandwidth;
 820	fixed20_12 a;
 821
 822	a.full = dfixed_const(1000);
 823	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 824	line_time.full = dfixed_div(line_time, a);
 825	bpp.full = dfixed_const(wm->bytes_per_pixel);
 826	src_width.full = dfixed_const(wm->src_width);
 827	bandwidth.full = dfixed_mul(src_width, bpp);
 828	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 829	bandwidth.full = dfixed_div(bandwidth, line_time);
 830
 831	return dfixed_trunc(bandwidth);
 832}
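
/*
 * Worked example for the formula above (illustrative numbers, not from the
 * original source): a 1920 pixel wide source at 4 bytes per pixel, vsc = 1
 * and a 16000 ns line time averages 1920 * 4 / 16 = 480 MBytes/s.
 */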
 833
 834/**
 835 * dce_v8_0_latency_watermark - get the latency watermark
 836 *
 837 * @wm: watermark calculation data
 838 *
 839 * Calculate the latency watermark (CIK).
 840 * Used for display watermark bandwidth calculations
 841 * Returns the latency watermark in ns
 842 */
 843static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 844{
 845	/* First calculate the latency in ns */
 846	u32 mc_latency = 2000; /* 2000 ns. */
 847	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
 848	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 849	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 850	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 851	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 852		(wm->num_heads * cursor_line_pair_return_time);
 853	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 854	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 855	u32 tmp, dmif_size = 12288;
 856	fixed20_12 a, b, c;
 857
 858	if (wm->num_heads == 0)
 859		return 0;
 860
 861	a.full = dfixed_const(2);
 862	b.full = dfixed_const(1);
 863	if ((wm->vsc.full > a.full) ||
 864	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 865	    (wm->vtaps >= 5) ||
 866	    ((wm->vsc.full >= a.full) && wm->interlaced))
 867		max_src_lines_per_dst_line = 4;
 868	else
 869		max_src_lines_per_dst_line = 2;
 870
 871	a.full = dfixed_const(available_bandwidth);
 872	b.full = dfixed_const(wm->num_heads);
 873	a.full = dfixed_div(a, b);
 874	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
 875	tmp = min(dfixed_trunc(a), tmp);
 876
  877	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 878
 879	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 880	b.full = dfixed_const(1000);
 881	c.full = dfixed_const(lb_fill_bw);
 882	b.full = dfixed_div(c, b);
 883	a.full = dfixed_div(a, b);
 884	line_fill_time = dfixed_trunc(a);
 885
 886	if (line_fill_time < wm->active_time)
 887		return latency;
 888	else
 889		return latency + (line_fill_time - wm->active_time);
 890
 891}
 892
 893/**
 894 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 895 * average and available dram bandwidth
 896 *
 897 * @wm: watermark calculation data
 898 *
 899 * Check if the display average bandwidth fits in the display
 900 * dram bandwidth (CIK).
 901 * Used for display watermark bandwidth calculations
 902 * Returns true if the display fits, false if not.
 903 */
 904static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 905{
 906	if (dce_v8_0_average_bandwidth(wm) <=
 907	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 908		return true;
 909	else
 910		return false;
 911}
 912
 913/**
 914 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 915 * average and available bandwidth
 916 *
 917 * @wm: watermark calculation data
 918 *
 919 * Check if the display average bandwidth fits in the display
 920 * available bandwidth (CIK).
 921 * Used for display watermark bandwidth calculations
 922 * Returns true if the display fits, false if not.
 923 */
 924static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
 925{
 926	if (dce_v8_0_average_bandwidth(wm) <=
 927	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
 928		return true;
 929	else
 930		return false;
 931}
 932
 933/**
 934 * dce_v8_0_check_latency_hiding - check latency hiding
 935 *
 936 * @wm: watermark calculation data
 937 *
 938 * Check latency hiding (CIK).
 939 * Used for display watermark bandwidth calculations
 940 * Returns true if the display fits, false if not.
 941 */
 942static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
 943{
 944	u32 lb_partitions = wm->lb_size / wm->src_width;
 945	u32 line_time = wm->active_time + wm->blank_time;
 946	u32 latency_tolerant_lines;
 947	u32 latency_hiding;
 948	fixed20_12 a;
 949
 950	a.full = dfixed_const(1);
 951	if (wm->vsc.full > a.full)
 952		latency_tolerant_lines = 1;
 953	else {
 954		if (lb_partitions <= (wm->vtaps + 1))
 955			latency_tolerant_lines = 1;
 956		else
 957			latency_tolerant_lines = 2;
 958	}
 959
 960	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 961
 962	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
 963		return true;
 964	else
 965		return false;
 966}
 967
 968/**
 969 * dce_v8_0_program_watermarks - program display watermarks
 970 *
 971 * @adev: amdgpu_device pointer
 972 * @amdgpu_crtc: the selected display controller
 973 * @lb_size: line buffer size
 974 * @num_heads: number of display controllers in use
 975 *
 976 * Calculate and program the display watermarks for the
 977 * selected display controller (CIK).
 978 */
 979static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 980					struct amdgpu_crtc *amdgpu_crtc,
 981					u32 lb_size, u32 num_heads)
 982{
 983	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 984	struct dce8_wm_params wm_low, wm_high;
 985	u32 active_time;
 986	u32 line_time = 0;
 987	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 988	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 989
 990	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 991		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
 992					    (u32)mode->clock);
 993		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
 994					  (u32)mode->clock);
 995		line_time = min_t(u32, line_time, 65535);
 996
 997		/* watermark for high clocks */
 998		if (adev->pm.dpm_enabled) {
 999			wm_high.yclk =
1000				amdgpu_dpm_get_mclk(adev, false) * 10;
1001			wm_high.sclk =
1002				amdgpu_dpm_get_sclk(adev, false) * 10;
1003		} else {
1004			wm_high.yclk = adev->pm.current_mclk * 10;
1005			wm_high.sclk = adev->pm.current_sclk * 10;
1006		}
1007
1008		wm_high.disp_clk = mode->clock;
1009		wm_high.src_width = mode->crtc_hdisplay;
1010		wm_high.active_time = active_time;
1011		wm_high.blank_time = line_time - wm_high.active_time;
1012		wm_high.interlaced = false;
1013		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1014			wm_high.interlaced = true;
1015		wm_high.vsc = amdgpu_crtc->vsc;
1016		wm_high.vtaps = 1;
1017		if (amdgpu_crtc->rmx_type != RMX_OFF)
1018			wm_high.vtaps = 2;
1019		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1020		wm_high.lb_size = lb_size;
1021		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1022		wm_high.num_heads = num_heads;
1023
1024		/* set for high clocks */
1025		latency_watermark_a = min_t(u32, dce_v8_0_latency_watermark(&wm_high), 65535);
1026
1027		/* possibly force display priority to high */
1028		/* should really do this at mode validation time... */
1029		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1030		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1031		    !dce_v8_0_check_latency_hiding(&wm_high) ||
1032		    (adev->mode_info.disp_priority == 2)) {
1033			DRM_DEBUG_KMS("force priority to high\n");
1034		}
1035
1036		/* watermark for low clocks */
1037		if (adev->pm.dpm_enabled) {
1038			wm_low.yclk =
1039				amdgpu_dpm_get_mclk(adev, true) * 10;
1040			wm_low.sclk =
1041				amdgpu_dpm_get_sclk(adev, true) * 10;
1042		} else {
1043			wm_low.yclk = adev->pm.current_mclk * 10;
1044			wm_low.sclk = adev->pm.current_sclk * 10;
1045		}
1046
1047		wm_low.disp_clk = mode->clock;
1048		wm_low.src_width = mode->crtc_hdisplay;
1049		wm_low.active_time = active_time;
1050		wm_low.blank_time = line_time - wm_low.active_time;
1051		wm_low.interlaced = false;
1052		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1053			wm_low.interlaced = true;
1054		wm_low.vsc = amdgpu_crtc->vsc;
1055		wm_low.vtaps = 1;
1056		if (amdgpu_crtc->rmx_type != RMX_OFF)
1057			wm_low.vtaps = 2;
1058		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1059		wm_low.lb_size = lb_size;
1060		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1061		wm_low.num_heads = num_heads;
1062
1063		/* set for low clocks */
1064		latency_watermark_b = min_t(u32, dce_v8_0_latency_watermark(&wm_low), 65535);
1065
1066		/* possibly force display priority to high */
1067		/* should really do this at mode validation time... */
1068		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1069		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1070		    !dce_v8_0_check_latency_hiding(&wm_low) ||
1071		    (adev->mode_info.disp_priority == 2)) {
1072			DRM_DEBUG_KMS("force priority to high\n");
1073		}
1074		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1075	}
1076
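	/*
	 * DPG_WATERMARK_MASK_CONTROL selects which watermark set the following
	 * DPG_PIPE_URGENCY_CONTROL write lands in: 1 selects set A, 2 selects
	 * set B.  The original mask value is restored once both are programmed.
	 */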
1077	/* select wm A */
1078	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1079	tmp = wm_mask;
1080	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1081	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1082	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1083	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1084	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1085		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1086	/* select wm B */
1087	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1088	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1089	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1090	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1091	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1092	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1093		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1094	/* restore original selection */
1095	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1096
1097	/* save values for DPM */
1098	amdgpu_crtc->line_time = line_time;
1099	amdgpu_crtc->wm_high = latency_watermark_a;
1100	amdgpu_crtc->wm_low = latency_watermark_b;
1101	/* Save number of lines the linebuffer leads before the scanout */
1102	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1103}
1104
1105/**
1106 * dce_v8_0_bandwidth_update - program display watermarks
1107 *
1108 * @adev: amdgpu_device pointer
1109 *
1110 * Calculate and program the display watermarks and line
1111 * buffer allocation (CIK).
1112 */
1113static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1114{
1115	struct drm_display_mode *mode = NULL;
1116	u32 num_heads = 0, lb_size;
1117	int i;
1118
1119	amdgpu_display_update_priority(adev);
1120
1121	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1122		if (adev->mode_info.crtcs[i]->base.enabled)
1123			num_heads++;
1124	}
1125	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1126		mode = &adev->mode_info.crtcs[i]->base.mode;
1127		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1128		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1129					    lb_size, num_heads);
1130	}
1131}
1132
1133static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1134{
1135	int i;
1136	u32 offset, tmp;
1137
1138	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1139		offset = adev->mode_info.audio.pin[i].offset;
1140		tmp = RREG32_AUDIO_ENDPT(offset,
1141					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1142		if (((tmp &
1143		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1144		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1145			adev->mode_info.audio.pin[i].connected = false;
1146		else
1147			adev->mode_info.audio.pin[i].connected = true;
1148	}
1149}
1150
1151static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1152{
1153	int i;
1154
1155	dce_v8_0_audio_get_connected_pins(adev);
1156
1157	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1158		if (adev->mode_info.audio.pin[i].connected)
1159			return &adev->mode_info.audio.pin[i];
1160	}
1161	DRM_ERROR("No connected audio pins found!\n");
1162	return NULL;
1163}
1164
1165static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1166{
1167	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1168	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1169	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1170	u32 offset;
1171
1172	if (!dig || !dig->afmt || !dig->afmt->pin)
1173		return;
1174
1175	offset = dig->afmt->offset;
1176
1177	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1178	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1179}
1180
1181static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1182						struct drm_display_mode *mode)
1183{
1184	struct drm_device *dev = encoder->dev;
1185	struct amdgpu_device *adev = drm_to_adev(dev);
1186	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1187	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1188	struct drm_connector *connector;
1189	struct drm_connector_list_iter iter;
1190	struct amdgpu_connector *amdgpu_connector = NULL;
1191	u32 tmp = 0, offset;
1192
1193	if (!dig || !dig->afmt || !dig->afmt->pin)
1194		return;
1195
1196	offset = dig->afmt->pin->offset;
1197
1198	drm_connector_list_iter_begin(dev, &iter);
1199	drm_for_each_connector_iter(connector, &iter) {
1200		if (connector->encoder == encoder) {
1201			amdgpu_connector = to_amdgpu_connector(connector);
1202			break;
1203		}
1204	}
1205	drm_connector_list_iter_end(&iter);
1206
1207	if (!amdgpu_connector) {
1208		DRM_ERROR("Couldn't find encoder's connector\n");
1209		return;
1210	}
1211
1212	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1213		if (connector->latency_present[1])
1214			tmp =
1215			(connector->video_latency[1] <<
1216			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1217			(connector->audio_latency[1] <<
1218			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1219		else
1220			tmp =
1221			(0 <<
1222			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1223			(0 <<
1224			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1225	} else {
1226		if (connector->latency_present[0])
1227			tmp =
1228			(connector->video_latency[0] <<
1229			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1230			(connector->audio_latency[0] <<
1231			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1232		else
1233			tmp =
1234			(0 <<
1235			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1236			(0 <<
1237			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1238
1239	}
1240	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1241}
1242
1243static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1244{
1245	struct drm_device *dev = encoder->dev;
1246	struct amdgpu_device *adev = drm_to_adev(dev);
1247	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1248	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1249	struct drm_connector *connector;
1250	struct drm_connector_list_iter iter;
1251	struct amdgpu_connector *amdgpu_connector = NULL;
1252	u32 offset, tmp;
1253	u8 *sadb = NULL;
1254	int sad_count;
1255
1256	if (!dig || !dig->afmt || !dig->afmt->pin)
1257		return;
1258
1259	offset = dig->afmt->pin->offset;
1260
1261	drm_connector_list_iter_begin(dev, &iter);
1262	drm_for_each_connector_iter(connector, &iter) {
1263		if (connector->encoder == encoder) {
1264			amdgpu_connector = to_amdgpu_connector(connector);
1265			break;
1266		}
1267	}
1268	drm_connector_list_iter_end(&iter);
1269
1270	if (!amdgpu_connector) {
1271		DRM_ERROR("Couldn't find encoder's connector\n");
1272		return;
1273	}
1274
1275	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1276	if (sad_count < 0) {
1277		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1278		sad_count = 0;
1279	}
1280
1281	/* program the speaker allocation */
1282	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1283	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1284		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1285	/* set HDMI mode */
1286	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1287	if (sad_count)
1288		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1289	else
1290		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1291	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1292
1293	kfree(sadb);
1294}
1295
1296static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1297{
1298	struct drm_device *dev = encoder->dev;
1299	struct amdgpu_device *adev = drm_to_adev(dev);
1300	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1301	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1302	u32 offset;
1303	struct drm_connector *connector;
1304	struct drm_connector_list_iter iter;
1305	struct amdgpu_connector *amdgpu_connector = NULL;
1306	struct cea_sad *sads;
1307	int i, sad_count;
1308
1309	static const u16 eld_reg_to_type[][2] = {
1310		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1311		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1312		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1313		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1314		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1315		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1316		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1317		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1318		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1319		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1320		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1321		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1322	};
1323
1324	if (!dig || !dig->afmt || !dig->afmt->pin)
1325		return;
1326
1327	offset = dig->afmt->pin->offset;
1328
1329	drm_connector_list_iter_begin(dev, &iter);
1330	drm_for_each_connector_iter(connector, &iter) {
1331		if (connector->encoder == encoder) {
1332			amdgpu_connector = to_amdgpu_connector(connector);
1333			break;
1334		}
1335	}
1336	drm_connector_list_iter_end(&iter);
1337
1338	if (!amdgpu_connector) {
1339		DRM_ERROR("Couldn't find encoder's connector\n");
1340		return;
1341	}
1342
1343	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1344	if (sad_count < 0)
1345		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1346	if (sad_count <= 0)
 1347		return;
1348	BUG_ON(!sads);
1349
1350	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1351		u32 value = 0;
1352		u8 stereo_freqs = 0;
1353		int max_channels = -1;
1354		int j;
1355
1356		for (j = 0; j < sad_count; j++) {
1357			struct cea_sad *sad = &sads[j];
1358
1359			if (sad->format == eld_reg_to_type[i][1]) {
1360				if (sad->channels > max_channels) {
1361					value = (sad->channels <<
1362						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1363						(sad->byte2 <<
1364						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1365						(sad->freq <<
1366						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1367					max_channels = sad->channels;
1368				}
1369
1370				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1371					stereo_freqs |= sad->freq;
1372				else
1373					break;
1374			}
1375		}
1376
1377		value |= (stereo_freqs <<
1378			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1379
1380		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1381	}
1382
1383	kfree(sads);
1384}
1385
1386static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1387				  struct amdgpu_audio_pin *pin,
1388				  bool enable)
1389{
1390	if (!pin)
1391		return;
1392
1393	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1394		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1395}
1396
 1397static const u32 pin_offsets[7] = {
1398	(0x1780 - 0x1780),
1399	(0x1786 - 0x1780),
1400	(0x178c - 0x1780),
1401	(0x1792 - 0x1780),
1402	(0x1798 - 0x1780),
1403	(0x179d - 0x1780),
1404	(0x17a4 - 0x1780),
1405};
1406
1407static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1408{
1409	int i;
1410
1411	if (!amdgpu_audio)
1412		return 0;
1413
1414	adev->mode_info.audio.enabled = true;
1415
1416	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1417		adev->mode_info.audio.num_pins = 7;
1418	else if ((adev->asic_type == CHIP_KABINI) ||
1419		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1420		adev->mode_info.audio.num_pins = 3;
1421	else if ((adev->asic_type == CHIP_BONAIRE) ||
1422		 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1423		adev->mode_info.audio.num_pins = 7;
1424	else
1425		adev->mode_info.audio.num_pins = 3;
1426
1427	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1428		adev->mode_info.audio.pin[i].channels = -1;
1429		adev->mode_info.audio.pin[i].rate = -1;
1430		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1431		adev->mode_info.audio.pin[i].status_bits = 0;
1432		adev->mode_info.audio.pin[i].category_code = 0;
1433		adev->mode_info.audio.pin[i].connected = false;
1434		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1435		adev->mode_info.audio.pin[i].id = i;
1436		/* disable audio.  it will be set up later */
1437		/* XXX remove once we switch to ip funcs */
1438		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1439	}
1440
1441	return 0;
1442}
1443
1444static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1445{
1446	int i;
1447
1448	if (!amdgpu_audio)
1449		return;
1450
1451	if (!adev->mode_info.audio.enabled)
1452		return;
1453
1454	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1455		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1456
1457	adev->mode_info.audio.enabled = false;
1458}
1459
1460/*
1461 * update the N and CTS parameters for a given pixel clock rate
1462 */
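/*
 * Per the HDMI spec the sink regenerates the audio clock from the TMDS clock
 * as 128 * audio_sample_rate = tmds_clock * N / CTS, so a separate N/CTS
 * pair is programmed below for the 32 kHz, 44.1 kHz and 48 kHz families.
 */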
1463static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1464{
1465	struct drm_device *dev = encoder->dev;
1466	struct amdgpu_device *adev = drm_to_adev(dev);
1467	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1468	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1469	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1470	uint32_t offset = dig->afmt->offset;
1471
1472	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1473	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1474
1475	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1476	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1477
1478	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1479	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1480}
1481
1482/*
1483 * build a HDMI Video Info Frame
1484 */
1485static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1486					       void *buffer, size_t size)
1487{
1488	struct drm_device *dev = encoder->dev;
1489	struct amdgpu_device *adev = drm_to_adev(dev);
1490	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1491	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1492	uint32_t offset = dig->afmt->offset;
1493	uint8_t *frame = buffer + 3;
1494	uint8_t *header = buffer;
1495
1496	WREG32(mmAFMT_AVI_INFO0 + offset,
1497		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1498	WREG32(mmAFMT_AVI_INFO1 + offset,
1499		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1500	WREG32(mmAFMT_AVI_INFO2 + offset,
1501		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1502	WREG32(mmAFMT_AVI_INFO3 + offset,
1503		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1504}
1505
1506static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1507{
1508	struct drm_device *dev = encoder->dev;
1509	struct amdgpu_device *adev = drm_to_adev(dev);
1510	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1511	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1512	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1513	u32 dto_phase = 24 * 1000;
1514	u32 dto_modulo = clock;
1515
1516	if (!dig || !dig->afmt)
1517		return;
1518
1519	/* XXX two dtos; generally use dto0 for hdmi */
 1520	/* Express [24MHz / target pixel clock] as an exact rational
 1521	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
 1522	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
 1523	 */
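	/*
	 * Example (illustrative, not part of the original comment): for a
	 * 148.5 MHz pixel clock the registers below are programmed with
	 * PHASE = 24000 and MODULE = 148500, i.e. the audio DTO runs at
	 * 24000/148500 of the pixel clock.
	 */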
1524	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1525	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1526	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1527}
1528
1529/*
1530 * update the info frames with the data from the current display mode
1531 */
1532static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1533				  struct drm_display_mode *mode)
1534{
1535	struct drm_device *dev = encoder->dev;
1536	struct amdgpu_device *adev = drm_to_adev(dev);
1537	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1538	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1539	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1540	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1541	struct hdmi_avi_infoframe frame;
1542	uint32_t offset, val;
1543	ssize_t err;
1544	int bpc = 8;
1545
1546	if (!dig || !dig->afmt)
1547		return;
1548
1549	/* Silent, r600_hdmi_enable will raise WARN for us */
1550	if (!dig->afmt->enabled)
1551		return;
1552
1553	offset = dig->afmt->offset;
1554
1555	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1556	if (encoder->crtc) {
1557		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1558		bpc = amdgpu_crtc->bpc;
1559	}
1560
1561	/* disable audio prior to setting up hw */
1562	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1563	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1564
1565	dce_v8_0_audio_set_dto(encoder, mode->clock);
1566
1567	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1568	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1569
1570	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1571
1572	val = RREG32(mmHDMI_CONTROL + offset);
1573	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1574	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1575
1576	switch (bpc) {
1577	case 0:
1578	case 6:
1579	case 8:
1580	case 16:
1581	default:
1582		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1583			  connector->name, bpc);
1584		break;
1585	case 10:
1586		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1587		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1588		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1589			  connector->name);
1590		break;
1591	case 12:
1592		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1593		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1594		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1595			  connector->name);
1596		break;
1597	}
1598
1599	WREG32(mmHDMI_CONTROL + offset, val);
1600
1601	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1602	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1603	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1604	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1605
1606	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1607	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1608	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1609
1610	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1611	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1612
1613	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1614	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1615
1616	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1617
1618	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1619	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1620	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1621
1622	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1623	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1624
1625	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1626
1627	if (bpc > 8)
1628		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1629		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1630	else
1631		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1632		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1633		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1634
1635	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1636
1637	WREG32(mmAFMT_60958_0 + offset,
1638	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1639
1640	WREG32(mmAFMT_60958_1 + offset,
1641	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1642
1643	WREG32(mmAFMT_60958_2 + offset,
1644	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1645	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1646	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1647	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1648	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1649	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1650
1651	dce_v8_0_audio_write_speaker_allocation(encoder);
1652
1653
1654	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1655	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1656
1657	dce_v8_0_afmt_audio_select_pin(encoder);
1658	dce_v8_0_audio_write_sad_regs(encoder);
1659	dce_v8_0_audio_write_latency_fields(encoder, mode);
1660
1661	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1662	if (err < 0) {
1663		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1664		return;
1665	}
1666
1667	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1668	if (err < 0) {
1669		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1670		return;
1671	}
1672
1673	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1674
1675	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1676		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1677		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
1678
1679	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1680		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1681		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1682
1683	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1684		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1685
1686	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1687	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1688	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1689	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1690
1691	/* enable audio after setting up hw */
1692	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1693}
1694
1695static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1696{
1697	struct drm_device *dev = encoder->dev;
1698	struct amdgpu_device *adev = drm_to_adev(dev);
1699	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1700	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1701
1702	if (!dig || !dig->afmt)
1703		return;
1704
1705	/* Silent, r600_hdmi_enable will raise WARN for us */
1706	if (enable && dig->afmt->enabled)
1707		return;
1708	if (!enable && !dig->afmt->enabled)
1709		return;
1710
1711	if (!enable && dig->afmt->pin) {
1712		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1713		dig->afmt->pin = NULL;
1714	}
1715
1716	dig->afmt->enabled = enable;
1717
1718	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1719		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1720}
1721
1722static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1723{
1724	int i;
1725
1726	for (i = 0; i < adev->mode_info.num_dig; i++)
1727		adev->mode_info.afmt[i] = NULL;
1728
1729	/* DCE8 has audio blocks tied to DIG encoders */
1730	for (i = 0; i < adev->mode_info.num_dig; i++) {
1731		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1732		if (adev->mode_info.afmt[i]) {
1733			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1734			adev->mode_info.afmt[i]->id = i;
1735		} else {
1736			int j;
1737			for (j = 0; j < i; j++) {
1738				kfree(adev->mode_info.afmt[j]);
1739				adev->mode_info.afmt[j] = NULL;
1740			}
1741			return -ENOMEM;
1742		}
1743	}
1744	return 0;
1745}
1746
1747static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1748{
1749	int i;
1750
1751	for (i = 0; i < adev->mode_info.num_dig; i++) {
1752		kfree(adev->mode_info.afmt[i]);
1753		adev->mode_info.afmt[i] = NULL;
1754	}
1755}
1756
1757static const u32 vga_control_regs[6] = {
1758	mmD1VGA_CONTROL,
1759	mmD2VGA_CONTROL,
1760	mmD3VGA_CONTROL,
1761	mmD4VGA_CONTROL,
1762	mmD5VGA_CONTROL,
1763	mmD6VGA_CONTROL,
1764};
1765
1766static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1767{
1768	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1769	struct drm_device *dev = crtc->dev;
1770	struct amdgpu_device *adev = drm_to_adev(dev);
1771	u32 vga_control;
1772
1773	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1774	if (enable)
1775		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1776	else
1777		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1778}
1779
1780static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1781{
1782	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1783	struct drm_device *dev = crtc->dev;
1784	struct amdgpu_device *adev = drm_to_adev(dev);
1785
1786	if (enable)
1787		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1788	else
1789		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1790}
1791
1792static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1793				     struct drm_framebuffer *fb,
1794				     int x, int y, int atomic)
1795{
1796	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1797	struct drm_device *dev = crtc->dev;
1798	struct amdgpu_device *adev = drm_to_adev(dev);
1799	struct drm_framebuffer *target_fb;
1800	struct drm_gem_object *obj;
1801	struct amdgpu_bo *abo;
1802	uint64_t fb_location, tiling_flags;
1803	uint32_t fb_format, fb_pitch_pixels;
1804	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1805	u32 pipe_config;
1806	u32 viewport_w, viewport_h;
1807	int r;
1808	bool bypass_lut = false;
1809
1810	/* no fb bound */
1811	if (!atomic && !crtc->primary->fb) {
1812		DRM_DEBUG_KMS("No FB bound\n");
1813		return 0;
1814	}
1815
1816	if (atomic)
1817		target_fb = fb;
1818	else
1819		target_fb = crtc->primary->fb;
1820
1821	/* If atomic, assume fb object is pinned & idle & fenced and
1822	 * just update base pointers
1823	 */
1824	obj = target_fb->obj[0];
1825	abo = gem_to_amdgpu_bo(obj);
1826	r = amdgpu_bo_reserve(abo, false);
1827	if (unlikely(r != 0))
1828		return r;
1829
1830	if (!atomic) {
1831		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1832		if (unlikely(r != 0)) {
1833			amdgpu_bo_unreserve(abo);
1834			return -EINVAL;
1835		}
1836	}
1837	fb_location = amdgpu_bo_gpu_offset(abo);
1838
1839	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1840	amdgpu_bo_unreserve(abo);
1841
1842	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1843
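	/* translate the framebuffer fourcc into GRPH_CONTROL depth/format
	 * bits; big-endian builds additionally request byte swapping through
	 * GRPH_SWAP_CNTL, and >8 bpc formats must bypass the legacy LUT.
	 */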
1844	switch (target_fb->format->format) {
1845	case DRM_FORMAT_C8:
1846		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1847			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1848		break;
1849	case DRM_FORMAT_XRGB4444:
1850	case DRM_FORMAT_ARGB4444:
1851		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1852			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1853#ifdef __BIG_ENDIAN
1854		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1855#endif
1856		break;
1857	case DRM_FORMAT_XRGB1555:
1858	case DRM_FORMAT_ARGB1555:
1859		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1860			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1861#ifdef __BIG_ENDIAN
1862		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1863#endif
1864		break;
1865	case DRM_FORMAT_BGRX5551:
1866	case DRM_FORMAT_BGRA5551:
1867		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1868			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1869#ifdef __BIG_ENDIAN
1870		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1871#endif
1872		break;
1873	case DRM_FORMAT_RGB565:
1874		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1875			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1876#ifdef __BIG_ENDIAN
1877		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1878#endif
1879		break;
1880	case DRM_FORMAT_XRGB8888:
1881	case DRM_FORMAT_ARGB8888:
1882		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1883			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1884#ifdef __BIG_ENDIAN
1885		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1886#endif
1887		break;
1888	case DRM_FORMAT_XRGB2101010:
1889	case DRM_FORMAT_ARGB2101010:
1890		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1891			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1892#ifdef __BIG_ENDIAN
1893		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1894#endif
1895		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1896		bypass_lut = true;
1897		break;
1898	case DRM_FORMAT_BGRX1010102:
1899	case DRM_FORMAT_BGRA1010102:
1900		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1901			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1902#ifdef __BIG_ENDIAN
1903		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1904#endif
1905		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1906		bypass_lut = true;
1907		break;
1908	case DRM_FORMAT_XBGR8888:
1909	case DRM_FORMAT_ABGR8888:
1910		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1911				(GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1912		fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1913			(GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1914#ifdef __BIG_ENDIAN
1915		fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1916#endif
1917		break;
1918	default:
1919		DRM_ERROR("Unsupported screen format %p4cc\n",
1920			  &target_fb->format->format);
1921		return -EINVAL;
1922	}
1923
1924	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1925		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1926
1927		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1928		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1929		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1930		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1931		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1932
1933		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1934		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1935		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1936		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1937		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1938		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1939		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1940	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1941		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1942	}
1943
1944	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1945
1946	dce_v8_0_vga_enable(crtc, false);
1947
1948	/* Make sure surface address is updated at vertical blank rather than
1949	 * horizontal blank
1950	 */
1951	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1952
1953	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1954	       upper_32_bits(fb_location));
1955	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1956	       upper_32_bits(fb_location));
1957	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1958	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1959	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1960	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1961	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1962	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1963
1964	/*
1965	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1966	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1967	 * retain the full precision throughout the pipeline.
1968	 */
1969	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1970		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1971		 ~LUT_10BIT_BYPASS_EN);
1972
1973	if (bypass_lut)
1974		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1975
1976	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1977	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1978	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1979	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1980	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1981	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1982
1983	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1984	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1985
1986	dce_v8_0_grph_enable(crtc, true);
1987
1988	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1989	       target_fb->height);
1990
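	/* align the viewport start: x down to a multiple of 4 pixels and y to
	 * an even line, and round the viewport height up to an even value.
	 */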
1991	x &= ~3;
1992	y &= ~1;
1993	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1994	       (x << 16) | y);
1995	viewport_w = crtc->mode.hdisplay;
1996	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1997	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1998	       (viewport_w << 16) | viewport_h);
1999
2000	/* set pageflip to happen anywhere in vblank interval */
2001	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2002
2003	if (!atomic && fb && fb != crtc->primary->fb) {
2004		abo = gem_to_amdgpu_bo(fb->obj[0]);
2005		r = amdgpu_bo_reserve(abo, true);
2006		if (unlikely(r != 0))
2007			return r;
2008		amdgpu_bo_unpin(abo);
2009		amdgpu_bo_unreserve(abo);
2010	}
2011
2012	/* Bytes per pixel may have changed */
2013	dce_v8_0_bandwidth_update(adev);
2014
2015	return 0;
2016}
2017
2018static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2019				    struct drm_display_mode *mode)
2020{
2021	struct drm_device *dev = crtc->dev;
2022	struct amdgpu_device *adev = drm_to_adev(dev);
2023	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2024
2025	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2026		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2027		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2028	else
2029		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2030}
2031
2032static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2033{
2034	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2035	struct drm_device *dev = crtc->dev;
2036	struct amdgpu_device *adev = drm_to_adev(dev);
2037	u16 *r, *g, *b;
2038	int i;
2039
2040	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2041
2042	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2043	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2044		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2045	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2046	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2047	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2048	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2049	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2050	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2051		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2052
2053	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2054
2055	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2056	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2057	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2058
2059	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2060	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2061	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2062
2063	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2064	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2065
2066	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2067	r = crtc->gamma_store;
2068	g = r + crtc->gamma_size;
2069	b = g + crtc->gamma_size;
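	/* gamma_store holds 256 16-bit entries per channel; each component is
	 * truncated to 10 bits and packed as R[29:20] G[19:10] B[9:0].
	 */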
2070	for (i = 0; i < 256; i++) {
2071		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2072		       ((*r++ & 0xffc0) << 14) |
2073		       ((*g++ & 0xffc0) << 4) |
2074		       (*b++ >> 6));
2075	}
2076
2077	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2078	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2079		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2080		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2081	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2082	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2083		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2084	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2085	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2086		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2087	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2088	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2089		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2090	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2091	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2092	/* XXX this only needs to be programmed once per crtc at startup,
2093	 * not sure where the best place for it is
2094	 */
2095	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2096	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2097}
2098
2099static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2100{
2101	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2102	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2103
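	/* each UNIPHY block drives two links (A/B) that map to consecutive DIG
	 * encoders: UNIPHY -> 0/1, UNIPHY1 -> 2/3, UNIPHY2 -> 4/5; UNIPHY3
	 * always uses DIG 6.
	 */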
2104	switch (amdgpu_encoder->encoder_id) {
2105	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2106		if (dig->linkb)
2107			return 1;
2108		else
2109			return 0;
2110	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2111		if (dig->linkb)
2112			return 3;
2113		else
2114			return 2;
2115	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2116		if (dig->linkb)
2117			return 5;
2118		else
2119			return 4;
2120	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2121		return 6;
2122	default:
2123		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2124		return 0;
2125	}
2126}
2127
2128/**
2129 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2130 *
2131 * @crtc: drm crtc
2132 *
2133 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2134 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2135 * monitors a dedicated PPLL must be used.  If a particular board has
2136 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2137 * as there is no need to program the PLL itself.  If we are not able to
2138 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2139 * avoid messing up an existing monitor.
2140 *
2141 * Asic specific PLL information
2142 *
2143 * DCE 8.x
2144 * KB/KV
2145 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2146 * CI
2147 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2148 *
2149 */
2150static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2151{
2152	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2153	struct drm_device *dev = crtc->dev;
2154	struct amdgpu_device *adev = drm_to_adev(dev);
2155	u32 pll_in_use;
2156	int pll;
2157
2158	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2159		if (adev->clock.dp_extclk)
2160			/* skip PPLL programming if using ext clock */
2161			return ATOM_PPLL_INVALID;
2162		else {
2163			/* use the same PPLL for all DP monitors */
2164			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2165			if (pll != ATOM_PPLL_INVALID)
2166				return pll;
2167		}
2168	} else {
2169		/* use the same PPLL for all monitors with the same clock */
2170		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2171		if (pll != ATOM_PPLL_INVALID)
2172			return pll;
2173	}
2174	/* otherwise, pick one of the plls */
2175	if ((adev->asic_type == CHIP_KABINI) ||
2176	    (adev->asic_type == CHIP_MULLINS)) {
2177		/* KB/ML has PPLL1 and PPLL2 */
2178		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2179		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2180			return ATOM_PPLL2;
2181		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2182			return ATOM_PPLL1;
2183		DRM_ERROR("unable to allocate a PPLL\n");
2184		return ATOM_PPLL_INVALID;
2185	} else {
2186		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2187		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2188		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2189			return ATOM_PPLL2;
2190		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2191			return ATOM_PPLL1;
2192		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2193			return ATOM_PPLL0;
2194		DRM_ERROR("unable to allocate a PPLL\n");
2195		return ATOM_PPLL_INVALID;
2196	}
2197	return ATOM_PPLL_INVALID;
2198}
2199
2200static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2201{
2202	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2203	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2204	uint32_t cur_lock;
2205
2206	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2207	if (lock)
2208		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2209	else
2210		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2211	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2212}
2213
2214static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2215{
2216	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2217	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2218
2219	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2220	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2221	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2222}
2223
2224static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2225{
2226	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2227	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2228
2229	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2230	       upper_32_bits(amdgpu_crtc->cursor_addr));
2231	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2232	       lower_32_bits(amdgpu_crtc->cursor_addr));
2233
2234	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2235	       CUR_CONTROL__CURSOR_EN_MASK |
2236	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2237	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2238}
2239
2240static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2241				       int x, int y)
2242{
2243	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2244	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2245	int xorigin = 0, yorigin = 0;
2246
2247	amdgpu_crtc->cursor_x = x;
2248	amdgpu_crtc->cursor_y = y;
2249
2250	/* avivo cursors are offset into the total surface */
2251	x += crtc->x;
2252	y += crtc->y;
2253	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2254
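	/* a negative position is clamped to 0 and compensated for by moving
	 * the cursor hotspot by the clipped amount (bounded by the max cursor
	 * size).
	 */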
2255	if (x < 0) {
2256		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2257		x = 0;
2258	}
2259	if (y < 0) {
2260		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2261		y = 0;
2262	}
2263
2264	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2265	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2266	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2267	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2268
2269	return 0;
2270}
2271
2272static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2273				     int x, int y)
2274{
2275	int ret;
2276
2277	dce_v8_0_lock_cursor(crtc, true);
2278	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2279	dce_v8_0_lock_cursor(crtc, false);
2280
2281	return ret;
2282}
2283
2284static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2285				     struct drm_file *file_priv,
2286				     uint32_t handle,
2287				     uint32_t width,
2288				     uint32_t height,
2289				     int32_t hot_x,
2290				     int32_t hot_y)
2291{
2292	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2293	struct drm_gem_object *obj;
2294	struct amdgpu_bo *aobj;
2295	int ret;
2296
2297	if (!handle) {
2298		/* turn off cursor */
2299		dce_v8_0_hide_cursor(crtc);
2300		obj = NULL;
2301		goto unpin;
2302	}
2303
2304	if ((width > amdgpu_crtc->max_cursor_width) ||
2305	    (height > amdgpu_crtc->max_cursor_height)) {
2306		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2307		return -EINVAL;
2308	}
2309
2310	obj = drm_gem_object_lookup(file_priv, handle);
2311	if (!obj) {
2312		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2313		return -ENOENT;
2314	}
2315
2316	aobj = gem_to_amdgpu_bo(obj);
2317	ret = amdgpu_bo_reserve(aobj, false);
2318	if (ret != 0) {
2319		drm_gem_object_put(obj);
2320		return ret;
2321	}
2322
2323	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2324	amdgpu_bo_unreserve(aobj);
2325	if (ret) {
2326		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2327		drm_gem_object_put(obj);
2328		return ret;
2329	}
2330	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2331
2332	dce_v8_0_lock_cursor(crtc, true);
2333
2334	if (width != amdgpu_crtc->cursor_width ||
2335	    height != amdgpu_crtc->cursor_height ||
2336	    hot_x != amdgpu_crtc->cursor_hot_x ||
2337	    hot_y != amdgpu_crtc->cursor_hot_y) {
2338		int x, y;
2339
2340		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2341		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2342
2343		dce_v8_0_cursor_move_locked(crtc, x, y);
2344
2345		amdgpu_crtc->cursor_width = width;
2346		amdgpu_crtc->cursor_height = height;
2347		amdgpu_crtc->cursor_hot_x = hot_x;
2348		amdgpu_crtc->cursor_hot_y = hot_y;
2349	}
2350
2351	dce_v8_0_show_cursor(crtc);
2352	dce_v8_0_lock_cursor(crtc, false);
2353
2354unpin:
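	/* unpin and drop the reference on the previously installed cursor BO,
	 * if any, now that the new one (or none) is in place.
	 */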
2355	if (amdgpu_crtc->cursor_bo) {
2356		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2357		ret = amdgpu_bo_reserve(aobj, true);
2358		if (likely(ret == 0)) {
2359			amdgpu_bo_unpin(aobj);
2360			amdgpu_bo_unreserve(aobj);
2361		}
2362		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2363	}
2364
2365	amdgpu_crtc->cursor_bo = obj;
2366	return 0;
2367}
2368
2369static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2370{
2371	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2372
2373	if (amdgpu_crtc->cursor_bo) {
2374		dce_v8_0_lock_cursor(crtc, true);
2375
2376		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2377					    amdgpu_crtc->cursor_y);
2378
2379		dce_v8_0_show_cursor(crtc);
2380
2381		dce_v8_0_lock_cursor(crtc, false);
2382	}
2383}
2384
2385static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2386				   u16 *blue, uint32_t size,
2387				   struct drm_modeset_acquire_ctx *ctx)
2388{
2389	dce_v8_0_crtc_load_lut(crtc);
2390
2391	return 0;
2392}
2393
2394static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2395{
2396	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2397
2398	drm_crtc_cleanup(crtc);
2399	kfree(amdgpu_crtc);
2400}
2401
2402static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2403	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2404	.cursor_move = dce_v8_0_crtc_cursor_move,
2405	.gamma_set = dce_v8_0_crtc_gamma_set,
2406	.set_config = amdgpu_display_crtc_set_config,
2407	.destroy = dce_v8_0_crtc_destroy,
2408	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2409	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2410	.enable_vblank = amdgpu_enable_vblank_kms,
2411	.disable_vblank = amdgpu_disable_vblank_kms,
2412	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2413};
2414
2415static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2416{
2417	struct drm_device *dev = crtc->dev;
2418	struct amdgpu_device *adev = drm_to_adev(dev);
2419	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2420	unsigned type;
2421
2422	switch (mode) {
2423	case DRM_MODE_DPMS_ON:
2424		amdgpu_crtc->enabled = true;
2425		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2426		dce_v8_0_vga_enable(crtc, true);
2427		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2428		dce_v8_0_vga_enable(crtc, false);
2429		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2430		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2431						amdgpu_crtc->crtc_id);
2432		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2433		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2434		drm_crtc_vblank_on(crtc);
2435		dce_v8_0_crtc_load_lut(crtc);
2436		break;
2437	case DRM_MODE_DPMS_STANDBY:
2438	case DRM_MODE_DPMS_SUSPEND:
2439	case DRM_MODE_DPMS_OFF:
2440		drm_crtc_vblank_off(crtc);
2441		if (amdgpu_crtc->enabled) {
2442			dce_v8_0_vga_enable(crtc, true);
2443			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2444			dce_v8_0_vga_enable(crtc, false);
2445		}
2446		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2447		amdgpu_crtc->enabled = false;
2448		break;
2449	}
2450	/* adjust pm to dpms */
2451	amdgpu_dpm_compute_clocks(adev);
2452}
2453
2454static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2455{
2456	/* disable crtc pair power gating before programming */
2457	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2458	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2459	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2460}
2461
2462static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2463{
2464	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2465	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2466}
2467
2468static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2469{
2470	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2471	struct drm_device *dev = crtc->dev;
2472	struct amdgpu_device *adev = drm_to_adev(dev);
2473	struct amdgpu_atom_ss ss;
2474	int i;
2475
2476	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2477	if (crtc->primary->fb) {
2478		int r;
2479		struct amdgpu_bo *abo;
2480
2481		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2482		r = amdgpu_bo_reserve(abo, true);
2483		if (unlikely(r))
2484			DRM_ERROR("failed to reserve abo before unpin\n");
2485		else {
2486			amdgpu_bo_unpin(abo);
2487			amdgpu_bo_unreserve(abo);
2488		}
2489	}
2490	/* disable the GRPH */
2491	dce_v8_0_grph_enable(crtc, false);
2492
2493	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2494
2495	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2496		if (adev->mode_info.crtcs[i] &&
2497		    adev->mode_info.crtcs[i]->enabled &&
2498		    i != amdgpu_crtc->crtc_id &&
2499		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2500			/* one other crtc is using this pll, don't turn
2501			 * off the pll
2502			 */
2503			goto done;
2504		}
2505	}
2506
2507	switch (amdgpu_crtc->pll_id) {
2508	case ATOM_PPLL1:
2509	case ATOM_PPLL2:
2510		/* disable the ppll */
2511		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2512						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2513		break;
2514	case ATOM_PPLL0:
2515		/* disable the ppll */
2516		if ((adev->asic_type == CHIP_KAVERI) ||
2517		    (adev->asic_type == CHIP_BONAIRE) ||
2518		    (adev->asic_type == CHIP_HAWAII))
2519			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2520						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2521		break;
2522	default:
2523		break;
2524	}
2525done:
2526	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2527	amdgpu_crtc->adjusted_clock = 0;
2528	amdgpu_crtc->encoder = NULL;
2529	amdgpu_crtc->connector = NULL;
2530}
2531
2532static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2533				  struct drm_display_mode *mode,
2534				  struct drm_display_mode *adjusted_mode,
2535				  int x, int y, struct drm_framebuffer *old_fb)
2536{
2537	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2538
2539	if (!amdgpu_crtc->adjusted_clock)
2540		return -EINVAL;
2541
2542	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2543	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2544	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2545	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2546	amdgpu_atombios_crtc_scaler_setup(crtc);
2547	dce_v8_0_cursor_reset(crtc);
2548	/* update the hw version for dpm */
2549	amdgpu_crtc->hw_mode = *adjusted_mode;
2550
2551	return 0;
2552}
2553
2554static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2555				     const struct drm_display_mode *mode,
2556				     struct drm_display_mode *adjusted_mode)
2557{
2558	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2559	struct drm_device *dev = crtc->dev;
2560	struct drm_encoder *encoder;
2561
2562	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2563	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2564		if (encoder->crtc == crtc) {
2565			amdgpu_crtc->encoder = encoder;
2566			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2567			break;
2568		}
2569	}
2570	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2571		amdgpu_crtc->encoder = NULL;
2572		amdgpu_crtc->connector = NULL;
2573		return false;
2574	}
2575	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2576		return false;
2577	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2578		return false;
2579	/* pick pll */
2580	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2581	/* if we can't get a PPLL for a non-DP encoder, fail */
2582	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2583	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2584		return false;
2585
2586	return true;
2587}
2588
2589static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2590				  struct drm_framebuffer *old_fb)
2591{
2592	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2593}
2594
2595static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2596					 struct drm_framebuffer *fb,
2597					 int x, int y, enum mode_set_atomic state)
2598{
2599	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2600}
2601
2602static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2603	.dpms = dce_v8_0_crtc_dpms,
2604	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2605	.mode_set = dce_v8_0_crtc_mode_set,
2606	.mode_set_base = dce_v8_0_crtc_set_base,
2607	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2608	.prepare = dce_v8_0_crtc_prepare,
2609	.commit = dce_v8_0_crtc_commit,
2610	.disable = dce_v8_0_crtc_disable,
2611	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2612};
2613
2614static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2615{
2616	struct amdgpu_crtc *amdgpu_crtc;
2617
2618	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2619			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2620	if (amdgpu_crtc == NULL)
2621		return -ENOMEM;
2622
2623	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2624
2625	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2626	amdgpu_crtc->crtc_id = index;
2627	adev->mode_info.crtcs[index] = amdgpu_crtc;
2628
2629	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2630	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2631	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2632	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2633
2634	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2635
2636	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2637	amdgpu_crtc->adjusted_clock = 0;
2638	amdgpu_crtc->encoder = NULL;
2639	amdgpu_crtc->connector = NULL;
2640	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2641
2642	return 0;
2643}
2644
2645static int dce_v8_0_early_init(void *handle)
2646{
2647	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2648
2649	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2650	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2651
2652	dce_v8_0_set_display_funcs(adev);
2653
2654	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2655
2656	switch (adev->asic_type) {
2657	case CHIP_BONAIRE:
2658	case CHIP_HAWAII:
2659		adev->mode_info.num_hpd = 6;
2660		adev->mode_info.num_dig = 6;
2661		break;
2662	case CHIP_KAVERI:
2663		adev->mode_info.num_hpd = 6;
2664		adev->mode_info.num_dig = 7;
2665		break;
2666	case CHIP_KABINI:
2667	case CHIP_MULLINS:
2668		adev->mode_info.num_hpd = 6;
2669		adev->mode_info.num_dig = 6; /* ? */
2670		break;
2671	default:
2672		/* FIXME: not supported yet */
2673		return -EINVAL;
2674	}
2675
2676	dce_v8_0_set_irq_funcs(adev);
2677
2678	return 0;
2679}
2680
2681static int dce_v8_0_sw_init(void *handle)
2682{
2683	int r, i;
2684	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2685
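	/* vblank/vline interrupts use IV src_ids 1..num_crtc (D1..D6) */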
2686	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2687		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2688		if (r)
2689			return r;
2690	}
2691
2692	for (i = 8; i < 20; i += 2) {
2693		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2694		if (r)
2695			return r;
2696	}
2697
2698	/* HPD hotplug */
2699	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2700	if (r)
2701		return r;
2702
2703	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2704
2705	adev_to_drm(adev)->mode_config.async_page_flip = true;
2706
2707	adev_to_drm(adev)->mode_config.max_width = 16384;
2708	adev_to_drm(adev)->mode_config.max_height = 16384;
2709
2710	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2711	if (adev->asic_type == CHIP_HAWAII)
2712		/* disable prefer shadow for now due to hibernation issues */
2713		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
2714	else
2715		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2716
2717	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2718
2719	r = amdgpu_display_modeset_create_props(adev);
2720	if (r)
2721		return r;
2722
2723	adev_to_drm(adev)->mode_config.max_width = 16384;
2724	adev_to_drm(adev)->mode_config.max_height = 16384;
2725
2726	/* allocate crtcs */
2727	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2728		r = dce_v8_0_crtc_init(adev, i);
2729		if (r)
2730			return r;
2731	}
2732
2733	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2734		amdgpu_display_print_display_setup(adev_to_drm(adev));
2735	else
2736		return -EINVAL;
2737
2738	/* setup afmt */
2739	r = dce_v8_0_afmt_init(adev);
2740	if (r)
2741		return r;
2742
2743	r = dce_v8_0_audio_init(adev);
2744	if (r)
2745		return r;
2746
2747	/* Disable vblank IRQs aggressively for power-saving */
2748	/* XXX: can this be enabled for DC? */
2749	adev_to_drm(adev)->vblank_disable_immediate = true;
2750
2751	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2752	if (r)
2753		return r;
2754
2755	/* Pre-DCE11 */
2756	INIT_DELAYED_WORK(&adev->hotplug_work,
2757		  amdgpu_display_hotplug_work_func);
2758
2759	drm_kms_helper_poll_init(adev_to_drm(adev));
2760
2761	adev->mode_info.mode_config_initialized = true;
2762	return 0;
2763}
2764
2765static int dce_v8_0_sw_fini(void *handle)
2766{
2767	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2768
2769	kfree(adev->mode_info.bios_hardcoded_edid);
2770
2771	drm_kms_helper_poll_fini(adev_to_drm(adev));
2772
2773	dce_v8_0_audio_fini(adev);
2774
2775	dce_v8_0_afmt_fini(adev);
2776
2777	drm_mode_config_cleanup(adev_to_drm(adev));
2778	adev->mode_info.mode_config_initialized = false;
2779
2780	return 0;
2781}
2782
2783static int dce_v8_0_hw_init(void *handle)
2784{
2785	int i;
2786	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2787
2788	/* disable vga render */
2789	dce_v8_0_set_vga_render_state(adev, false);
2790	/* init dig PHYs, disp eng pll */
2791	amdgpu_atombios_encoder_init_dig(adev);
2792	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2793
2794	/* initialize hpd */
2795	dce_v8_0_hpd_init(adev);
2796
2797	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2798		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2799	}
2800
2801	dce_v8_0_pageflip_interrupt_init(adev);
2802
2803	return 0;
2804}
2805
2806static int dce_v8_0_hw_fini(void *handle)
2807{
2808	int i;
2809	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2810
2811	dce_v8_0_hpd_fini(adev);
2812
2813	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2814		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2815	}
2816
2817	dce_v8_0_pageflip_interrupt_fini(adev);
2818
2819	flush_delayed_work(&adev->hotplug_work);
2820
2821	return 0;
2822}
2823
2824static int dce_v8_0_suspend(void *handle)
2825{
2826	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2827	int r;
2828
2829	r = amdgpu_display_suspend_helper(adev);
2830	if (r)
2831		return r;
2832
2833	adev->mode_info.bl_level =
2834		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2835
2836	return dce_v8_0_hw_fini(handle);
2837}
2838
2839static int dce_v8_0_resume(void *handle)
2840{
2841	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2842	int ret;
2843
2844	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2845							   adev->mode_info.bl_level);
2846
2847	ret = dce_v8_0_hw_init(handle);
2848
2849	/* turn on the BL */
2850	if (adev->mode_info.bl_encoder) {
2851		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2852								  adev->mode_info.bl_encoder);
2853		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2854						    bl_level);
2855	}
2856	if (ret)
2857		return ret;
2858
2859	return amdgpu_display_resume_helper(adev);
2860}
2861
2862static bool dce_v8_0_is_idle(void *handle)
2863{
2864	return true;
2865}
2866
2867static int dce_v8_0_wait_for_idle(void *handle)
2868{
2869	return 0;
2870}
2871
2872static int dce_v8_0_soft_reset(void *handle)
2873{
2874	u32 srbm_soft_reset = 0, tmp;
2875	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2876
2877	if (dce_v8_0_is_display_hung(adev))
2878		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2879
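	/* pulse the DC soft reset: assert the bit, wait ~50us, then deassert
	 * it; the read back after each write posts the register update.
	 */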
2880	if (srbm_soft_reset) {
2881		tmp = RREG32(mmSRBM_SOFT_RESET);
2882		tmp |= srbm_soft_reset;
2883		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2884		WREG32(mmSRBM_SOFT_RESET, tmp);
2885		tmp = RREG32(mmSRBM_SOFT_RESET);
2886
2887		udelay(50);
2888
2889		tmp &= ~srbm_soft_reset;
2890		WREG32(mmSRBM_SOFT_RESET, tmp);
2891		tmp = RREG32(mmSRBM_SOFT_RESET);
2892
2893		/* Wait a little for things to settle down */
2894		udelay(50);
2895	}
2896	return 0;
2897}
2898
2899static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2900						     int crtc,
2901						     enum amdgpu_interrupt_state state)
2902{
2903	u32 reg_block, lb_interrupt_mask;
2904
2905	if (crtc >= adev->mode_info.num_crtc) {
2906		DRM_DEBUG("invalid crtc %d\n", crtc);
2907		return;
2908	}
2909
2910	switch (crtc) {
2911	case 0:
2912		reg_block = CRTC0_REGISTER_OFFSET;
2913		break;
2914	case 1:
2915		reg_block = CRTC1_REGISTER_OFFSET;
2916		break;
2917	case 2:
2918		reg_block = CRTC2_REGISTER_OFFSET;
2919		break;
2920	case 3:
2921		reg_block = CRTC3_REGISTER_OFFSET;
2922		break;
2923	case 4:
2924		reg_block = CRTC4_REGISTER_OFFSET;
2925		break;
2926	case 5:
2927		reg_block = CRTC5_REGISTER_OFFSET;
2928		break;
2929	default:
2930		DRM_DEBUG("invalid crtc %d\n", crtc);
2931		return;
2932	}
2933
2934	switch (state) {
2935	case AMDGPU_IRQ_STATE_DISABLE:
2936		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2937		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2938		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2939		break;
2940	case AMDGPU_IRQ_STATE_ENABLE:
2941		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2942		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2943		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2944		break;
2945	default:
2946		break;
2947	}
2948}
2949
2950static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2951						    int crtc,
2952						    enum amdgpu_interrupt_state state)
2953{
2954	u32 reg_block, lb_interrupt_mask;
2955
2956	if (crtc >= adev->mode_info.num_crtc) {
2957		DRM_DEBUG("invalid crtc %d\n", crtc);
2958		return;
2959	}
2960
2961	switch (crtc) {
2962	case 0:
2963		reg_block = CRTC0_REGISTER_OFFSET;
2964		break;
2965	case 1:
2966		reg_block = CRTC1_REGISTER_OFFSET;
2967		break;
2968	case 2:
2969		reg_block = CRTC2_REGISTER_OFFSET;
2970		break;
2971	case 3:
2972		reg_block = CRTC3_REGISTER_OFFSET;
2973		break;
2974	case 4:
2975		reg_block = CRTC4_REGISTER_OFFSET;
2976		break;
2977	case 5:
2978		reg_block = CRTC5_REGISTER_OFFSET;
2979		break;
2980	default:
2981		DRM_DEBUG("invalid crtc %d\n", crtc);
2982		return;
2983	}
2984
2985	switch (state) {
2986	case AMDGPU_IRQ_STATE_DISABLE:
2987		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2988		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2989		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2990		break;
2991	case AMDGPU_IRQ_STATE_ENABLE:
2992		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2993		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2994		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2995		break;
2996	default:
2997		break;
2998	}
2999}
3000
3001static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3002					    struct amdgpu_irq_src *src,
3003					    unsigned type,
3004					    enum amdgpu_interrupt_state state)
3005{
3006	u32 dc_hpd_int_cntl;
3007
3008	if (type >= adev->mode_info.num_hpd) {
3009		DRM_DEBUG("invalid hpd %d\n", type);
3010		return 0;
3011	}
3012
3013	switch (state) {
3014	case AMDGPU_IRQ_STATE_DISABLE:
3015		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3016		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3017		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3018		break;
3019	case AMDGPU_IRQ_STATE_ENABLE:
3020		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3021		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3022		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3023		break;
3024	default:
3025		break;
3026	}
3027
3028	return 0;
3029}
3030
3031static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3032					     struct amdgpu_irq_src *src,
3033					     unsigned type,
3034					     enum amdgpu_interrupt_state state)
3035{
3036	switch (type) {
3037	case AMDGPU_CRTC_IRQ_VBLANK1:
3038		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3039		break;
3040	case AMDGPU_CRTC_IRQ_VBLANK2:
3041		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3042		break;
3043	case AMDGPU_CRTC_IRQ_VBLANK3:
3044		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3045		break;
3046	case AMDGPU_CRTC_IRQ_VBLANK4:
3047		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3048		break;
3049	case AMDGPU_CRTC_IRQ_VBLANK5:
3050		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3051		break;
3052	case AMDGPU_CRTC_IRQ_VBLANK6:
3053		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3054		break;
3055	case AMDGPU_CRTC_IRQ_VLINE1:
3056		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3057		break;
3058	case AMDGPU_CRTC_IRQ_VLINE2:
3059		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3060		break;
3061	case AMDGPU_CRTC_IRQ_VLINE3:
3062		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3063		break;
3064	case AMDGPU_CRTC_IRQ_VLINE4:
3065		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3066		break;
3067	case AMDGPU_CRTC_IRQ_VLINE5:
3068		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3069		break;
3070	case AMDGPU_CRTC_IRQ_VLINE6:
3071		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3072		break;
3073	default:
3074		break;
3075	}
3076	return 0;
3077}
3078
3079static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3080			     struct amdgpu_irq_src *source,
3081			     struct amdgpu_iv_entry *entry)
3082{
3083	unsigned crtc = entry->src_id - 1;
3084	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3085	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3086								    crtc);
3087
3088	switch (entry->src_data[0]) {
3089	case 0: /* vblank */
3090		if (disp_int & interrupt_status_offsets[crtc].vblank)
3091			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3092		else
3093			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3094
3095		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3096			drm_handle_vblank(adev_to_drm(adev), crtc);
3097		}
3098		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3099		break;
3100	case 1: /* vline */
3101		if (disp_int & interrupt_status_offsets[crtc].vline)
3102			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3103		else
3104			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3105
3106		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3107		break;
3108	default:
3109		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3110		break;
3111	}
3112
3113	return 0;
3114}
3115
3116static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3117						 struct amdgpu_irq_src *src,
3118						 unsigned type,
3119						 enum amdgpu_interrupt_state state)
3120{
3121	u32 reg;
3122
3123	if (type >= adev->mode_info.num_crtc) {
3124		DRM_ERROR("invalid pageflip crtc %d\n", type);
3125		return -EINVAL;
3126	}
3127
3128	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3129	if (state == AMDGPU_IRQ_STATE_DISABLE)
3130		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3131		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3132	else
3133		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3134		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3135
3136	return 0;
3137}
3138
3139static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3140				struct amdgpu_irq_src *source,
3141				struct amdgpu_iv_entry *entry)
3142{
3143	unsigned long flags;
3144	unsigned crtc_id;
3145	struct amdgpu_crtc *amdgpu_crtc;
3146	struct amdgpu_flip_work *works;
3147
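	/* pageflip IV src_ids start at 8 and come in pairs per crtc, so
	 * (src_id - 8) >> 1 recovers the crtc index.
	 */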
3148	crtc_id = (entry->src_id - 8) >> 1;
3149	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3150
3151	if (crtc_id >= adev->mode_info.num_crtc) {
3152		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3153		return -EINVAL;
3154	}
3155
3156	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3157	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3158		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3159		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3160
3161	/* IRQ could occur while still in the initial setup stage */
3162	if (amdgpu_crtc == NULL)
3163		return 0;
3164
3165	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3166	works = amdgpu_crtc->pflip_works;
3167	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3168		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3169						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3170						amdgpu_crtc->pflip_status,
3171						AMDGPU_FLIP_SUBMITTED);
3172		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3173		return 0;
3174	}
3175
3176	/* page flip completed. clean up */
3177	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3178	amdgpu_crtc->pflip_works = NULL;
3179
3180	/* wake up userspace */
3181	if (works->event)
3182		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3183
3184	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3185
3186	drm_crtc_vblank_put(&amdgpu_crtc->base);
3187	schedule_work(&works->unpin_work);
3188
3189	return 0;
3190}
3191
3192static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3193			    struct amdgpu_irq_src *source,
3194			    struct amdgpu_iv_entry *entry)
3195{
3196	uint32_t disp_int, mask;
3197	unsigned hpd;
3198
3199	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3200		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3201		return 0;
3202	}
3203
3204	hpd = entry->src_data[0];
3205	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3206	mask = interrupt_status_offsets[hpd].hpd;
3207
3208	if (disp_int & mask) {
3209		dce_v8_0_hpd_int_ack(adev, hpd);
3210		schedule_delayed_work(&adev->hotplug_work, 0);
3211		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3212	}
3213
3214	return 0;
3215
3216}
3217
3218static int dce_v8_0_set_clockgating_state(void *handle,
3219					  enum amd_clockgating_state state)
3220{
3221	return 0;
3222}
3223
3224static int dce_v8_0_set_powergating_state(void *handle,
3225					  enum amd_powergating_state state)
3226{
3227	return 0;
3228}
3229
3230static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3231	.name = "dce_v8_0",
3232	.early_init = dce_v8_0_early_init,
3233	.late_init = NULL,
3234	.sw_init = dce_v8_0_sw_init,
3235	.sw_fini = dce_v8_0_sw_fini,
3236	.hw_init = dce_v8_0_hw_init,
3237	.hw_fini = dce_v8_0_hw_fini,
3238	.suspend = dce_v8_0_suspend,
3239	.resume = dce_v8_0_resume,
3240	.is_idle = dce_v8_0_is_idle,
3241	.wait_for_idle = dce_v8_0_wait_for_idle,
3242	.soft_reset = dce_v8_0_soft_reset,
3243	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3244	.set_powergating_state = dce_v8_0_set_powergating_state,
3245};
3246
3247static void
3248dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3249			  struct drm_display_mode *mode,
3250			  struct drm_display_mode *adjusted_mode)
3251{
3252	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3253
3254	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3255
3256	/* need to call this here rather than in prepare() since we need some crtc info */
3257	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3258
3259	/* set scaler clears this on some chips */
3260	dce_v8_0_set_interleave(encoder->crtc, mode);
3261
3262	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3263		dce_v8_0_afmt_enable(encoder, true);
3264		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3265	}
3266}
3267
3268static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3269{
3270	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3271	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3272	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3273
3274	if ((amdgpu_encoder->active_device &
3275	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3276	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3277	     ENCODER_OBJECT_ID_NONE)) {
3278		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3279		if (dig) {
3280			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3281			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3282				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3283		}
3284	}
3285
3286	amdgpu_atombios_scratch_regs_lock(adev, true);
3287
3288	if (connector) {
3289		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3290
3291		/* select the clock/data port if it uses a router */
3292		if (amdgpu_connector->router.cd_valid)
3293			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3294
3295		/* turn eDP panel on for mode set */
3296		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3297			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3298							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3299	}
3300
3301	/* this is needed for the pll/ss setup to work correctly in some cases */
3302	amdgpu_atombios_encoder_set_crtc_source(encoder);
3303	/* set up the FMT blocks */
3304	dce_v8_0_program_fmt(encoder);
3305}
3306
3307static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3308{
3309	struct drm_device *dev = encoder->dev;
3310	struct amdgpu_device *adev = drm_to_adev(dev);
3311
3312	/* need to call this here as we need the crtc set up */
3313	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3314	amdgpu_atombios_scratch_regs_lock(adev, false);
3315}
3316
3317static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3318{
3319	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3320	struct amdgpu_encoder_atom_dig *dig;
3321
3322	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3323
3324	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3325		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3326			dce_v8_0_afmt_enable(encoder, false);
3327		dig = amdgpu_encoder->enc_priv;
3328		dig->dig_encoder = -1;
3329	}
3330	amdgpu_encoder->active_device = 0;
3331}
3332
3333/* these are handled by the primary encoders */
3334static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3335{
3336
3337}
3338
3339static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3340{
3341
3342}
3343
3344static void
3345dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3346		      struct drm_display_mode *mode,
3347		      struct drm_display_mode *adjusted_mode)
3348{
3349
3350}
3351
3352static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3353{
3354
3355}
3356
3357static void
3358dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3359{
3360
3361}
3362
3363static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3364	.dpms = dce_v8_0_ext_dpms,
3365	.prepare = dce_v8_0_ext_prepare,
3366	.mode_set = dce_v8_0_ext_mode_set,
3367	.commit = dce_v8_0_ext_commit,
3368	.disable = dce_v8_0_ext_disable,
3369	/* no detect for TMDS/LVDS yet */
3370};
3371
3372static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3373	.dpms = amdgpu_atombios_encoder_dpms,
3374	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3375	.prepare = dce_v8_0_encoder_prepare,
3376	.mode_set = dce_v8_0_encoder_mode_set,
3377	.commit = dce_v8_0_encoder_commit,
3378	.disable = dce_v8_0_encoder_disable,
3379	.detect = amdgpu_atombios_encoder_dig_detect,
3380};
3381
3382static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3383	.dpms = amdgpu_atombios_encoder_dpms,
3384	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3385	.prepare = dce_v8_0_encoder_prepare,
3386	.mode_set = dce_v8_0_encoder_mode_set,
3387	.commit = dce_v8_0_encoder_commit,
3388	.detect = amdgpu_atombios_encoder_dac_detect,
3389};
3390
3391static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3392{
3393	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3394	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3395		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3396	kfree(amdgpu_encoder->enc_priv);
3397	drm_encoder_cleanup(encoder);
3398	kfree(amdgpu_encoder);
3399}
3400
3401static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3402	.destroy = dce_v8_0_encoder_destroy,
3403};
3404
3405static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3406				 uint32_t encoder_enum,
3407				 uint32_t supported_device,
3408				 u16 caps)
3409{
3410	struct drm_device *dev = adev_to_drm(adev);
3411	struct drm_encoder *encoder;
3412	struct amdgpu_encoder *amdgpu_encoder;
3413
3414	/* see if we already added it */
3415	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3416		amdgpu_encoder = to_amdgpu_encoder(encoder);
3417		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3418			amdgpu_encoder->devices |= supported_device;
3419			return;
3420		}
3421
3422	}
3423
3424	/* add a new one */
3425	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3426	if (!amdgpu_encoder)
3427		return;
3428
3429	encoder = &amdgpu_encoder->base;
3430	switch (adev->mode_info.num_crtc) {
3431	case 1:
3432		encoder->possible_crtcs = 0x1;
3433		break;
3434	case 2:
3435	default:
3436		encoder->possible_crtcs = 0x3;
3437		break;
3438	case 4:
3439		encoder->possible_crtcs = 0xf;
3440		break;
3441	case 6:
3442		encoder->possible_crtcs = 0x3f;
3443		break;
3444	}
3445
3446	amdgpu_encoder->enc_priv = NULL;
3447
3448	amdgpu_encoder->encoder_enum = encoder_enum;
3449	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3450	amdgpu_encoder->devices = supported_device;
3451	amdgpu_encoder->rmx_type = RMX_OFF;
3452	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3453	amdgpu_encoder->is_ext_encoder = false;
3454	amdgpu_encoder->caps = caps;
3455
3456	switch (amdgpu_encoder->encoder_id) {
3457	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3458	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3459		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3460				 DRM_MODE_ENCODER_DAC, NULL);
3461		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3462		break;
3463	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3464	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3465	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3466	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3467	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3468		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3469			amdgpu_encoder->rmx_type = RMX_FULL;
3470			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3471					 DRM_MODE_ENCODER_LVDS, NULL);
3472			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3473		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3474			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3475					 DRM_MODE_ENCODER_DAC, NULL);
3476			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3477		} else {
3478			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3479					 DRM_MODE_ENCODER_TMDS, NULL);
3480			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3481		}
3482		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3483		break;
3484	case ENCODER_OBJECT_ID_SI170B:
3485	case ENCODER_OBJECT_ID_CH7303:
3486	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3487	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3488	case ENCODER_OBJECT_ID_TITFP513:
3489	case ENCODER_OBJECT_ID_VT1623:
3490	case ENCODER_OBJECT_ID_HDMI_SI1930:
3491	case ENCODER_OBJECT_ID_TRAVIS:
3492	case ENCODER_OBJECT_ID_NUTMEG:
3493		/* these are handled by the primary encoders */
3494		amdgpu_encoder->is_ext_encoder = true;
3495		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3496			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3497					 DRM_MODE_ENCODER_LVDS, NULL);
3498		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3499			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3500					 DRM_MODE_ENCODER_DAC, NULL);
3501		else
3502			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3503					 DRM_MODE_ENCODER_TMDS, NULL);
3504		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3505		break;
3506	}
3507}
3508
3509static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3510	.bandwidth_update = &dce_v8_0_bandwidth_update,
3511	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3512	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3513	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3514	.hpd_sense = &dce_v8_0_hpd_sense,
3515	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3516	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3517	.page_flip = &dce_v8_0_page_flip,
3518	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3519	.add_encoder = &dce_v8_0_encoder_add,
3520	.add_connector = &amdgpu_connector_add,
3521};
3522
3523static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3524{
3525	adev->mode_info.funcs = &dce_v8_0_display_funcs;
3526}
3527
3528static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3529	.set = dce_v8_0_set_crtc_interrupt_state,
3530	.process = dce_v8_0_crtc_irq,
3531};
3532
3533static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3534	.set = dce_v8_0_set_pageflip_interrupt_state,
3535	.process = dce_v8_0_pageflip_irq,
3536};
3537
3538static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3539	.set = dce_v8_0_set_hpd_interrupt_state,
3540	.process = dce_v8_0_hpd_irq,
3541};
3542
3543static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3544{
3545	if (adev->mode_info.num_crtc > 0)
3546		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3547	else
3548		adev->crtc_irq.num_types = 0;
3549	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3550
3551	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3552	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3553
3554	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3555	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3556}
3557
3558const struct amdgpu_ip_block_version dce_v8_0_ip_block = {
 
3559	.type = AMD_IP_BLOCK_TYPE_DCE,
3560	.major = 8,
3561	.minor = 0,
3562	.rev = 0,
3563	.funcs = &dce_v8_0_ip_funcs,
3564};
3565
3566const struct amdgpu_ip_block_version dce_v8_1_ip_block = {
 
3567	.type = AMD_IP_BLOCK_TYPE_DCE,
3568	.major = 8,
3569	.minor = 1,
3570	.rev = 0,
3571	.funcs = &dce_v8_0_ip_funcs,
3572};
3573
3574const struct amdgpu_ip_block_version dce_v8_2_ip_block = {
 
3575	.type = AMD_IP_BLOCK_TYPE_DCE,
3576	.major = 8,
3577	.minor = 2,
3578	.rev = 0,
3579	.funcs = &dce_v8_0_ip_funcs,
3580};
3581
3582const struct amdgpu_ip_block_version dce_v8_3_ip_block = {
 
3583	.type = AMD_IP_BLOCK_TYPE_DCE,
3584	.major = 8,
3585	.minor = 3,
3586	.rev = 0,
3587	.funcs = &dce_v8_0_ip_funcs,
3588};
3589
3590const struct amdgpu_ip_block_version dce_v8_5_ip_block = {
 
3591	.type = AMD_IP_BLOCK_TYPE_DCE,
3592	.major = 8,
3593	.minor = 5,
3594	.rev = 0,
3595	.funcs = &dce_v8_0_ip_funcs,
3596};
v4.10.11
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "drmP.h"
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "cikd.h"
  28#include "atom.h"
  29#include "amdgpu_atombios.h"
  30#include "atombios_crtc.h"
  31#include "atombios_encoders.h"
  32#include "amdgpu_pll.h"
  33#include "amdgpu_connectors.h"
 
  34#include "dce_v8_0.h"
  35
  36#include "dce/dce_8_0_d.h"
  37#include "dce/dce_8_0_sh_mask.h"
  38
  39#include "gca/gfx_7_2_enum.h"
  40
  41#include "gmc/gmc_7_1_d.h"
  42#include "gmc/gmc_7_1_sh_mask.h"
  43
  44#include "oss/oss_2_0_d.h"
  45#include "oss/oss_2_0_sh_mask.h"
  46
  47static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  49
  50static const u32 crtc_offsets[6] =
  51{
  52	CRTC0_REGISTER_OFFSET,
  53	CRTC1_REGISTER_OFFSET,
  54	CRTC2_REGISTER_OFFSET,
  55	CRTC3_REGISTER_OFFSET,
  56	CRTC4_REGISTER_OFFSET,
  57	CRTC5_REGISTER_OFFSET
  58};
  59
  60static const u32 hpd_offsets[] =
  61{
  62	HPD0_REGISTER_OFFSET,
  63	HPD1_REGISTER_OFFSET,
  64	HPD2_REGISTER_OFFSET,
  65	HPD3_REGISTER_OFFSET,
  66	HPD4_REGISTER_OFFSET,
  67	HPD5_REGISTER_OFFSET
  68};
  69
  70static const uint32_t dig_offsets[] = {
  71	CRTC0_REGISTER_OFFSET,
  72	CRTC1_REGISTER_OFFSET,
  73	CRTC2_REGISTER_OFFSET,
  74	CRTC3_REGISTER_OFFSET,
  75	CRTC4_REGISTER_OFFSET,
  76	CRTC5_REGISTER_OFFSET,
  77	(0x13830 - 0x7030) >> 2,
  78};
  79
  80static const struct {
  81	uint32_t	reg;
  82	uint32_t	vblank;
  83	uint32_t	vline;
  84	uint32_t	hpd;
  85
  86} interrupt_status_offsets[6] = { {
  87	.reg = mmDISP_INTERRUPT_STATUS,
  88	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  89	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  90	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  91}, {
  92	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  93	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 116} };
 117
 118static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119				     u32 block_offset, u32 reg)
 120{
 121	unsigned long flags;
 122	u32 r;
 123
 124	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 125	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 126	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 127	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 128
 129	return r;
 130}
 131
 132static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 133				      u32 block_offset, u32 reg, u32 v)
 134{
 135	unsigned long flags;
 136
 137	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 138	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 139	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 140	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 141}
 142
 143static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 144{
 145	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
 146			CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
 147		return true;
 148	else
 149		return false;
 150}
 151
 152static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 153{
 154	u32 pos1, pos2;
 155
 156	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 157	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 158
 159	if (pos1 != pos2)
 160		return true;
 161	else
 162		return false;
 163}
 164
 165/**
 166 * dce_v8_0_vblank_wait - vblank wait asic callback.
 167 *
 168 * @adev: amdgpu_device pointer
 169 * @crtc: crtc to wait for vblank on
 170 *
 171 * Wait for vblank on the requested crtc (evergreen+).
 172 */
 173static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 174{
 175	unsigned i = 100;
 176
 177	if (crtc >= adev->mode_info.num_crtc)
 178		return;
 179
 180	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 181		return;
 182
 183	/* depending on when we hit vblank, we may be close to active; if so,
 184	 * wait for another frame.
 185	 */
 186	while (dce_v8_0_is_in_vblank(adev, crtc)) {
 187		if (i++ == 100) {
 188			i = 0;
 189			if (!dce_v8_0_is_counter_moving(adev, crtc))
 190				break;
 191		}
 192	}
 193
 194	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
 195		if (i++ == 100) {
 196			i = 0;
 197			if (!dce_v8_0_is_counter_moving(adev, crtc))
 198				break;
 199		}
 200	}
 201}
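/*
 * Editorial note (added, not from the original source): the first loop
 * waits for any vblank period we may already be in to end, and the second
 * waits for the next one to begin, so callers return near the start of a
 * fresh vblank interval; the counter-moving check bails out if the CRTC
 * timing has stalled.
 */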
 202
 203static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 204{
 205	if (crtc >= adev->mode_info.num_crtc)
 206		return 0;
 207	else
 208		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 209}
 210
 211static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 212{
 213	unsigned i;
 214
 215	/* Enable pflip interrupts */
 216	for (i = 0; i < adev->mode_info.num_crtc; i++)
 217		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 218}
 219
 220static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 221{
 222	unsigned i;
 223
 224	/* Disable pflip interrupts */
 225	for (i = 0; i < adev->mode_info.num_crtc; i++)
 226		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 227}
 228
 229/**
 230 * dce_v8_0_page_flip - pageflip callback.
 231 *
 232 * @adev: amdgpu_device pointer
 233 * @crtc_id: crtc to cleanup pageflip on
 234 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 235 *
 236 * Triggers the actual pageflip by updating the primary
 237 * surface base address.
 238 */
 239static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 240			       int crtc_id, u64 crtc_base, bool async)
 241{
 242	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
 243
 244	/* flip at hsync for async, default is vsync */
 245	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 246	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 247	/* update the primary scanout addresses */
 248	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 249	       upper_32_bits(crtc_base));
 250	/* writing to the low address triggers the update */
 251	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 252	       lower_32_bits(crtc_base));
 253	/* post the write */
 254	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 255}
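/*
 * Worked example (added commentary, values are hypothetical): flipping to
 * a GPU MC address of 0x1_2345_0000 writes 0x1 to
 * GRPH_PRIMARY_SURFACE_ADDRESS_HIGH and 0x23450000 to
 * GRPH_PRIMARY_SURFACE_ADDRESS; the low write is what latches the new
 * scanout address, and the trailing read simply posts the write.
 */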
 256
 257static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 258					u32 *vbl, u32 *position)
 259{
 260	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 261		return -EINVAL;
 262
 263	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 264	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 265
 266	return 0;
 267}
 268
 269/**
 270 * dce_v8_0_hpd_sense - hpd sense callback.
 271 *
 272 * @adev: amdgpu_device pointer
 273 * @hpd: hpd (hotplug detect) pin
 274 *
 275 * Checks if a digital monitor is connected (evergreen+).
 276 * Returns true if connected, false if not connected.
 277 */
 278static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 279			       enum amdgpu_hpd_id hpd)
 280{
 281	bool connected = false;
 282
 283	if (hpd >= adev->mode_info.num_hpd)
 284		return connected;
 285
 286	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 287	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 288		connected = true;
 289
 290	return connected;
 291}
 292
 293/**
 294 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 295 *
 296 * @adev: amdgpu_device pointer
 297 * @hpd: hpd (hotplug detect) pin
 298 *
 299 * Set the polarity of the hpd pin (evergreen+).
 300 */
 301static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 302				      enum amdgpu_hpd_id hpd)
 303{
 304	u32 tmp;
 305	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 306
 307	if (hpd >= adev->mode_info.num_hpd)
 308		return;
 309
 310	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 311	if (connected)
 312		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 313	else
 314		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 315	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 316}
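/*
 * Added note: the polarity bit is set opposite to the current sense state,
 * so the HPD interrupt always arms for the *next* transition -- a
 * connected pin waits for unplug, a disconnected pin for plug-in.
 */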
 317
 318/**
 319 * dce_v8_0_hpd_init - hpd setup callback.
 320 *
 321 * @adev: amdgpu_device pointer
 322 *
 323 * Set up the hpd pins used by the card (evergreen+).
 324 * Enable the pin, set the polarity, and enable the hpd interrupts.
 325 */
 326static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 327{
 328	struct drm_device *dev = adev->ddev;
 329	struct drm_connector *connector;
 
 330	u32 tmp;
 331
 332	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
 333		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 334
 335		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 336			continue;
 337
 338		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 339		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 340		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 341
 342		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 343		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 344			/* don't try to enable hpd on eDP or LVDS; this avoids breaking
 345			 * the aux dp channel on iMacs and helps (but doesn't completely
 346			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143;
 347			 * it also avoids interrupt storms during dpms.
 348			 */
 349			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 350			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 351			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 352			continue;
 353		}
 354
 
 355		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 356		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 357	}
 
 358}
 359
 360/**
 361 * dce_v8_0_hpd_fini - hpd tear down callback.
 362 *
 363 * @adev: amdgpu_device pointer
 364 *
 365 * Tear down the hpd pins used by the card (evergreen+).
 366 * Disable the hpd interrupts.
 367 */
 368static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 369{
 370	struct drm_device *dev = adev->ddev;
 371	struct drm_connector *connector;
 
 372	u32 tmp;
 373
 374	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
 375		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 376
 377		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 378			continue;
 379
 380		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 381		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 382		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 383
 384		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 385	}
 
 386}
 387
 388static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 389{
 390	return mmDC_GPIO_HPD_A;
 391}
 392
 393static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 394{
 395	u32 crtc_hung = 0;
 396	u32 crtc_status[6];
 397	u32 i, j, tmp;
 398
 399	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 400		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
 401			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 402			crtc_hung |= (1 << i);
 403		}
 404	}
 405
 406	for (j = 0; j < 10; j++) {
 407		for (i = 0; i < adev->mode_info.num_crtc; i++) {
 408			if (crtc_hung & (1 << i)) {
 409				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 410				if (tmp != crtc_status[i])
 411					crtc_hung &= ~(1 << i);
 412			}
 413		}
 414		if (crtc_hung == 0)
 415			return false;
 416		udelay(100);
 417	}
 418
 419	return true;
 420}
 421
 422static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
 423				    struct amdgpu_mode_mc_save *save)
 424{
 425	u32 crtc_enabled, tmp;
 426	int i;
 427
 428	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
 429	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 430
 431	/* disable VGA render */
 432	tmp = RREG32(mmVGA_RENDER_CONTROL);
 433	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 434	WREG32(mmVGA_RENDER_CONTROL, tmp);
 435
 436	/* blank the display controllers */
 437	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 438		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 439					     CRTC_CONTROL, CRTC_MASTER_EN);
 440		if (crtc_enabled) {
 441#if 1
 442			save->crtc_enabled[i] = true;
 443			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 444			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
 445				/* this is correct only for RGB; black is 0 */
 446				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
 447				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
 448				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 449			}
 450			mdelay(20);
 451#else
 452			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 453			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 454			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 455			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 456			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 457			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 458			save->crtc_enabled[i] = false;
 459			/* ***** */
 460#endif
 461		} else {
 462			save->crtc_enabled[i] = false;
 463		}
 464	}
 465}
 466
 467static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
 468				      struct amdgpu_mode_mc_save *save)
 469{
 470	u32 tmp;
 471	int i;
 472
 473	/* update crtc base addresses */
 474	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 475		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 476		       upper_32_bits(adev->mc.vram_start));
 477		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 478		       (u32)adev->mc.vram_start);
 479
 480		if (save->crtc_enabled[i]) {
 481			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 482			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
 483			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 484		}
 485		mdelay(20);
 486	}
 487
 488	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
 489	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
 490
 491	/* Unlock vga access */
 492	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 493	mdelay(1);
 494	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 495}
 496
 497static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 498					  bool render)
 499{
 500	u32 tmp;
 501
 502	/* Lock out access through the VGA aperture */
 503	tmp = RREG32(mmVGA_HDP_CONTROL);
 504	if (render)
 505		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
 506	else
 507		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 508	WREG32(mmVGA_HDP_CONTROL, tmp);
 509
 510	/* disable VGA render */
 511	tmp = RREG32(mmVGA_RENDER_CONTROL);
 512	if (render)
 513		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
 514	else
 515		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 516	WREG32(mmVGA_RENDER_CONTROL, tmp);
 517}
 518
 519static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 520{
 521	int num_crtc = 0;
 522
 523	switch (adev->asic_type) {
 524	case CHIP_BONAIRE:
 525	case CHIP_HAWAII:
 526		num_crtc = 6;
 527		break;
 528	case CHIP_KAVERI:
 529		num_crtc = 4;
 530		break;
 531	case CHIP_KABINI:
 532	case CHIP_MULLINS:
 533		num_crtc = 2;
 534		break;
 535	default:
 536		num_crtc = 0;
 537	}
 538	return num_crtc;
 539}
 540
 541void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 542{
 543	/* Disable VGA render and any enabled CRTCs, if the chip has a DCE engine */
 544	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 545		u32 tmp;
 546		int crtc_enabled, i;
 547
 548		dce_v8_0_set_vga_render_state(adev, false);
 549
 550		/* Disable CRTCs */
 551		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
 552			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 553									 CRTC_CONTROL, CRTC_MASTER_EN);
 554			if (crtc_enabled) {
 555				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 556				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 557				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 558				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 559				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 560			}
 561		}
 562	}
 563}
 564
 565static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 566{
 567	struct drm_device *dev = encoder->dev;
 568	struct amdgpu_device *adev = dev->dev_private;
 569	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 570	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 571	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 572	int bpc = 0;
 573	u32 tmp = 0;
 574	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 575
 576	if (connector) {
 577		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 578		bpc = amdgpu_connector_get_monitor_bpc(connector);
 579		dither = amdgpu_connector->dither;
 580	}
 581
 582	/* LVDS/eDP FMT is set up by atom */
 583	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 584		return;
 585
 586	/* not needed for analog */
 587	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
 588	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
 589		return;
 590
 591	if (bpc == 0)
 592		return;
 593
 594	switch (bpc) {
 595	case 6:
 596		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 597			/* XXX sort out optimal dither settings */
 598			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 599				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 600				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 601				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 602		else
 603			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 604			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 605		break;
 606	case 8:
 607		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 608			/* XXX sort out optimal dither settings */
 609			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 610				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 611				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 612				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 613				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 614		else
 615			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 616			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 617		break;
 618	case 10:
 619		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 620			/* XXX sort out optimal dither settings */
 621			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 622				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 623				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 624				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 625				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
 626		else
 627			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 628			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
 629		break;
 630	default:
 631		/* not needed */
 632		break;
 633	}
 634
 635	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 636}
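/*
 * Worked example (added commentary): for a hypothetical 8 bpc monitor with
 * dithering enabled, the switch above selects spatial dithering with the
 * frame/highpass/RGB random sources and a dither depth field of 1 (8 bit);
 * with dithering disabled it programs plain truncation to 8 bits per
 * component instead.
 */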
 637
 638
 639/* display watermark setup */
 640/**
 641 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 642 *
 643 * @adev: amdgpu_device pointer
 644 * @amdgpu_crtc: the selected display controller
 645 * @mode: the current display mode on the selected display
 646 * controller
 647 *
 648 * Set up the line buffer allocation for
 649 * the selected display controller (CIK).
 650 * Returns the line buffer size in pixels.
 651 */
 652static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
 653				       struct amdgpu_crtc *amdgpu_crtc,
 654				       struct drm_display_mode *mode)
 655{
 656	u32 tmp, buffer_alloc, i;
 657	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 658	/*
 659	 * Line Buffer Setup
 660	 * There are 6 line buffers, one for each display controller.
 661	 * There are 3 partitions per LB. Select the number of partitions
 662	 * to enable based on the display width.  For display widths larger
 663	 * than 4096, you need to use 2 display controllers and combine
 664	 * them using the stereo blender.
 665	 */
 666	if (amdgpu_crtc->base.enabled && mode) {
 667		if (mode->crtc_hdisplay < 1920) {
 668			tmp = 1;
 669			buffer_alloc = 2;
 670		} else if (mode->crtc_hdisplay < 2560) {
 671			tmp = 2;
 672			buffer_alloc = 2;
 673		} else if (mode->crtc_hdisplay < 4096) {
 674			tmp = 0;
 675			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 676		} else {
 677			DRM_DEBUG_KMS("Mode too big for LB!\n");
 678			tmp = 0;
 679			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 680		}
 681	} else {
 682		tmp = 1;
 683		buffer_alloc = 0;
 684	}
 685
 686	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
 687	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
 688	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
 689
 690	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
 691	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 692	for (i = 0; i < adev->usec_timeout; i++) {
 693		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
 694		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
 695			break;
 696		udelay(1);
 697	}
 698
 699	if (amdgpu_crtc->base.enabled && mode) {
 700		switch (tmp) {
 701		case 0:
 702		default:
 703			return 4096 * 2;
 704		case 1:
 705			return 1920 * 2;
 706		case 2:
 707			return 2560 * 2;
 708		}
 709	}
 710
 711	/* controller not enabled, so no lb used */
 712	return 0;
 713}
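/*
 * Worked example (added commentary): a 1920 pixel wide mode takes the
 * "< 2560" branch above, so LB_MEMORY_CONFIG is programmed with 2 and the
 * function reports 2560 * 2 pixels of line buffer; a 3840 pixel wide mode
 * takes the "< 4096" branch (config 0, 4096 * 2 pixels), with only 2 DMIF
 * buffers allocated on APUs instead of 4.
 */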
 714
 715/**
 716 * cik_get_number_of_dram_channels - get the number of dram channels
 717 *
 718 * @adev: amdgpu_device pointer
 719 *
 720 * Look up the number of video ram channels (CIK).
 721 * Used for display watermark bandwidth calculations
 722 * Returns the number of dram channels
 723 */
 724static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 725{
 726	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 727
 728	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 729	case 0:
 730	default:
 731		return 1;
 732	case 1:
 733		return 2;
 734	case 2:
 735		return 4;
 736	case 3:
 737		return 8;
 738	case 4:
 739		return 3;
 740	case 5:
 741		return 6;
 742	case 6:
 743		return 10;
 744	case 7:
 745		return 12;
 746	case 8:
 747		return 16;
 748	}
 749}
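/*
 * Added note: NOOFCHAN is an encoded field rather than a literal count --
 * for example a raw value of 3 means 8 DRAM channels and 4 means 3
 * channels; the switch above mirrors the memory controller's encoding.
 */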
 750
 751struct dce8_wm_params {
 752	u32 dram_channels; /* number of dram channels */
 753	u32 yclk;          /* bandwidth per dram data pin in kHz */
 754	u32 sclk;          /* engine clock in kHz */
 755	u32 disp_clk;      /* display clock in kHz */
 756	u32 src_width;     /* viewport width */
 757	u32 active_time;   /* active display time in ns */
 758	u32 blank_time;    /* blank time in ns */
 759	bool interlaced;    /* mode is interlaced */
 760	fixed20_12 vsc;    /* vertical scale ratio */
 761	u32 num_heads;     /* number of active crtcs */
 762	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 763	u32 lb_size;       /* line buffer allocated to pipe */
 764	u32 vtaps;         /* vertical scaler taps */
 765};
 766
 767/**
 768 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 769 *
 770 * @wm: watermark calculation data
 771 *
 772 * Calculate the raw dram bandwidth (CIK).
 773 * Used for display watermark bandwidth calculations
 774 * Returns the dram bandwidth in MBytes/s
 775 */
 776static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
 777{
 778	/* Calculate raw DRAM Bandwidth */
 779	fixed20_12 dram_efficiency; /* 0.7 */
 780	fixed20_12 yclk, dram_channels, bandwidth;
 781	fixed20_12 a;
 782
 783	a.full = dfixed_const(1000);
 784	yclk.full = dfixed_const(wm->yclk);
 785	yclk.full = dfixed_div(yclk, a);
 786	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 787	a.full = dfixed_const(10);
 788	dram_efficiency.full = dfixed_const(7);
 789	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 790	bandwidth.full = dfixed_mul(dram_channels, yclk);
 791	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 792
 793	return dfixed_trunc(bandwidth);
 794}
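/*
 * Equivalent plain arithmetic (editorial sketch, example numbers are
 * hypothetical): with yclk in kHz the fixed-point math above reduces to
 *   bandwidth [MB/s] ~= (yclk / 1000) * (dram_channels * 4 bytes) * 0.7
 * e.g. a 1 GHz effective pin clock (yclk = 1000000) on a 2 channel board
 * gives 1000 * 8 * 0.7 = 5600 MB/s.
 */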
 795
 796/**
 797 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 798 *
 799 * @wm: watermark calculation data
 800 *
 801 * Calculate the dram bandwidth used for display (CIK).
 802 * Used for display watermark bandwidth calculations
 803 * Returns the dram bandwidth for display in MBytes/s
 804 */
 805static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 806{
 807	/* Calculate DRAM Bandwidth and the part allocated to display. */
 808	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 809	fixed20_12 yclk, dram_channels, bandwidth;
 810	fixed20_12 a;
 811
 812	a.full = dfixed_const(1000);
 813	yclk.full = dfixed_const(wm->yclk);
 814	yclk.full = dfixed_div(yclk, a);
 815	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 816	a.full = dfixed_const(10);
 817	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 818	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 819	bandwidth.full = dfixed_mul(dram_channels, yclk);
 820	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 821
 822	return dfixed_trunc(bandwidth);
 823}
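/*
 * Added note: same arithmetic as dce_v8_0_dram_bandwidth() but with the
 * worst-case 0.3 display allocation factor in place of the 0.7 DRAM
 * efficiency, so the 2 channel / 1 GHz example above drops from 5600 MB/s
 * to 1000 * 8 * 0.3 = 2400 MB/s available to the display.
 */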
 824
 825/**
 826 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 827 *
 828 * @wm: watermark calculation data
 829 *
 830 * Calculate the data return bandwidth used for display (CIK).
 831 * Used for display watermark bandwidth calculations
 832 * Returns the data return bandwidth in MBytes/s
 833 */
 834static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 835{
 836	/* Calculate the display Data return Bandwidth */
 837	fixed20_12 return_efficiency; /* 0.8 */
 838	fixed20_12 sclk, bandwidth;
 839	fixed20_12 a;
 840
 841	a.full = dfixed_const(1000);
 842	sclk.full = dfixed_const(wm->sclk);
 843	sclk.full = dfixed_div(sclk, a);
 844	a.full = dfixed_const(10);
 845	return_efficiency.full = dfixed_const(8);
 846	return_efficiency.full = dfixed_div(return_efficiency, a);
 847	a.full = dfixed_const(32);
 848	bandwidth.full = dfixed_mul(a, sclk);
 849	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 850
 851	return dfixed_trunc(bandwidth);
 852}
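/*
 * Equivalent plain arithmetic (editorial sketch):
 *   bandwidth [MB/s] ~= (sclk / 1000) * 32 bytes * 0.8
 * e.g. a hypothetical 800 MHz engine clock (sclk = 800000) gives
 * 800 * 32 * 0.8 = 20480 MB/s of return bandwidth.
 */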
 853
 854/**
 855 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 856 *
 857 * @wm: watermark calculation data
 858 *
 859 * Calculate the dmif bandwidth used for display (CIK).
 860 * Used for display watermark bandwidth calculations
 861 * Returns the dmif bandwidth in MBytes/s
 862 */
 863static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 864{
 865	/* Calculate the DMIF Request Bandwidth */
 866	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 867	fixed20_12 disp_clk, bandwidth;
 868	fixed20_12 a, b;
 869
 870	a.full = dfixed_const(1000);
 871	disp_clk.full = dfixed_const(wm->disp_clk);
 872	disp_clk.full = dfixed_div(disp_clk, a);
 873	a.full = dfixed_const(32);
 874	b.full = dfixed_mul(a, disp_clk);
 875
 876	a.full = dfixed_const(10);
 877	disp_clk_request_efficiency.full = dfixed_const(8);
 878	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 879
 880	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 881
 882	return dfixed_trunc(bandwidth);
 883}
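/*
 * Added note: structurally the same as the data return path, but keyed off
 * the display clock instead of the engine clock:
 *   bandwidth [MB/s] ~= (disp_clk / 1000) * 32 bytes * 0.8
 */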
 884
 885/**
 886 * dce_v8_0_available_bandwidth - get the min available bandwidth
 887 *
 888 * @wm: watermark calculation data
 889 *
 890 * Calculate the min available bandwidth used for display (CIK).
 891 * Used for display watermark bandwidth calculations
 892 * Returns the min available bandwidth in MBytes/s
 893 */
 894static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 895{
 896	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 897	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 898	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 899	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 900
 901	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 902}
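/*
 * Added note: with the hypothetical numbers used in the examples above
 * (5600 MB/s DRAM, 20480 MB/s data return and a comparable DMIF figure),
 * the DRAM term is the limiter, so the display could temporarily burst up
 * to 5600 MB/s.
 */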
 903
 904/**
 905 * dce_v8_0_average_bandwidth - get the average display bandwidth
 906 *
 907 * @wm: watermark calculation data
 908 *
 909 * Calculate the average bandwidth consumed by the current display mode (CIK).
 910 * Used for display watermark bandwidth calculations
 911 * Returns the average available bandwidth in MBytes/s
 912 */
 913static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 914{
 915	/* Calculate the display mode Average Bandwidth
 916	 * DisplayMode should contain the source and destination dimensions,
 917	 * timing, etc.
 918	 */
 919	fixed20_12 bpp;
 920	fixed20_12 line_time;
 921	fixed20_12 src_width;
 922	fixed20_12 bandwidth;
 923	fixed20_12 a;
 924
 925	a.full = dfixed_const(1000);
 926	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 927	line_time.full = dfixed_div(line_time, a);
 928	bpp.full = dfixed_const(wm->bytes_per_pixel);
 929	src_width.full = dfixed_const(wm->src_width);
 930	bandwidth.full = dfixed_mul(src_width, bpp);
 931	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 932	bandwidth.full = dfixed_div(bandwidth, line_time);
 933
 934	return dfixed_trunc(bandwidth);
 935}
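/*
 * Worked example (added commentary): active_time + blank_time is the line
 * time in ns, so for a typical 1080p mode (~14.8 us line time, 4 bytes per
 * pixel, no vertical scaling) the average scanout draw is roughly
 * 1920 * 4 / 14.8 ~= 519 MB/s.
 */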
 936
 937/**
 938 * dce_v8_0_latency_watermark - get the latency watermark
 939 *
 940 * @wm: watermark calculation data
 941 *
 942 * Calculate the latency watermark (CIK).
 943 * Used for display watermark bandwidth calculations
 944 * Returns the latency watermark in ns
 945 */
 946static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 947{
 948	/* First calculate the latency in ns */
 949	u32 mc_latency = 2000; /* 2000 ns. */
 950	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
 951	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 952	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 953	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 954	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 955		(wm->num_heads * cursor_line_pair_return_time);
 956	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 957	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 958	u32 tmp, dmif_size = 12288;
 959	fixed20_12 a, b, c;
 960
 961	if (wm->num_heads == 0)
 962		return 0;
 963
 964	a.full = dfixed_const(2);
 965	b.full = dfixed_const(1);
 966	if ((wm->vsc.full > a.full) ||
 967	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 968	    (wm->vtaps >= 5) ||
 969	    ((wm->vsc.full >= a.full) && wm->interlaced))
 970		max_src_lines_per_dst_line = 4;
 971	else
 972		max_src_lines_per_dst_line = 2;
 973
 974	a.full = dfixed_const(available_bandwidth);
 975	b.full = dfixed_const(wm->num_heads);
 976	a.full = dfixed_div(a, b);
 977
 978	b.full = dfixed_const(mc_latency + 512);
 979	c.full = dfixed_const(wm->disp_clk);
 980	b.full = dfixed_div(b, c);
 981
 982	c.full = dfixed_const(dmif_size);
 983	b.full = dfixed_div(c, b);
 984
 985	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 986
 987	b.full = dfixed_const(1000);
 988	c.full = dfixed_const(wm->disp_clk);
 989	b.full = dfixed_div(c, b);
 990	c.full = dfixed_const(wm->bytes_per_pixel);
 991	b.full = dfixed_mul(b, c);
 992
 993	lb_fill_bw = min(tmp, dfixed_trunc(b));
 994
 995	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 996	b.full = dfixed_const(1000);
 997	c.full = dfixed_const(lb_fill_bw);
 998	b.full = dfixed_div(c, b);
 999	a.full = dfixed_div(a, b);
1000	line_fill_time = dfixed_trunc(a);
1001
1002	if (line_fill_time < wm->active_time)
1003		return latency;
1004	else
1005		return latency + (line_fill_time - wm->active_time);
1006
1007}
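/*
 * Added summary (not from the original source): the watermark is the
 * estimated memory latency seen by this head -- MC latency plus the chunk
 * and cursor return time of the other heads plus DC pipe latency --
 * extended by the extra time the line buffer needs whenever filling a line
 * takes longer than scanning it out.
 */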
1008
1009/**
1010 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1011 * average and available dram bandwidth
1012 *
1013 * @wm: watermark calculation data
1014 *
1015 * Check if the display average bandwidth fits in the display
1016 * dram bandwidth (CIK).
1017 * Used for display watermark bandwidth calculations
1018 * Returns true if the display fits, false if not.
1019 */
1020static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
1021{
1022	if (dce_v8_0_average_bandwidth(wm) <=
1023	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1024		return true;
1025	else
1026		return false;
1027}
1028
1029/**
1030 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
1031 * average and available bandwidth
1032 *
1033 * @wm: watermark calculation data
1034 *
1035 * Check if the display average bandwidth fits in the display
1036 * available bandwidth (CIK).
1037 * Used for display watermark bandwidth calculations
1038 * Returns true if the display fits, false if not.
1039 */
1040static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
1041{
1042	if (dce_v8_0_average_bandwidth(wm) <=
1043	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
1044		return true;
1045	else
1046		return false;
1047}
1048
1049/**
1050 * dce_v8_0_check_latency_hiding - check latency hiding
1051 *
1052 * @wm: watermark calculation data
1053 *
1054 * Check latency hiding (CIK).
1055 * Used for display watermark bandwidth calculations
1056 * Returns true if the display fits, false if not.
1057 */
1058static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
1059{
1060	u32 lb_partitions = wm->lb_size / wm->src_width;
1061	u32 line_time = wm->active_time + wm->blank_time;
1062	u32 latency_tolerant_lines;
1063	u32 latency_hiding;
1064	fixed20_12 a;
1065
1066	a.full = dfixed_const(1);
1067	if (wm->vsc.full > a.full)
1068		latency_tolerant_lines = 1;
1069	else {
1070		if (lb_partitions <= (wm->vtaps + 1))
1071			latency_tolerant_lines = 1;
1072		else
1073			latency_tolerant_lines = 2;
1074	}
1075
1076	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1077
1078	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
1079		return true;
1080	else
1081		return false;
1082}
1083
1084/**
1085 * dce_v8_0_program_watermarks - program display watermarks
1086 *
1087 * @adev: amdgpu_device pointer
1088 * @amdgpu_crtc: the selected display controller
1089 * @lb_size: line buffer size
1090 * @num_heads: number of display controllers in use
1091 *
1092 * Calculate and program the display watermarks for the
1093 * selected display controller (CIK).
1094 */
1095static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1096					struct amdgpu_crtc *amdgpu_crtc,
1097					u32 lb_size, u32 num_heads)
1098{
1099	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1100	struct dce8_wm_params wm_low, wm_high;
1101	u32 pixel_period;
1102	u32 line_time = 0;
1103	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1104	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1105
1106	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1107		pixel_period = 1000000 / (u32)mode->clock;
1108		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1109
1110		/* watermark for high clocks */
1111		if (adev->pm.dpm_enabled) {
1112			wm_high.yclk =
1113				amdgpu_dpm_get_mclk(adev, false) * 10;
1114			wm_high.sclk =
1115				amdgpu_dpm_get_sclk(adev, false) * 10;
1116		} else {
1117			wm_high.yclk = adev->pm.current_mclk * 10;
1118			wm_high.sclk = adev->pm.current_sclk * 10;
1119		}
1120
1121		wm_high.disp_clk = mode->clock;
1122		wm_high.src_width = mode->crtc_hdisplay;
1123		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1124		wm_high.blank_time = line_time - wm_high.active_time;
1125		wm_high.interlaced = false;
1126		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1127			wm_high.interlaced = true;
1128		wm_high.vsc = amdgpu_crtc->vsc;
1129		wm_high.vtaps = 1;
1130		if (amdgpu_crtc->rmx_type != RMX_OFF)
1131			wm_high.vtaps = 2;
1132		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1133		wm_high.lb_size = lb_size;
1134		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1135		wm_high.num_heads = num_heads;
1136
1137		/* set for high clocks */
1138		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1139
1140		/* possibly force display priority to high */
1141		/* should really do this at mode validation time... */
1142		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1143		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1144		    !dce_v8_0_check_latency_hiding(&wm_high) ||
1145		    (adev->mode_info.disp_priority == 2)) {
1146			DRM_DEBUG_KMS("force priority to high\n");
1147		}
1148
1149		/* watermark for low clocks */
1150		if (adev->pm.dpm_enabled) {
1151			wm_low.yclk =
1152				amdgpu_dpm_get_mclk(adev, true) * 10;
1153			wm_low.sclk =
1154				amdgpu_dpm_get_sclk(adev, true) * 10;
1155		} else {
1156			wm_low.yclk = adev->pm.current_mclk * 10;
1157			wm_low.sclk = adev->pm.current_sclk * 10;
1158		}
1159
1160		wm_low.disp_clk = mode->clock;
1161		wm_low.src_width = mode->crtc_hdisplay;
1162		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1163		wm_low.blank_time = line_time - wm_low.active_time;
1164		wm_low.interlaced = false;
1165		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1166			wm_low.interlaced = true;
1167		wm_low.vsc = amdgpu_crtc->vsc;
1168		wm_low.vtaps = 1;
1169		if (amdgpu_crtc->rmx_type != RMX_OFF)
1170			wm_low.vtaps = 2;
1171		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1172		wm_low.lb_size = lb_size;
1173		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1174		wm_low.num_heads = num_heads;
1175
1176		/* set for low clocks */
1177		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1178
1179		/* possibly force display priority to high */
1180		/* should really do this at mode validation time... */
1181		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1182		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1183		    !dce_v8_0_check_latency_hiding(&wm_low) ||
1184		    (adev->mode_info.disp_priority == 2)) {
1185			DRM_DEBUG_KMS("force priority to high\n");
1186		}
1187		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1188	}
1189
1190	/* select wm A */
1191	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1192	tmp = wm_mask;
1193	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1194	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1195	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1196	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1197	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1198		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1199	/* select wm B */
1200	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1201	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1202	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1203	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1204	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1205	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1206		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1207	/* restore original selection */
1208	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1209
1210	/* save values for DPM */
1211	amdgpu_crtc->line_time = line_time;
1212	amdgpu_crtc->wm_high = latency_watermark_a;
1213	amdgpu_crtc->wm_low = latency_watermark_b;
1214	/* Save number of lines the linebuffer leads before the scanout */
1215	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1216}
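/*
 * Added note: watermark set A is programmed with the high-clock value and
 * set B with the low-clock value, then the original URGENCY_WATERMARK_MASK
 * selection is restored; line_time and both watermarks are also cached on
 * the amdgpu_crtc (the "save values for DPM" block above) for the power
 * management code.
 */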
1217
1218/**
1219 * dce_v8_0_bandwidth_update - program display watermarks
1220 *
1221 * @adev: amdgpu_device pointer
1222 *
1223 * Calculate and program the display watermarks and line
1224 * buffer allocation (CIK).
1225 */
1226static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1227{
1228	struct drm_display_mode *mode = NULL;
1229	u32 num_heads = 0, lb_size;
1230	int i;
1231
1232	amdgpu_update_display_priority(adev);
1233
1234	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1235		if (adev->mode_info.crtcs[i]->base.enabled)
1236			num_heads++;
1237	}
1238	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1239		mode = &adev->mode_info.crtcs[i]->base.mode;
1240		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1241		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1242					    lb_size, num_heads);
1243	}
1244}
1245
1246static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1247{
1248	int i;
1249	u32 offset, tmp;
1250
1251	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1252		offset = adev->mode_info.audio.pin[i].offset;
1253		tmp = RREG32_AUDIO_ENDPT(offset,
1254					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1255		if (((tmp &
1256		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1257		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1258			adev->mode_info.audio.pin[i].connected = false;
1259		else
1260			adev->mode_info.audio.pin[i].connected = true;
1261	}
1262}
1263
1264static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1265{
1266	int i;
1267
1268	dce_v8_0_audio_get_connected_pins(adev);
1269
1270	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1271		if (adev->mode_info.audio.pin[i].connected)
1272			return &adev->mode_info.audio.pin[i];
1273	}
1274	DRM_ERROR("No connected audio pins found!\n");
1275	return NULL;
1276}
1277
1278static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1279{
1280	struct amdgpu_device *adev = encoder->dev->dev_private;
1281	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1282	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1283	u32 offset;
1284
1285	if (!dig || !dig->afmt || !dig->afmt->pin)
1286		return;
1287
1288	offset = dig->afmt->offset;
1289
1290	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1291	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1292}
1293
1294static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1295						struct drm_display_mode *mode)
1296{
1297	struct amdgpu_device *adev = encoder->dev->dev_private;
1298	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1299	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1300	struct drm_connector *connector;
1301	struct amdgpu_connector *amdgpu_connector = NULL;
1302	u32 tmp = 0, offset;
1303
1304	if (!dig || !dig->afmt || !dig->afmt->pin)
1305		return;
1306
1307	offset = dig->afmt->pin->offset;
1308
1309	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1310		if (connector->encoder == encoder) {
1311			amdgpu_connector = to_amdgpu_connector(connector);
1312			break;
1313		}
1314	}
1315
1316	if (!amdgpu_connector) {
1317		DRM_ERROR("Couldn't find encoder's connector\n");
1318		return;
1319	}
1320
1321	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1322		if (connector->latency_present[1])
1323			tmp =
1324			(connector->video_latency[1] <<
1325			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1326			(connector->audio_latency[1] <<
1327			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1328		else
1329			tmp =
1330			(0 <<
1331			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1332			(0 <<
1333			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1334	} else {
1335		if (connector->latency_present[0])
1336			tmp =
1337			(connector->video_latency[0] <<
1338			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1339			(connector->audio_latency[0] <<
1340			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1341		else
1342			tmp =
1343			(0 <<
1344			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1345			(0 <<
1346			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1347
1348	}
1349	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1350}
1351
1352static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1353{
1354	struct amdgpu_device *adev = encoder->dev->dev_private;
1355	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1356	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1357	struct drm_connector *connector;
1358	struct amdgpu_connector *amdgpu_connector = NULL;
1359	u32 offset, tmp;
1360	u8 *sadb = NULL;
1361	int sad_count;
1362
1363	if (!dig || !dig->afmt || !dig->afmt->pin)
1364		return;
1365
1366	offset = dig->afmt->pin->offset;
1367
1368	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1369		if (connector->encoder == encoder) {
1370			amdgpu_connector = to_amdgpu_connector(connector);
1371			break;
1372		}
1373	}
1374
1375	if (!amdgpu_connector) {
1376		DRM_ERROR("Couldn't find encoder's connector\n");
1377		return;
1378	}
1379
1380	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1381	if (sad_count < 0) {
1382		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1383		sad_count = 0;
1384	}
1385
1386	/* program the speaker allocation */
1387	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1388	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1389		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1390	/* set HDMI mode */
1391	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1392	if (sad_count)
1393		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1394	else
1395		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1396	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1397
1398	kfree(sadb);
1399}
1400
1401static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1402{
1403	struct amdgpu_device *adev = encoder->dev->dev_private;
1404	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1405	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1406	u32 offset;
1407	struct drm_connector *connector;
1408	struct amdgpu_connector *amdgpu_connector = NULL;
1409	struct cea_sad *sads;
1410	int i, sad_count;
1411
1412	static const u16 eld_reg_to_type[][2] = {
1413		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1414		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1415		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1416		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1417		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1418		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1419		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1420		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1421		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1422		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1423		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1424		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1425	};
1426
1427	if (!dig || !dig->afmt || !dig->afmt->pin)
1428		return;
1429
1430	offset = dig->afmt->pin->offset;
1431
1432	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1433		if (connector->encoder == encoder) {
1434			amdgpu_connector = to_amdgpu_connector(connector);
1435			break;
1436		}
1437	}
1438
1439	if (!amdgpu_connector) {
1440		DRM_ERROR("Couldn't find encoder's connector\n");
1441		return;
1442	}
1443
1444	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1445	if (sad_count <= 0) {
1446		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1447		return;
1448	}
1449	BUG_ON(!sads);
1450
1451	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1452		u32 value = 0;
1453		u8 stereo_freqs = 0;
1454		int max_channels = -1;
1455		int j;
1456
1457		for (j = 0; j < sad_count; j++) {
1458			struct cea_sad *sad = &sads[j];
1459
1460			if (sad->format == eld_reg_to_type[i][1]) {
1461				if (sad->channels > max_channels) {
1462					value = (sad->channels <<
1463						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1464					        (sad->byte2 <<
1465						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1466					        (sad->freq <<
1467						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1468					max_channels = sad->channels;
1469				}
1470
1471				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1472					stereo_freqs |= sad->freq;
1473				else
1474					break;
1475			}
1476		}
1477
1478		value |= (stereo_freqs <<
1479			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1480
1481		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1482	}
1483
1484	kfree(sads);
1485}
1486
1487static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1488				  struct amdgpu_audio_pin *pin,
1489				  bool enable)
1490{
1491	if (!pin)
1492		return;
1493
1494	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1495		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1496}
1497
1498static const u32 pin_offsets[7] =
1499{
1500	(0x1780 - 0x1780),
1501	(0x1786 - 0x1780),
1502	(0x178c - 0x1780),
1503	(0x1792 - 0x1780),
1504	(0x1798 - 0x1780),
1505	(0x179d - 0x1780),
1506	(0x17a4 - 0x1780),
1507};
1508
1509static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1510{
1511	int i;
1512
1513	if (!amdgpu_audio)
1514		return 0;
1515
1516	adev->mode_info.audio.enabled = true;
1517
1518	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1519		adev->mode_info.audio.num_pins = 7;
1520	else if ((adev->asic_type == CHIP_KABINI) ||
1521		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1522		adev->mode_info.audio.num_pins = 3;
1523	else if ((adev->asic_type == CHIP_BONAIRE) ||
1524		 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1525		adev->mode_info.audio.num_pins = 7;
1526	else
1527		adev->mode_info.audio.num_pins = 3;
1528
1529	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1530		adev->mode_info.audio.pin[i].channels = -1;
1531		adev->mode_info.audio.pin[i].rate = -1;
1532		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1533		adev->mode_info.audio.pin[i].status_bits = 0;
1534		adev->mode_info.audio.pin[i].category_code = 0;
1535		adev->mode_info.audio.pin[i].connected = false;
1536		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1537		adev->mode_info.audio.pin[i].id = i;
1538		/* disable audio.  it will be set up later */
1539		/* XXX remove once we switch to ip funcs */
1540		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1541	}
1542
1543	return 0;
1544}
1545
1546static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1547{
1548	int i;
1549
1550	if (!amdgpu_audio)
1551		return;
1552
1553	if (!adev->mode_info.audio.enabled)
1554		return;
1555
1556	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1557		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1558
1559	adev->mode_info.audio.enabled = false;
1560}
1561
1562/*
1563 * update the N and CTS parameters for a given pixel clock rate
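 * (HDMI audio clock regeneration: the sink rebuilds the audio clock as
 * 128*fs = f_TMDS * N / CTS, so each base sample rate needs its own N/CTS
 * pair matched to the current pixel clock)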
1564 */
1565static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1566{
1567	struct drm_device *dev = encoder->dev;
1568	struct amdgpu_device *adev = dev->dev_private;
1569	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1570	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1571	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1572	uint32_t offset = dig->afmt->offset;
1573
1574	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1575	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1576
1577	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1578	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1579
1580	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1581	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1582}
1583
1584/*
1585 * build a HDMI Video Info Frame
1586 */
1587static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1588					       void *buffer, size_t size)
1589{
1590	struct drm_device *dev = encoder->dev;
1591	struct amdgpu_device *adev = dev->dev_private;
1592	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1593	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1594	uint32_t offset = dig->afmt->offset;
1595	uint8_t *frame = buffer + 3;
1596	uint8_t *header = buffer;
1597
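	/* The packed infoframe is a 3-byte header followed by the checksum and
	 * payload; "frame" starts at the checksum, so AVI_INFO0..3 carry the
	 * checksum plus payload bytes while header[1] (the version byte) lands
	 * in the top byte of AVI_INFO3.
	 */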
1598	WREG32(mmAFMT_AVI_INFO0 + offset,
1599		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1600	WREG32(mmAFMT_AVI_INFO1 + offset,
1601		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1602	WREG32(mmAFMT_AVI_INFO2 + offset,
1603		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1604	WREG32(mmAFMT_AVI_INFO3 + offset,
1605		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1606}
1607
1608static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1609{
1610	struct drm_device *dev = encoder->dev;
1611	struct amdgpu_device *adev = dev->dev_private;
1612	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1613	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1614	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1615	u32 dto_phase = 24 * 1000;
1616	u32 dto_modulo = clock;
1617
1618	if (!dig || !dig->afmt)
1619		return;
1620
1621	/* XXX two dtos; generally use dto0 for hdmi */
1622	/* Express [24MHz / target pixel clock] as an exact rational
1623	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
1624	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
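	 * For example, with a 148.5 MHz pixel clock (mode->clock == 148500, in
	 * kHz) the ratio programmed below is phase/module = 24000/148500.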
1625	 */
1626	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1627	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1628	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1629}
1630
1631/*
1632 * update the info frames with the data from the current display mode
1633 */
1634static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1635				  struct drm_display_mode *mode)
1636{
1637	struct drm_device *dev = encoder->dev;
1638	struct amdgpu_device *adev = dev->dev_private;
1639	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1640	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1641	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1642	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1643	struct hdmi_avi_infoframe frame;
1644	uint32_t offset, val;
1645	ssize_t err;
1646	int bpc = 8;
1647
1648	if (!dig || !dig->afmt)
1649		return;
1650
1651	/* Silent, r600_hdmi_enable will raise WARN for us */
1652	if (!dig->afmt->enabled)
1653		return;
1654
1655	offset = dig->afmt->offset;
1656
1657	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1658	if (encoder->crtc) {
1659		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1660		bpc = amdgpu_crtc->bpc;
1661	}
1662
1663	/* disable audio prior to setting up hw */
1664	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1665	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1666
1667	dce_v8_0_audio_set_dto(encoder, mode->clock);
1668
1669	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1670	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1671
1672	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1673
1674	val = RREG32(mmHDMI_CONTROL + offset);
1675	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1676	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1677
1678	switch (bpc) {
1679	case 0:
1680	case 6:
1681	case 8:
1682	case 16:
1683	default:
1684		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1685			  connector->name, bpc);
1686		break;
1687	case 10:
1688		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1689		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1690		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1691			  connector->name);
1692		break;
1693	case 12:
1694		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1695		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1696		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1697			  connector->name);
1698		break;
1699	}
1700
1701	WREG32(mmHDMI_CONTROL + offset, val);
1702
1703	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1704	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1705	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1706	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1707
1708	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1709	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1710	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1711
1712	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1713	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1714
1715	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1716	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1717
1718	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1719
1720	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1721	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1722	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1723
1724	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1725	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1726
1727	/* fglrx clears something in AFMT_AUDIO_PACKET_CONTROL2 here */
1728
1729	if (bpc > 8)
1730		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1731		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1732	else
1733		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1734		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1735		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1736
1737	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1738
1739	WREG32(mmAFMT_60958_0 + offset,
1740	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1741
1742	WREG32(mmAFMT_60958_1 + offset,
1743	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1744
1745	WREG32(mmAFMT_60958_2 + offset,
1746	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1747	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1748	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1749	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1750	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1751	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1752
1753	dce_v8_0_audio_write_speaker_allocation(encoder);
1754
1755
1756	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1757	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1758
1759	dce_v8_0_afmt_audio_select_pin(encoder);
1760	dce_v8_0_audio_write_sad_regs(encoder);
1761	dce_v8_0_audio_write_latency_fields(encoder, mode);
1762
1763	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1764	if (err < 0) {
1765		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1766		return;
1767	}
1768
1769	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1770	if (err < 0) {
1771		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1772		return;
1773	}
1774
1775	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1776
1777	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1778		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1779		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
1780
1781	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1782		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1783		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1784
1785	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1786		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1787
1788	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1789	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1790	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1791	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1792
1793	/* enable audio after setting up hw */
1794	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1795}
1796
1797static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1798{
1799	struct drm_device *dev = encoder->dev;
1800	struct amdgpu_device *adev = dev->dev_private;
1801	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1802	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1803
1804	if (!dig || !dig->afmt)
1805		return;
1806
1807	/* Silent, r600_hdmi_enable will raise WARN for us */
1808	if (enable && dig->afmt->enabled)
1809		return;
1810	if (!enable && !dig->afmt->enabled)
1811		return;
1812
1813	if (!enable && dig->afmt->pin) {
1814		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1815		dig->afmt->pin = NULL;
1816	}
1817
1818	dig->afmt->enabled = enable;
1819
1820	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1821		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1822}
1823
1824static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1825{
1826	int i;
1827
1828	for (i = 0; i < adev->mode_info.num_dig; i++)
1829		adev->mode_info.afmt[i] = NULL;
1830
1831	/* DCE8 has audio blocks tied to DIG encoders */
1832	for (i = 0; i < adev->mode_info.num_dig; i++) {
1833		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1834		if (adev->mode_info.afmt[i]) {
1835			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1836			adev->mode_info.afmt[i]->id = i;
1837		} else {
1838			int j;
1839			for (j = 0; j < i; j++) {
1840				kfree(adev->mode_info.afmt[j]);
1841				adev->mode_info.afmt[j] = NULL;
1842			}
1843			return -ENOMEM;
1844		}
1845	}
1846	return 0;
1847}
1848
1849static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1850{
1851	int i;
1852
1853	for (i = 0; i < adev->mode_info.num_dig; i++) {
1854		kfree(adev->mode_info.afmt[i]);
1855		adev->mode_info.afmt[i] = NULL;
1856	}
1857}
1858
1859static const u32 vga_control_regs[6] =
1860{
1861	mmD1VGA_CONTROL,
1862	mmD2VGA_CONTROL,
1863	mmD3VGA_CONTROL,
1864	mmD4VGA_CONTROL,
1865	mmD5VGA_CONTROL,
1866	mmD6VGA_CONTROL,
1867};
1868
1869static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1870{
1871	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1872	struct drm_device *dev = crtc->dev;
1873	struct amdgpu_device *adev = dev->dev_private;
1874	u32 vga_control;
1875
1876	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1877	if (enable)
1878		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1879	else
1880		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1881}
1882
1883static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1884{
1885	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1886	struct drm_device *dev = crtc->dev;
1887	struct amdgpu_device *adev = dev->dev_private;
1888
1889	if (enable)
1890		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1891	else
1892		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1893}
1894
1895static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1896				     struct drm_framebuffer *fb,
1897				     int x, int y, int atomic)
1898{
1899	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1900	struct drm_device *dev = crtc->dev;
1901	struct amdgpu_device *adev = dev->dev_private;
1902	struct amdgpu_framebuffer *amdgpu_fb;
1903	struct drm_framebuffer *target_fb;
1904	struct drm_gem_object *obj;
1905	struct amdgpu_bo *abo;
1906	uint64_t fb_location, tiling_flags;
1907	uint32_t fb_format, fb_pitch_pixels;
1908	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1909	u32 pipe_config;
1910	u32 viewport_w, viewport_h;
1911	int r;
1912	bool bypass_lut = false;
1913	struct drm_format_name_buf format_name;
1914
1915	/* no fb bound */
1916	if (!atomic && !crtc->primary->fb) {
1917		DRM_DEBUG_KMS("No FB bound\n");
1918		return 0;
1919	}
1920
1921	if (atomic) {
1922		amdgpu_fb = to_amdgpu_framebuffer(fb);
1923		target_fb = fb;
1924	} else {
1925		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1926		target_fb = crtc->primary->fb;
1927	}
1928
1929	/* If atomic, assume fb object is pinned & idle & fenced and
1930	 * just update base pointers
1931	 */
1932	obj = amdgpu_fb->obj;
1933	abo = gem_to_amdgpu_bo(obj);
1934	r = amdgpu_bo_reserve(abo, false);
1935	if (unlikely(r != 0))
1936		return r;
1937
1938	if (atomic) {
1939		fb_location = amdgpu_bo_gpu_offset(abo);
1940	} else {
1941		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1942		if (unlikely(r != 0)) {
1943			amdgpu_bo_unreserve(abo);
1944			return -EINVAL;
1945		}
1946	}
1947
1948	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1949	amdgpu_bo_unreserve(abo);
1950
1951	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1952
1953	switch (target_fb->pixel_format) {
1954	case DRM_FORMAT_C8:
1955		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1956			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1957		break;
1958	case DRM_FORMAT_XRGB4444:
1959	case DRM_FORMAT_ARGB4444:
1960		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1961			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1962#ifdef __BIG_ENDIAN
1963		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1964#endif
1965		break;
1966	case DRM_FORMAT_XRGB1555:
1967	case DRM_FORMAT_ARGB1555:
1968		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1969			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1970#ifdef __BIG_ENDIAN
1971		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1972#endif
1973		break;
1974	case DRM_FORMAT_BGRX5551:
1975	case DRM_FORMAT_BGRA5551:
1976		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1977			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1978#ifdef __BIG_ENDIAN
1979		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1980#endif
1981		break;
1982	case DRM_FORMAT_RGB565:
1983		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1984			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1985#ifdef __BIG_ENDIAN
1986		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1987#endif
1988		break;
1989	case DRM_FORMAT_XRGB8888:
1990	case DRM_FORMAT_ARGB8888:
1991		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1992			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1993#ifdef __BIG_ENDIAN
1994		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1995#endif
1996		break;
1997	case DRM_FORMAT_XRGB2101010:
1998	case DRM_FORMAT_ARGB2101010:
1999		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2000			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2001#ifdef __BIG_ENDIAN
2002		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2003#endif
2004		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2005		bypass_lut = true;
2006		break;
2007	case DRM_FORMAT_BGRX1010102:
2008	case DRM_FORMAT_BGRA1010102:
2009		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2010			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2011#ifdef __BIG_ENDIAN
2012		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2013#endif
2014		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2015		bypass_lut = true;
2016		break;
2017	default:
2018		DRM_ERROR("Unsupported screen format %s\n",
2019		          drm_get_format_name(target_fb->pixel_format, &format_name));
2020		return -EINVAL;
2021	}
2022
2023	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2024		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2025
2026		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2027		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2028		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2029		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2030		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2031
2032		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2033		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2034		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2035		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2036		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2037		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2038		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2039	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2040		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2041	}
2042
2043	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2044
2045	dce_v8_0_vga_enable(crtc, false);
2046
2047	/* Make sure surface address is updated at vertical blank rather than
2048	 * horizontal blank
2049	 */
2050	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2051
2052	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2053	       upper_32_bits(fb_location));
2054	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2055	       upper_32_bits(fb_location));
2056	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2057	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2058	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2059	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2060	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2061	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2062
2063	/*
2064	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2065	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2066	 * retain the full precision throughout the pipeline.
2067	 */
2068	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2069		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2070		 ~LUT_10BIT_BYPASS_EN);
2071
2072	if (bypass_lut)
2073		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2074
2075	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2076	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2077	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2078	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2079	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2080	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2081
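	/* pitches[0] is in bytes; GRPH_PITCH wants the pitch in pixels */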
2082	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2083	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2084
2085	dce_v8_0_grph_enable(crtc, true);
2086
2087	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2088	       target_fb->height);
2089
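	/* align the viewport start; the masking suggests the hardware wants a
	 * 4-pixel horizontal and 2-line vertical granularity here
	 */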
2090	x &= ~3;
2091	y &= ~1;
2092	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2093	       (x << 16) | y);
2094	viewport_w = crtc->mode.hdisplay;
2095	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2096	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2097	       (viewport_w << 16) | viewport_h);
2098
2099	/* set pageflip to happen anywhere in vblank interval */
2100	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2101
2102	if (!atomic && fb && fb != crtc->primary->fb) {
2103		amdgpu_fb = to_amdgpu_framebuffer(fb);
2104		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2105		r = amdgpu_bo_reserve(abo, false);
2106		if (unlikely(r != 0))
2107			return r;
2108		amdgpu_bo_unpin(abo);
2109		amdgpu_bo_unreserve(abo);
2110	}
2111
2112	/* Bytes per pixel may have changed */
2113	dce_v8_0_bandwidth_update(adev);
2114
2115	return 0;
2116}
2117
2118static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2119				    struct drm_display_mode *mode)
2120{
2121	struct drm_device *dev = crtc->dev;
2122	struct amdgpu_device *adev = dev->dev_private;
2123	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2124
2125	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2126		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2127		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2128	else
2129		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2130}
2131
2132static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2133{
2134	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2135	struct drm_device *dev = crtc->dev;
2136	struct amdgpu_device *adev = dev->dev_private;
2137	int i;
2138
2139	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2140
2141	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2142	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2143		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2144	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2145	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2146	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2147	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2148	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2149	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2150		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2151
2152	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2153
2154	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2155	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2156	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2157
2158	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2159	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2160	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2161
2162	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2163	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2164
2165	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
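	/* each DC_LUT_30_COLOR write packs one 10-bit R/G/B triplet; the lut_*
	 * arrays were already reduced to 10 bits per component in gamma_set /
	 * crtc_init
	 */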
2166	for (i = 0; i < 256; i++) {
2167		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2168		       (amdgpu_crtc->lut_r[i] << 20) |
2169		       (amdgpu_crtc->lut_g[i] << 10) |
2170		       (amdgpu_crtc->lut_b[i] << 0));
2171	}
2172
2173	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2174	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2175		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2176		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2177	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2178	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2179		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2180	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2181	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2182		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2183	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2184	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2185		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2186	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2187	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2188	/* XXX this only needs to be programmed once per crtc at startup,
2189	 * not sure where the best place for it is
2190	 */
2191	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2192	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2193}
2194
2195static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2196{
2197	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2198	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2199
2200	switch (amdgpu_encoder->encoder_id) {
2201	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2202		if (dig->linkb)
2203			return 1;
2204		else
2205			return 0;
2206		break;
2207	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2208		if (dig->linkb)
2209			return 3;
2210		else
2211			return 2;
2212		break;
2213	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2214		if (dig->linkb)
2215			return 5;
2216		else
2217			return 4;
2218		break;
2219	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2220		return 6;
2221		break;
2222	default:
2223		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2224		return 0;
2225	}
2226}
2227
2228/**
2229 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2230 *
2231 * @crtc: drm crtc
2232 *
2233 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2234 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2235 * monitors a dedicated PPLL must be used.  If a particular board has
2236 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2237 * as there is no need to program the PLL itself.  If we are not able to
2238 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2239 * avoid messing up an existing monitor.
2240 *
2241 * Asic specific PLL information
2242 *
2243 * DCE 8.x
2244 * KB/KV
2245 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2246 * CI
2247 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2248 *
2249 */
2250static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2251{
2252	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2253	struct drm_device *dev = crtc->dev;
2254	struct amdgpu_device *adev = dev->dev_private;
2255	u32 pll_in_use;
2256	int pll;
2257
2258	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2259		if (adev->clock.dp_extclk)
2260			/* skip PPLL programming if using ext clock */
2261			return ATOM_PPLL_INVALID;
2262		else {
2263			/* use the same PPLL for all DP monitors */
2264			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2265			if (pll != ATOM_PPLL_INVALID)
2266				return pll;
2267		}
2268	} else {
2269		/* use the same PPLL for all monitors with the same clock */
2270		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2271		if (pll != ATOM_PPLL_INVALID)
2272			return pll;
2273	}
2274	/* otherwise, pick one of the plls */
2275	if ((adev->asic_type == CHIP_KABINI) ||
2276	    (adev->asic_type == CHIP_MULLINS)) {
2277		/* KB/ML has PPLL1 and PPLL2 */
2278		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2279		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2280			return ATOM_PPLL2;
2281		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2282			return ATOM_PPLL1;
2283		DRM_ERROR("unable to allocate a PPLL\n");
2284		return ATOM_PPLL_INVALID;
2285	} else {
2286		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2287		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2288		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2289			return ATOM_PPLL2;
2290		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2291			return ATOM_PPLL1;
2292		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2293			return ATOM_PPLL0;
2294		DRM_ERROR("unable to allocate a PPLL\n");
2295		return ATOM_PPLL_INVALID;
2296	}
2297	return ATOM_PPLL_INVALID;
2298}
2299
2300static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2301{
2302	struct amdgpu_device *adev = crtc->dev->dev_private;
2303	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2304	uint32_t cur_lock;
2305
2306	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2307	if (lock)
2308		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2309	else
2310		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2311	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2312}
2313
2314static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2315{
2316	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2317	struct amdgpu_device *adev = crtc->dev->dev_private;
2318
2319	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2320		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2321		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2322}
2323
2324static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2325{
2326	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2327	struct amdgpu_device *adev = crtc->dev->dev_private;
2328
2329	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2330	       upper_32_bits(amdgpu_crtc->cursor_addr));
2331	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2332	       lower_32_bits(amdgpu_crtc->cursor_addr));
2333
2334	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2335		   CUR_CONTROL__CURSOR_EN_MASK |
2336		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2337		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2338}
2339
2340static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2341				       int x, int y)
2342{
2343	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2344	struct amdgpu_device *adev = crtc->dev->dev_private;
2345	int xorigin = 0, yorigin = 0;
2346
2347	amdgpu_crtc->cursor_x = x;
2348	amdgpu_crtc->cursor_y = y;
2349
2350	/* avivo cursors are offset into the total surface */
2351	x += crtc->x;
2352	y += crtc->y;
2353	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2354
2355	if (x < 0) {
2356		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2357		x = 0;
2358	}
2359	if (y < 0) {
2360		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2361		y = 0;
2362	}
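	/* e.g. a cursor hanging off the left edge at x == -10 is programmed
	 * with position 0 and a hot spot of 10, which effectively clips the
	 * first 10 columns of the cursor image
	 */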
2363
2364	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2365	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2366	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2367	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2368
2369	return 0;
2370}
2371
2372static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2373				     int x, int y)
2374{
2375	int ret;
2376
2377	dce_v8_0_lock_cursor(crtc, true);
2378	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2379	dce_v8_0_lock_cursor(crtc, false);
2380
2381	return ret;
2382}
2383
2384static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2385				     struct drm_file *file_priv,
2386				     uint32_t handle,
2387				     uint32_t width,
2388				     uint32_t height,
2389				     int32_t hot_x,
2390				     int32_t hot_y)
2391{
2392	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2393	struct drm_gem_object *obj;
2394	struct amdgpu_bo *aobj;
2395	int ret;
2396
2397	if (!handle) {
2398		/* turn off cursor */
2399		dce_v8_0_hide_cursor(crtc);
2400		obj = NULL;
2401		goto unpin;
2402	}
2403
2404	if ((width > amdgpu_crtc->max_cursor_width) ||
2405	    (height > amdgpu_crtc->max_cursor_height)) {
2406		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2407		return -EINVAL;
2408	}
2409
2410	obj = drm_gem_object_lookup(file_priv, handle);
2411	if (!obj) {
2412		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2413		return -ENOENT;
2414	}
2415
2416	aobj = gem_to_amdgpu_bo(obj);
2417	ret = amdgpu_bo_reserve(aobj, false);
2418	if (ret != 0) {
2419		drm_gem_object_unreference_unlocked(obj);
2420		return ret;
2421	}
2422
2423	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2424	amdgpu_bo_unreserve(aobj);
2425	if (ret) {
2426		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2427		drm_gem_object_unreference_unlocked(obj);
2428		return ret;
2429	}
2430
2431	dce_v8_0_lock_cursor(crtc, true);
2432
2433	if (width != amdgpu_crtc->cursor_width ||
2434	    height != amdgpu_crtc->cursor_height ||
2435	    hot_x != amdgpu_crtc->cursor_hot_x ||
2436	    hot_y != amdgpu_crtc->cursor_hot_y) {
2437		int x, y;
2438
2439		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2440		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2441
2442		dce_v8_0_cursor_move_locked(crtc, x, y);
2443
2444		amdgpu_crtc->cursor_width = width;
2445		amdgpu_crtc->cursor_height = height;
2446		amdgpu_crtc->cursor_hot_x = hot_x;
2447		amdgpu_crtc->cursor_hot_y = hot_y;
2448	}
2449
2450	dce_v8_0_show_cursor(crtc);
2451	dce_v8_0_lock_cursor(crtc, false);
2452
2453unpin:
2454	if (amdgpu_crtc->cursor_bo) {
2455		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2456		ret = amdgpu_bo_reserve(aobj, false);
2457		if (likely(ret == 0)) {
2458			amdgpu_bo_unpin(aobj);
2459			amdgpu_bo_unreserve(aobj);
2460		}
2461		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2462	}
2463
2464	amdgpu_crtc->cursor_bo = obj;
2465	return 0;
2466}
2467
2468static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2469{
2470	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2471
2472	if (amdgpu_crtc->cursor_bo) {
2473		dce_v8_0_lock_cursor(crtc, true);
2474
2475		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2476					    amdgpu_crtc->cursor_y);
2477
2478		dce_v8_0_show_cursor(crtc);
2479
2480		dce_v8_0_lock_cursor(crtc, false);
2481	}
2482}
2483
2484static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2485				   u16 *blue, uint32_t size)
2486{
2487	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2488	int i;
2489
2490	/* userspace palettes are always correct as is */
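	/* the 16-bit userspace gamma values are truncated to the 10-bit range
	 * the hardware LUT expects, hence the >> 6 below
	 */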
2491	for (i = 0; i < size; i++) {
2492		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2493		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2494		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2495	}
2496	dce_v8_0_crtc_load_lut(crtc);
2497
2498	return 0;
2499}
2500
2501static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2502{
2503	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2504
2505	drm_crtc_cleanup(crtc);
2506	kfree(amdgpu_crtc);
2507}
2508
2509static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2510	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2511	.cursor_move = dce_v8_0_crtc_cursor_move,
2512	.gamma_set = dce_v8_0_crtc_gamma_set,
2513	.set_config = amdgpu_crtc_set_config,
2514	.destroy = dce_v8_0_crtc_destroy,
2515	.page_flip_target = amdgpu_crtc_page_flip_target,
2516};
2517
2518static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2519{
2520	struct drm_device *dev = crtc->dev;
2521	struct amdgpu_device *adev = dev->dev_private;
2522	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2523	unsigned type;
2524
2525	switch (mode) {
2526	case DRM_MODE_DPMS_ON:
2527		amdgpu_crtc->enabled = true;
2528		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2529		dce_v8_0_vga_enable(crtc, true);
2530		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2531		dce_v8_0_vga_enable(crtc, false);
2532		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2533		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2534		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2535		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2536		drm_crtc_vblank_on(crtc);
2537		dce_v8_0_crtc_load_lut(crtc);
2538		break;
2539	case DRM_MODE_DPMS_STANDBY:
2540	case DRM_MODE_DPMS_SUSPEND:
2541	case DRM_MODE_DPMS_OFF:
2542		drm_crtc_vblank_off(crtc);
2543		if (amdgpu_crtc->enabled) {
2544			dce_v8_0_vga_enable(crtc, true);
2545			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2546			dce_v8_0_vga_enable(crtc, false);
2547		}
2548		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2549		amdgpu_crtc->enabled = false;
2550		break;
2551	}
2552	/* adjust pm to dpms */
2553	amdgpu_pm_compute_clocks(adev);
2554}
2555
2556static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2557{
2558	/* disable crtc pair power gating before programming */
2559	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2560	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2561	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2562}
2563
2564static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2565{
2566	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2567	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2568}
2569
2570static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2571{
2572	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2573	struct drm_device *dev = crtc->dev;
2574	struct amdgpu_device *adev = dev->dev_private;
2575	struct amdgpu_atom_ss ss;
2576	int i;
2577
2578	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2579	if (crtc->primary->fb) {
2580		int r;
2581		struct amdgpu_framebuffer *amdgpu_fb;
2582		struct amdgpu_bo *abo;
2583
2584		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2585		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2586		r = amdgpu_bo_reserve(abo, false);
2587		if (unlikely(r))
2588			DRM_ERROR("failed to reserve abo before unpin\n");
2589		else {
2590			amdgpu_bo_unpin(abo);
2591			amdgpu_bo_unreserve(abo);
2592		}
2593	}
2594	/* disable the GRPH */
2595	dce_v8_0_grph_enable(crtc, false);
2596
2597	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2598
2599	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2600		if (adev->mode_info.crtcs[i] &&
2601		    adev->mode_info.crtcs[i]->enabled &&
2602		    i != amdgpu_crtc->crtc_id &&
2603		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2604			/* one other crtc is using this pll, don't turn
2605			 * off the pll
2606			 */
2607			goto done;
2608		}
2609	}
2610
2611	switch (amdgpu_crtc->pll_id) {
2612	case ATOM_PPLL1:
2613	case ATOM_PPLL2:
2614		/* disable the ppll */
2615		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2616                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2617		break;
2618	case ATOM_PPLL0:
2619		/* disable the ppll */
2620		if ((adev->asic_type == CHIP_KAVERI) ||
2621		    (adev->asic_type == CHIP_BONAIRE) ||
2622		    (adev->asic_type == CHIP_HAWAII))
2623			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2624						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2625		break;
2626	default:
2627		break;
2628	}
2629done:
2630	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2631	amdgpu_crtc->adjusted_clock = 0;
2632	amdgpu_crtc->encoder = NULL;
2633	amdgpu_crtc->connector = NULL;
2634}
2635
2636static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2637				  struct drm_display_mode *mode,
2638				  struct drm_display_mode *adjusted_mode,
2639				  int x, int y, struct drm_framebuffer *old_fb)
2640{
2641	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2642
2643	if (!amdgpu_crtc->adjusted_clock)
2644		return -EINVAL;
2645
2646	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2647	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2648	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2649	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2650	amdgpu_atombios_crtc_scaler_setup(crtc);
2651	dce_v8_0_cursor_reset(crtc);
2652	/* update the hw version for dpm */
2653	amdgpu_crtc->hw_mode = *adjusted_mode;
2654
2655	return 0;
2656}
2657
2658static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2659				     const struct drm_display_mode *mode,
2660				     struct drm_display_mode *adjusted_mode)
2661{
2662	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2663	struct drm_device *dev = crtc->dev;
2664	struct drm_encoder *encoder;
2665
2666	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2667	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2668		if (encoder->crtc == crtc) {
2669			amdgpu_crtc->encoder = encoder;
2670			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2671			break;
2672		}
2673	}
2674	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2675		amdgpu_crtc->encoder = NULL;
2676		amdgpu_crtc->connector = NULL;
2677		return false;
2678	}
2679	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2680		return false;
2681	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2682		return false;
2683	/* pick pll */
2684	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2685	/* if we can't get a PPLL for a non-DP encoder, fail */
2686	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2687	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2688		return false;
2689
2690	return true;
2691}
2692
2693static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2694				  struct drm_framebuffer *old_fb)
2695{
2696	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2697}
2698
2699static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2700					 struct drm_framebuffer *fb,
2701					 int x, int y, enum mode_set_atomic state)
2702{
2703	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2704}
2705
2706static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2707	.dpms = dce_v8_0_crtc_dpms,
2708	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2709	.mode_set = dce_v8_0_crtc_mode_set,
2710	.mode_set_base = dce_v8_0_crtc_set_base,
2711	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2712	.prepare = dce_v8_0_crtc_prepare,
2713	.commit = dce_v8_0_crtc_commit,
2714	.load_lut = dce_v8_0_crtc_load_lut,
2715	.disable = dce_v8_0_crtc_disable,
2716};
2717
2718static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2719{
2720	struct amdgpu_crtc *amdgpu_crtc;
2721	int i;
2722
2723	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2724			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2725	if (amdgpu_crtc == NULL)
2726		return -ENOMEM;
2727
2728	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2729
2730	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2731	amdgpu_crtc->crtc_id = index;
2732	adev->mode_info.crtcs[index] = amdgpu_crtc;
2733
2734	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2735	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2736	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2737	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2738
2739	for (i = 0; i < 256; i++) {
2740		amdgpu_crtc->lut_r[i] = i << 2;
2741		amdgpu_crtc->lut_g[i] = i << 2;
2742		amdgpu_crtc->lut_b[i] = i << 2;
2743	}
2744
2745	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2746
2747	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2748	amdgpu_crtc->adjusted_clock = 0;
2749	amdgpu_crtc->encoder = NULL;
2750	amdgpu_crtc->connector = NULL;
2751	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2752
2753	return 0;
2754}
2755
2756static int dce_v8_0_early_init(void *handle)
2757{
2758	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2759
2760	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2761	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2762
2763	dce_v8_0_set_display_funcs(adev);
2764	dce_v8_0_set_irq_funcs(adev);
2765
2766	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2767
2768	switch (adev->asic_type) {
2769	case CHIP_BONAIRE:
2770	case CHIP_HAWAII:
2771		adev->mode_info.num_hpd = 6;
2772		adev->mode_info.num_dig = 6;
2773		break;
2774	case CHIP_KAVERI:
2775		adev->mode_info.num_hpd = 6;
2776		adev->mode_info.num_dig = 7;
2777		break;
2778	case CHIP_KABINI:
2779	case CHIP_MULLINS:
2780		adev->mode_info.num_hpd = 6;
2781		adev->mode_info.num_dig = 6; /* ? */
2782		break;
2783	default:
2784		/* FIXME: not supported yet */
2785		return -EINVAL;
2786	}
2787
2788	return 0;
2789}
2790
2791static int dce_v8_0_sw_init(void *handle)
2792{
2793	int r, i;
2794	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2795
2796	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2797		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2798		if (r)
2799			return r;
2800	}
2801
2802	for (i = 8; i < 20; i += 2) {
2803		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2804		if (r)
2805			return r;
2806	}
2807
2808	/* HPD hotplug */
2809	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2810	if (r)
2811		return r;
2812
2813	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2814
2815	adev->ddev->mode_config.async_page_flip = true;
2816
2817	adev->ddev->mode_config.max_width = 16384;
2818	adev->ddev->mode_config.max_height = 16384;
2819
2820	adev->ddev->mode_config.preferred_depth = 24;
2821	adev->ddev->mode_config.prefer_shadow = 1;
2822
2823	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2824
2825	r = amdgpu_modeset_create_props(adev);
2826	if (r)
2827		return r;
2828
2829	adev->ddev->mode_config.max_width = 16384;
2830	adev->ddev->mode_config.max_height = 16384;
2831
2832	/* allocate crtcs */
2833	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2834		r = dce_v8_0_crtc_init(adev, i);
2835		if (r)
2836			return r;
2837	}
2838
2839	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2840		amdgpu_print_display_setup(adev->ddev);
2841	else
2842		return -EINVAL;
2843
2844	/* setup afmt */
2845	r = dce_v8_0_afmt_init(adev);
2846	if (r)
2847		return r;
2848
2849	r = dce_v8_0_audio_init(adev);
2850	if (r)
2851		return r;
2852
2853	drm_kms_helper_poll_init(adev->ddev);
2854
2855	adev->mode_info.mode_config_initialized = true;
2856	return 0;
2857}
2858
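/**
 * dce_v8_0_sw_fini - software state teardown callback (DCE 8.x)
 *
 * @handle: handle to the amdgpu_device
 *
 * Free the hardcoded EDID, stop output polling and tear down the
 * audio, afmt and DRM mode config state set up in dce_v8_0_sw_init().
 * Returns 0.
 */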
2859static int dce_v8_0_sw_fini(void *handle)
2860{
2861	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2862
2863	kfree(adev->mode_info.bios_hardcoded_edid);
2864
2865	drm_kms_helper_poll_fini(adev->ddev);
2866
2867	dce_v8_0_audio_fini(adev);
2868
2869	dce_v8_0_afmt_fini(adev);
2870
2871	drm_mode_config_cleanup(adev->ddev);
2872	adev->mode_info.mode_config_initialized = false;
2873
2874	return 0;
2875}
2876
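/**
 * dce_v8_0_hw_init - hardware init callback (DCE 8.x)
 *
 * @handle: handle to the amdgpu_device
 *
 * Set up the DIG PHYs and the display engine PLL, initialize the hpd
 * pins, disable the audio pins and initialize the pageflip interrupts.
 * Returns 0.
 */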
2877static int dce_v8_0_hw_init(void *handle)
2878{
2879	int i;
2880	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2881
2882	/* init dig PHYs, disp eng pll */
2883	amdgpu_atombios_encoder_init_dig(adev);
2884	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2885
2886	/* initialize hpd */
2887	dce_v8_0_hpd_init(adev);
2888
2889	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
2890		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2892
2893	dce_v8_0_pageflip_interrupt_init(adev);
2894
2895	return 0;
2896}
2897
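/**
 * dce_v8_0_hw_fini - hardware teardown callback (DCE 8.x)
 *
 * @handle: handle to the amdgpu_device
 *
 * Disable the hpd pins, the audio pins and the pageflip interrupts.
 * Returns 0.
 */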
2898static int dce_v8_0_hw_fini(void *handle)
2899{
2900	int i;
2901	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2902
2903	dce_v8_0_hpd_fini(adev);
2904
2905	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
2906		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2908
2909	dce_v8_0_pageflip_interrupt_fini(adev);
2910
2911	return 0;
2912}
2913
2914static int dce_v8_0_suspend(void *handle)
2915{
2916	return dce_v8_0_hw_fini(handle);
2917}
2918
2919static int dce_v8_0_resume(void *handle)
2920{
2921	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2922	int ret;
2923
2924	ret = dce_v8_0_hw_init(handle);
2925
2926	/* turn on the BL */
2927	if (adev->mode_info.bl_encoder) {
2928		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2929								  adev->mode_info.bl_encoder);
2930		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2931						    bl_level);
2932	}
2933
2934	return ret;
2935}
2936
2937static bool dce_v8_0_is_idle(void *handle)
2938{
2939	return true;
2940}
2941
2942static int dce_v8_0_wait_for_idle(void *handle)
2943{
2944	return 0;
2945}
2946
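/**
 * dce_v8_0_soft_reset - soft reset the display block (DCE 8.x)
 *
 * @handle: handle to the amdgpu_device
 *
 * If the display controller appears hung, pulse the DC soft reset bit
 * in SRBM_SOFT_RESET with a short delay before and after releasing it.
 * Returns 0.
 */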
2947static int dce_v8_0_soft_reset(void *handle)
2948{
2949	u32 srbm_soft_reset = 0, tmp;
2950	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2951
2952	if (dce_v8_0_is_display_hung(adev))
2953		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2954
2955	if (srbm_soft_reset) {
2956		tmp = RREG32(mmSRBM_SOFT_RESET);
2957		tmp |= srbm_soft_reset;
2958		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2959		WREG32(mmSRBM_SOFT_RESET, tmp);
2960		tmp = RREG32(mmSRBM_SOFT_RESET);
2961
2962		udelay(50);
2963
2964		tmp &= ~srbm_soft_reset;
2965		WREG32(mmSRBM_SOFT_RESET, tmp);
2966		tmp = RREG32(mmSRBM_SOFT_RESET);
2967
2968		/* Wait a little for things to settle down */
2969		udelay(50);
2970	}
2971	return 0;
2972}
2973
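/**
 * dce_v8_0_set_crtc_vblank_interrupt_state - enable/disable vblank irq
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to change the interrupt state on
 * @state: enable or disable the interrupt
 *
 * Set or clear the VBLANK bit in the LB_INTERRUPT_MASK register of the
 * requested crtc according to @state.
 */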
2974static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2975						     int crtc,
2976						     enum amdgpu_interrupt_state state)
2977{
2978	u32 reg_block, lb_interrupt_mask;
2979
2980	if (crtc >= adev->mode_info.num_crtc) {
2981		DRM_DEBUG("invalid crtc %d\n", crtc);
2982		return;
2983	}
2984
2985	switch (crtc) {
2986	case 0:
2987		reg_block = CRTC0_REGISTER_OFFSET;
2988		break;
2989	case 1:
2990		reg_block = CRTC1_REGISTER_OFFSET;
2991		break;
2992	case 2:
2993		reg_block = CRTC2_REGISTER_OFFSET;
2994		break;
2995	case 3:
2996		reg_block = CRTC3_REGISTER_OFFSET;
2997		break;
2998	case 4:
2999		reg_block = CRTC4_REGISTER_OFFSET;
3000		break;
3001	case 5:
3002		reg_block = CRTC5_REGISTER_OFFSET;
3003		break;
3004	default:
3005		DRM_DEBUG("invalid crtc %d\n", crtc);
3006		return;
3007	}
3008
3009	switch (state) {
3010	case AMDGPU_IRQ_STATE_DISABLE:
3011		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3012		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3013		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3014		break;
3015	case AMDGPU_IRQ_STATE_ENABLE:
3016		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3017		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3018		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3019		break;
3020	default:
3021		break;
3022	}
3023}
3024
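/**
 * dce_v8_0_set_crtc_vline_interrupt_state - enable/disable vline irq
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to change the interrupt state on
 * @state: enable or disable the interrupt
 *
 * Set or clear the VLINE bit in the LB_INTERRUPT_MASK register of the
 * requested crtc according to @state.
 */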
3025static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3026						    int crtc,
3027						    enum amdgpu_interrupt_state state)
3028{
3029	u32 reg_block, lb_interrupt_mask;
3030
3031	if (crtc >= adev->mode_info.num_crtc) {
3032		DRM_DEBUG("invalid crtc %d\n", crtc);
3033		return;
3034	}
3035
3036	switch (crtc) {
3037	case 0:
3038		reg_block = CRTC0_REGISTER_OFFSET;
3039		break;
3040	case 1:
3041		reg_block = CRTC1_REGISTER_OFFSET;
3042		break;
3043	case 2:
3044		reg_block = CRTC2_REGISTER_OFFSET;
3045		break;
3046	case 3:
3047		reg_block = CRTC3_REGISTER_OFFSET;
3048		break;
3049	case 4:
3050		reg_block = CRTC4_REGISTER_OFFSET;
3051		break;
3052	case 5:
3053		reg_block = CRTC5_REGISTER_OFFSET;
3054		break;
3055	default:
3056		DRM_DEBUG("invalid crtc %d\n", crtc);
3057		return;
3058	}
3059
3060	switch (state) {
3061	case AMDGPU_IRQ_STATE_DISABLE:
3062		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3063		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3064		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3065		break;
3066	case AMDGPU_IRQ_STATE_ENABLE:
3067		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3068		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3069		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3070		break;
3071	default:
3072		break;
3073	}
3074}
3075
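/**
 * dce_v8_0_set_hpd_interrupt_state - enable/disable hpd irq
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: hpd pin to change the interrupt state on
 * @state: enable or disable the interrupt
 *
 * Set or clear the interrupt enable bit in the DC_HPDx_INT_CONTROL
 * register of the requested hpd pin according to @state.
 * Returns 0.
 */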
3076static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3077					    struct amdgpu_irq_src *src,
3078					    unsigned type,
3079					    enum amdgpu_interrupt_state state)
3080{
3081	u32 dc_hpd_int_cntl;
3082
3083	if (type >= adev->mode_info.num_hpd) {
3084		DRM_DEBUG("invalid hpd %d\n", type);
3085		return 0;
3086	}
3087
3088	switch (state) {
3089	case AMDGPU_IRQ_STATE_DISABLE:
3090		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3091		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3092		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3093		break;
3094	case AMDGPU_IRQ_STATE_ENABLE:
3095		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3096		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3097		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3098		break;
3099	default:
3100		break;
3101	}
3102
3103	return 0;
3104}
3105
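/**
 * dce_v8_0_set_crtc_interrupt_state - route crtc irq state changes
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: AMDGPU_CRTC_IRQ_VBLANKn or AMDGPU_CRTC_IRQ_VLINEn
 * @state: enable or disable the interrupt
 *
 * Decode the crtc from @type and forward the request to the vblank or
 * vline helper above.
 * Returns 0.
 */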
3106static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3107					     struct amdgpu_irq_src *src,
3108					     unsigned type,
3109					     enum amdgpu_interrupt_state state)
3110{
3111	switch (type) {
3112	case AMDGPU_CRTC_IRQ_VBLANK1:
3113		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3114		break;
3115	case AMDGPU_CRTC_IRQ_VBLANK2:
3116		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3117		break;
3118	case AMDGPU_CRTC_IRQ_VBLANK3:
3119		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3120		break;
3121	case AMDGPU_CRTC_IRQ_VBLANK4:
3122		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3123		break;
3124	case AMDGPU_CRTC_IRQ_VBLANK5:
3125		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3126		break;
3127	case AMDGPU_CRTC_IRQ_VBLANK6:
3128		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3129		break;
3130	case AMDGPU_CRTC_IRQ_VLINE1:
3131		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3132		break;
3133	case AMDGPU_CRTC_IRQ_VLINE2:
3134		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3135		break;
3136	case AMDGPU_CRTC_IRQ_VLINE3:
3137		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3138		break;
3139	case AMDGPU_CRTC_IRQ_VLINE4:
3140		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3141		break;
3142	case AMDGPU_CRTC_IRQ_VLINE5:
3143		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3144		break;
3145	case AMDGPU_CRTC_IRQ_VLINE6:
3146		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3147		break;
3148	default:
3149		break;
3150	}
3151	return 0;
3152}
3153
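/**
 * dce_v8_0_crtc_irq - crtc interrupt handler (DCE 8.x)
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Look up the crtc from the iv entry src_id, ack the pending vblank or
 * vline status bit and, for vblank, call drm_handle_vblank() if the
 * interrupt is enabled.
 * Returns 0.
 */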
3154static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3155			     struct amdgpu_irq_src *source,
3156			     struct amdgpu_iv_entry *entry)
3157{
3158	unsigned crtc = entry->src_id - 1;
3159	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3160	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3161
3162	switch (entry->src_data) {
3163	case 0: /* vblank */
3164		if (disp_int & interrupt_status_offsets[crtc].vblank)
3165			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3166		else
3167			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3168
3169		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3170			drm_handle_vblank(adev->ddev, crtc);
3171		}
3172		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3173		break;
3174	case 1: /* vline */
3175		if (disp_int & interrupt_status_offsets[crtc].vline)
3176			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3177		else
3178			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3179
3180		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3181		break;
3182	default:
3183		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3184		break;
3185	}
3186
3187	return 0;
3188}
3189
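/**
 * dce_v8_0_set_pageflip_interrupt_state - enable/disable pageflip irq
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: crtc to change the interrupt state on
 * @state: enable or disable the interrupt
 *
 * Set or clear the pageflip interrupt mask bit in
 * GRPH_INTERRUPT_CONTROL for the requested crtc.
 * Returns 0 on success, -EINVAL on an invalid crtc.
 */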
3190static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3191						 struct amdgpu_irq_src *src,
3192						 unsigned type,
3193						 enum amdgpu_interrupt_state state)
3194{
3195	u32 reg;
3196
3197	if (type >= adev->mode_info.num_crtc) {
3198		DRM_ERROR("invalid pageflip crtc %d\n", type);
3199		return -EINVAL;
3200	}
3201
3202	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3203	if (state == AMDGPU_IRQ_STATE_DISABLE)
3204		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3205		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3206	else
3207		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3208		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3209
3210	return 0;
3211}
3212
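/**
 * dce_v8_0_pageflip_irq - pageflip interrupt handler (DCE 8.x)
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Derive the crtc from the iv entry, clear the pending pageflip status
 * and, if a flip was submitted, complete it: send the vblank event to
 * userspace, drop the vblank reference and schedule the unpin work.
 * Returns 0.
 */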
3213static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3214				struct amdgpu_irq_src *source,
3215				struct amdgpu_iv_entry *entry)
3216{
3217	unsigned long flags;
3218	unsigned crtc_id;
3219	struct amdgpu_crtc *amdgpu_crtc;
3220	struct amdgpu_flip_work *works;
3221
3222	crtc_id = (entry->src_id - 8) >> 1;
3223	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3224
3225	if (crtc_id >= adev->mode_info.num_crtc) {
3226		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3227		return -EINVAL;
3228	}
3229
3230	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3231	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3232		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3233		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3234
3235	/* IRQ could occur when in initial stage */
3236	if (amdgpu_crtc == NULL)
3237		return 0;
3238
3239	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3240	works = amdgpu_crtc->pflip_works;
3241	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3242		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3243						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3244						amdgpu_crtc->pflip_status,
3245						AMDGPU_FLIP_SUBMITTED);
3246		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3247		return 0;
3248	}
3249
3250	/* page flip completed. clean up */
3251	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3252	amdgpu_crtc->pflip_works = NULL;
3253
3254	/* wake up userspace */
3255	if (works->event)
3256		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3257
3258	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3259
3260	drm_crtc_vblank_put(&amdgpu_crtc->base);
3261	schedule_work(&works->unpin_work);
3262
3263	return 0;
3264}
3265
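/**
 * dce_v8_0_hpd_irq - hpd interrupt handler (DCE 8.x)
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Ack the interrupt for the hpd pin in the iv entry and schedule the
 * hotplug work.
 * Returns 0.
 */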
3266static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3267			    struct amdgpu_irq_src *source,
3268			    struct amdgpu_iv_entry *entry)
3269{
3270	uint32_t disp_int, mask, tmp;
3271	unsigned hpd;
3272
3273	if (entry->src_data >= adev->mode_info.num_hpd) {
3274		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3275		return 0;
3276	}
3277
3278	hpd = entry->src_data;
3279	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3280	mask = interrupt_status_offsets[hpd].hpd;
3281
3282	if (disp_int & mask) {
3283		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3284		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3285		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3286		schedule_work(&adev->hotplug_work);
3287		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3288	}
3289
3290	return 0;
3291
3292}
3293
3294static int dce_v8_0_set_clockgating_state(void *handle,
3295					  enum amd_clockgating_state state)
3296{
3297	return 0;
3298}
3299
3300static int dce_v8_0_set_powergating_state(void *handle,
3301					  enum amd_powergating_state state)
3302{
3303	return 0;
3304}
3305
3306static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3307	.name = "dce_v8_0",
3308	.early_init = dce_v8_0_early_init,
3309	.late_init = NULL,
3310	.sw_init = dce_v8_0_sw_init,
3311	.sw_fini = dce_v8_0_sw_fini,
3312	.hw_init = dce_v8_0_hw_init,
3313	.hw_fini = dce_v8_0_hw_fini,
3314	.suspend = dce_v8_0_suspend,
3315	.resume = dce_v8_0_resume,
3316	.is_idle = dce_v8_0_is_idle,
3317	.wait_for_idle = dce_v8_0_wait_for_idle,
3318	.soft_reset = dce_v8_0_soft_reset,
3319	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3320	.set_powergating_state = dce_v8_0_set_powergating_state,
3321};
3322
3323static void
3324dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3325			  struct drm_display_mode *mode,
3326			  struct drm_display_mode *adjusted_mode)
3327{
3328	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3329
3330	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3331
3332	/* need to call this here rather than in prepare() since we need some crtc info */
3333	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3334
3335	/* set scaler clears this on some chips */
3336	dce_v8_0_set_interleave(encoder->crtc, mode);
3337
3338	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3339		dce_v8_0_afmt_enable(encoder, true);
3340		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3341	}
3342}
3343
3344static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3345{
3346	struct amdgpu_device *adev = encoder->dev->dev_private;
3347	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3348	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3349
3350	if ((amdgpu_encoder->active_device &
3351	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3352	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3353	     ENCODER_OBJECT_ID_NONE)) {
3354		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3355		if (dig) {
3356			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3357			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3358				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3359		}
3360	}
3361
3362	amdgpu_atombios_scratch_regs_lock(adev, true);
3363
3364	if (connector) {
3365		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3366
3367		/* select the clock/data port if it uses a router */
3368		if (amdgpu_connector->router.cd_valid)
3369			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3370
3371		/* turn eDP panel on for mode set */
3372		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3373			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3374							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3375	}
3376
3377	/* this is needed for the pll/ss setup to work correctly in some cases */
3378	amdgpu_atombios_encoder_set_crtc_source(encoder);
3379	/* set up the FMT blocks */
3380	dce_v8_0_program_fmt(encoder);
3381}
3382
3383static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3384{
3385	struct drm_device *dev = encoder->dev;
3386	struct amdgpu_device *adev = dev->dev_private;
3387
3388	/* need to call this here as we need the crtc set up */
3389	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3390	amdgpu_atombios_scratch_regs_lock(adev, false);
3391}
3392
3393static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3394{
3395	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3396	struct amdgpu_encoder_atom_dig *dig;
3397
3398	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3399
3400	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3401		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3402			dce_v8_0_afmt_enable(encoder, false);
3403		dig = amdgpu_encoder->enc_priv;
3404		dig->dig_encoder = -1;
3405	}
3406	amdgpu_encoder->active_device = 0;
3407}
3408
3409/* these are handled by the primary encoders */
3410static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3411{
3412
3413}
3414
3415static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3416{
3417
3418}
3419
3420static void
3421dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3422		      struct drm_display_mode *mode,
3423		      struct drm_display_mode *adjusted_mode)
3424{
3425
3426}
3427
3428static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3429{
3430
3431}
3432
3433static void
3434dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3435{
3436
3437}
3438
3439static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3440	.dpms = dce_v8_0_ext_dpms,
3441	.prepare = dce_v8_0_ext_prepare,
3442	.mode_set = dce_v8_0_ext_mode_set,
3443	.commit = dce_v8_0_ext_commit,
3444	.disable = dce_v8_0_ext_disable,
3445	/* no detect for TMDS/LVDS yet */
3446};
3447
3448static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3449	.dpms = amdgpu_atombios_encoder_dpms,
3450	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3451	.prepare = dce_v8_0_encoder_prepare,
3452	.mode_set = dce_v8_0_encoder_mode_set,
3453	.commit = dce_v8_0_encoder_commit,
3454	.disable = dce_v8_0_encoder_disable,
3455	.detect = amdgpu_atombios_encoder_dig_detect,
3456};
3457
3458static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3459	.dpms = amdgpu_atombios_encoder_dpms,
3460	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3461	.prepare = dce_v8_0_encoder_prepare,
3462	.mode_set = dce_v8_0_encoder_mode_set,
3463	.commit = dce_v8_0_encoder_commit,
3464	.detect = amdgpu_atombios_encoder_dac_detect,
3465};
3466
3467static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3468{
3469	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3470	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3471		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3472	kfree(amdgpu_encoder->enc_priv);
3473	drm_encoder_cleanup(encoder);
3474	kfree(amdgpu_encoder);
3475}
3476
3477static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3478	.destroy = dce_v8_0_encoder_destroy,
3479};
3480
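/**
 * dce_v8_0_encoder_add - add an encoder from the vbios tables
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the vbios
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capabilities
 *
 * If the encoder is already registered, only OR in the new supported
 * devices; otherwise allocate it, pick the DRM encoder type from the
 * object id and attach the matching helper funcs.
 */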
3481static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3482				 uint32_t encoder_enum,
3483				 uint32_t supported_device,
3484				 u16 caps)
3485{
3486	struct drm_device *dev = adev->ddev;
3487	struct drm_encoder *encoder;
3488	struct amdgpu_encoder *amdgpu_encoder;
3489
3490	/* see if we already added it */
3491	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3492		amdgpu_encoder = to_amdgpu_encoder(encoder);
3493		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3494			amdgpu_encoder->devices |= supported_device;
3495			return;
3496		}
3497
3498	}
3499
3500	/* add a new one */
3501	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3502	if (!amdgpu_encoder)
3503		return;
3504
3505	encoder = &amdgpu_encoder->base;
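	/* possible_crtcs is a bitmask of the crtcs this encoder can be
	 * routed to, e.g. 0x3f allows crtcs 0-5 on a 6 crtc asic
	 */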
3506	switch (adev->mode_info.num_crtc) {
3507	case 1:
3508		encoder->possible_crtcs = 0x1;
3509		break;
3510	case 2:
3511	default:
3512		encoder->possible_crtcs = 0x3;
3513		break;
3514	case 4:
3515		encoder->possible_crtcs = 0xf;
3516		break;
3517	case 6:
3518		encoder->possible_crtcs = 0x3f;
3519		break;
3520	}
3521
3522	amdgpu_encoder->enc_priv = NULL;
3523
3524	amdgpu_encoder->encoder_enum = encoder_enum;
3525	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3526	amdgpu_encoder->devices = supported_device;
3527	amdgpu_encoder->rmx_type = RMX_OFF;
3528	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3529	amdgpu_encoder->is_ext_encoder = false;
3530	amdgpu_encoder->caps = caps;
3531
3532	switch (amdgpu_encoder->encoder_id) {
3533	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3534	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3535		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3536				 DRM_MODE_ENCODER_DAC, NULL);
3537		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3538		break;
3539	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3540	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3541	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3542	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3543	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3544		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3545			amdgpu_encoder->rmx_type = RMX_FULL;
3546			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3547					 DRM_MODE_ENCODER_LVDS, NULL);
3548			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3549		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3550			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3551					 DRM_MODE_ENCODER_DAC, NULL);
3552			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3553		} else {
3554			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3555					 DRM_MODE_ENCODER_TMDS, NULL);
3556			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3557		}
3558		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3559		break;
3560	case ENCODER_OBJECT_ID_SI170B:
3561	case ENCODER_OBJECT_ID_CH7303:
3562	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3563	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3564	case ENCODER_OBJECT_ID_TITFP513:
3565	case ENCODER_OBJECT_ID_VT1623:
3566	case ENCODER_OBJECT_ID_HDMI_SI1930:
3567	case ENCODER_OBJECT_ID_TRAVIS:
3568	case ENCODER_OBJECT_ID_NUTMEG:
3569		/* these are handled by the primary encoders */
3570		amdgpu_encoder->is_ext_encoder = true;
3571		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3572			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3573					 DRM_MODE_ENCODER_LVDS, NULL);
3574		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3575			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3576					 DRM_MODE_ENCODER_DAC, NULL);
3577		else
3578			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3579					 DRM_MODE_ENCODER_TMDS, NULL);
3580		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3581		break;
3582	}
3583}
3584
3585static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3586	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
3587	.bandwidth_update = &dce_v8_0_bandwidth_update,
3588	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3589	.vblank_wait = &dce_v8_0_vblank_wait,
3590	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3591	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3592	.hpd_sense = &dce_v8_0_hpd_sense,
3593	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3594	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3595	.page_flip = &dce_v8_0_page_flip,
3596	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3597	.add_encoder = &dce_v8_0_encoder_add,
3598	.add_connector = &amdgpu_connector_add,
3599	.stop_mc_access = &dce_v8_0_stop_mc_access,
3600	.resume_mc_access = &dce_v8_0_resume_mc_access,
3601};
3602
3603static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3604{
3605	if (adev->mode_info.funcs == NULL)
3606		adev->mode_info.funcs = &dce_v8_0_display_funcs;
3607}
3608
3609static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3610	.set = dce_v8_0_set_crtc_interrupt_state,
3611	.process = dce_v8_0_crtc_irq,
3612};
3613
3614static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3615	.set = dce_v8_0_set_pageflip_interrupt_state,
3616	.process = dce_v8_0_pageflip_irq,
3617};
3618
3619static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3620	.set = dce_v8_0_set_hpd_interrupt_state,
3621	.process = dce_v8_0_hpd_irq,
3622};
3623
3624static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3625{
3626	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3627	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3628
3629	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3630	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3631
3632	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3633	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3634}
3635
3636const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3637{
3638	.type = AMD_IP_BLOCK_TYPE_DCE,
3639	.major = 8,
3640	.minor = 0,
3641	.rev = 0,
3642	.funcs = &dce_v8_0_ip_funcs,
3643};
3644
3645const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3646{
3647	.type = AMD_IP_BLOCK_TYPE_DCE,
3648	.major = 8,
3649	.minor = 1,
3650	.rev = 0,
3651	.funcs = &dce_v8_0_ip_funcs,
3652};
3653
3654const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3655{
3656	.type = AMD_IP_BLOCK_TYPE_DCE,
3657	.major = 8,
3658	.minor = 2,
3659	.rev = 0,
3660	.funcs = &dce_v8_0_ip_funcs,
3661};
3662
3663const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3664{
3665	.type = AMD_IP_BLOCK_TYPE_DCE,
3666	.major = 8,
3667	.minor = 3,
3668	.rev = 0,
3669	.funcs = &dce_v8_0_ip_funcs,
3670};
3671
3672const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3673{
3674	.type = AMD_IP_BLOCK_TYPE_DCE,
3675	.major = 8,
3676	.minor = 5,
3677	.rev = 0,
3678	.funcs = &dce_v8_0_ip_funcs,
3679};