   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "drmP.h"
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "atom.h"
  28#include "amdgpu_atombios.h"
  29#include "atombios_crtc.h"
  30#include "atombios_encoders.h"
  31#include "amdgpu_pll.h"
  32#include "amdgpu_connectors.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gca/gfx_6_0_d.h"
  39#include "gca/gfx_6_0_sh_mask.h"
  40#include "gmc/gmc_6_0_d.h"
  41#include "gmc/gmc_6_0_sh_mask.h"
  42#include "dce/dce_6_0_d.h"
  43#include "dce/dce_6_0_sh_mask.h"
  44#include "gca/gfx_7_2_enum.h"
  45#include "si_enums.h"
  46
  47static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  49
  50static const u32 crtc_offsets[6] =
  51{
  52	SI_CRTC0_REGISTER_OFFSET,
  53	SI_CRTC1_REGISTER_OFFSET,
  54	SI_CRTC2_REGISTER_OFFSET,
  55	SI_CRTC3_REGISTER_OFFSET,
  56	SI_CRTC4_REGISTER_OFFSET,
  57	SI_CRTC5_REGISTER_OFFSET
  58};
  59
  60static const u32 hpd_offsets[] =
  61{
  62	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
  63	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
  64	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
  65	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
  66	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
  67	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
  68};
  69
  70static const uint32_t dig_offsets[] = {
  71	SI_CRTC0_REGISTER_OFFSET,
  72	SI_CRTC1_REGISTER_OFFSET,
  73	SI_CRTC2_REGISTER_OFFSET,
  74	SI_CRTC3_REGISTER_OFFSET,
  75	SI_CRTC4_REGISTER_OFFSET,
  76	SI_CRTC5_REGISTER_OFFSET,
  77	(0x13830 - 0x7030) >> 2,
  78};
  79
  80static const struct {
  81	uint32_t	reg;
  82	uint32_t	vblank;
  83	uint32_t	vline;
  84	uint32_t	hpd;
  85
  86} interrupt_status_offsets[6] = { {
  87	.reg = mmDISP_INTERRUPT_STATUS,
  88	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  89	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  90	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  91}, {
  92	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  93	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 116} };
 117
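/*
 * Editorial note (not in the original source): the tables above give
 * per-instance register offsets.  Registers for a given CRTC or HPD pin are
 * reached by adding the instance offset to the instance-0 register, e.g.:
 *
 *   status = RREG32(mmCRTC_STATUS + crtc_offsets[2]);          -- CRTC2
 *   sense  = RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[4]);    -- HPD5
 *
 * which is exactly how the helpers below index them.
 */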
 118static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119				     u32 block_offset, u32 reg)
 120{
 121	DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
 122	return 0;
 123}
 124
 125static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 126				      u32 block_offset, u32 reg, u32 v)
 127{
 128	DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
 129}
 130
 131static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 132{
 133	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
 134		return true;
 135	else
 136		return false;
 137}
 138
 139static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 140{
 141	u32 pos1, pos2;
 142
 143	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 144	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 145
 146	if (pos1 != pos2)
 147		return true;
 148	else
 149		return false;
 150}
 151
 152/**
 153 * dce_v6_0_vblank_wait - vblank wait asic callback.
 154 *
 155 * @crtc: crtc to wait for vblank on
 156 *
 157 * Wait for vblank on the requested crtc (evergreen+).
 158 */
 159static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 160{
 161	unsigned i = 100;
 162
 163	if (crtc >= adev->mode_info.num_crtc)
 164		return;
 165
 166	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 167		return;
 168
 169	/* depending on when we hit vblank, we may be close to active; if so,
 170	 * wait for another frame.
 171	 */
 172	while (dce_v6_0_is_in_vblank(adev, crtc)) {
 173		if (i++ == 100) {
 174			i = 0;
 175			if (!dce_v6_0_is_counter_moving(adev, crtc))
 176				break;
 177		}
 178	}
 179
 180	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
 181		if (i++ == 100) {
 182			i = 0;
 183			if (!dce_v6_0_is_counter_moving(adev, crtc))
 184				break;
 185		}
 186	}
 187}
 188
 189static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 190{
 191	if (crtc >= adev->mode_info.num_crtc)
 192		return 0;
 193	else
 194		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 195}
 196
 197static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 198{
 199	unsigned i;
 200
 201	/* Enable pflip interrupts */
 202	for (i = 0; i < adev->mode_info.num_crtc; i++)
 203		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 204}
 205
 206static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 207{
 208	unsigned i;
 209
 210	/* Disable pflip interrupts */
 211	for (i = 0; i < adev->mode_info.num_crtc; i++)
 212		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 213}
 214
 215/**
 216 * dce_v6_0_page_flip - pageflip callback.
 217 *
 218 * @adev: amdgpu_device pointer
 219 * @crtc_id: crtc to pageflip on
 220 * @crtc_base: new address of the crtc (GPU MC address)
 221 * @async: asynchronous flip, latched at hsync rather than vsync
 222 *
 223 * Does the actual pageflip (evergreen+): the new scanout address is
 224 * written to the GRPH primary surface address registers and the
 225 * hardware latches the double buffered update at the next vertical
 226 * (or, for async flips, horizontal) retrace.
 227 */
 228static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 229			       int crtc_id, u64 crtc_base, bool async)
 230{
 231	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 232
 233	/* flip at hsync for async, default is vsync */
 234	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 235	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 236	/* update the scanout addresses */
 237	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 238	       upper_32_bits(crtc_base));
 239	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 240	       (u32)crtc_base);
 241
 242	/* post the write */
 243	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 244}
 245
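/*
 * Editorial sketch (assumption, not verified against the rest of the file):
 * dce_v6_0_page_flip() is the display-funcs .page_flip hook, so the pageflip
 * path would invoke it roughly as
 *
 *   adev->mode_info.funcs->page_flip(adev, crtc_id, new_crtc_base, async);
 *
 * Only the primary surface address registers are rewritten, so the flip is
 * latched by the CRTC at the next vertical (or, for async, horizontal)
 * retrace rather than taking effect immediately.
 */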
 246static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 247					u32 *vbl, u32 *position)
 248{
 249	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 250		return -EINVAL;
 251	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 252	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 253
 254	return 0;
 255
 256}
 257
 258/**
 259 * dce_v6_0_hpd_sense - hpd sense callback.
 260 *
 261 * @adev: amdgpu_device pointer
 262 * @hpd: hpd (hotplug detect) pin
 263 *
 264 * Checks if a digital monitor is connected (evergreen+).
 265 * Returns true if connected, false if not connected.
 266 */
 267static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 268			       enum amdgpu_hpd_id hpd)
 269{
 270	bool connected = false;
 271
 272	if (hpd >= adev->mode_info.num_hpd)
 273		return connected;
 274
 275	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 276		connected = true;
 277
 278	return connected;
 279}
 280
 281/**
 282 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 283 *
 284 * @adev: amdgpu_device pointer
 285 * @hpd: hpd (hotplug detect) pin
 286 *
 287 * Set the polarity of the hpd pin (evergreen+).
 288 */
 289static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 290				      enum amdgpu_hpd_id hpd)
 291{
 292	u32 tmp;
 293	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 294
 295	if (hpd >= adev->mode_info.num_hpd)
 296		return;
 297
 298	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 299	if (connected)
 300		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 301	else
 302		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 303	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 304}
 305
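/*
 * Editorial note: the interrupt polarity is programmed from the current sense
 * state so that the *next* transition (plug after unplug, or unplug after
 * plug) is the one that raises the HPD interrupt, which is why
 * dce_v6_0_hpd_sense() is consulted before writing DC_HPDx_INT_CONTROL.
 */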
 306/**
 307 * dce_v6_0_hpd_init - hpd setup callback.
 308 *
 309 * @adev: amdgpu_device pointer
 310 *
 311 * Setup the hpd pins used by the card (evergreen+).
 312 * Enable the pin, set the polarity, and enable the hpd interrupts.
 313 */
 314static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 315{
 316	struct drm_device *dev = adev->ddev;
 317	struct drm_connector *connector;
 318	u32 tmp;
 319
 320	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 321		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 322
 323		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 324			continue;
 325
 326		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 327		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 328		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 329
 330		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 331		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 332			/* don't try to enable hpd on eDP or LVDS; doing so can break the
 333			 * aux dp channel on iMacs, and skipping it helps (but does not fully fix)
 334			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 335			 * and also avoids interrupt storms during dpms.
 336			 */
 337			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 338			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 339			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 340			continue;
 341		}
 342
 343		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 344		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 345	}
 346
 347}
 348
 349/**
 350 * dce_v6_0_hpd_fini - hpd tear down callback.
 351 *
 352 * @adev: amdgpu_device pointer
 353 *
 354 * Tear down the hpd pins used by the card (evergreen+).
 355 * Disable the hpd interrupts.
 356 */
 357static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 358{
 359	struct drm_device *dev = adev->ddev;
 360	struct drm_connector *connector;
 361	u32 tmp;
 362
 363	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 364		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 365
 366		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 367			continue;
 368
 369		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 370		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 371		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 372
 373		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 374	}
 375}
 376
 377static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 378{
 379	return mmDC_GPIO_HPD_A;
 380}
 381
 382static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
 383{
 384	if (crtc >= adev->mode_info.num_crtc)
 385		return 0;
 386	else
 387		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 388}
 389
 390static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
 391				    struct amdgpu_mode_mc_save *save)
 392{
 393	u32 crtc_enabled, tmp, frame_count;
 394	int i, j;
 395
 396	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
 397	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 398
 399	/* disable VGA render */
 400	WREG32(mmVGA_RENDER_CONTROL, 0);
 401
 402	/* blank the display controllers */
 403	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 404		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 405		if (crtc_enabled) {
 406			save->crtc_enabled[i] = true;
 407			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 408
 409			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
 410				dce_v6_0_vblank_wait(adev, i);
 411				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 412				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
 413				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 414				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 415			}
 416			/* wait for the next frame */
 417			frame_count = evergreen_get_vblank_counter(adev, i);
 418			for (j = 0; j < adev->usec_timeout; j++) {
 419				if (evergreen_get_vblank_counter(adev, i) != frame_count)
 420					break;
 421				udelay(1);
 422			}
 423
 424			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 425			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 426			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 427			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 428			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 429			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 430			save->crtc_enabled[i] = false;
 431			/* leave the crtc marked disabled so resume_mc_access skips it */
 432		} else {
 433			save->crtc_enabled[i] = false;
 434		}
 435	}
 436}
 437
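/*
 * Editorial summary of dce_v6_0_stop_mc_access(): VGA scanout is disabled
 * first, then every active CRTC is blanked under CRTC_UPDATE_LOCK, the code
 * waits for the frame counter to advance so the blank is actually latched,
 * and finally CRTC_MASTER_EN is dropped.  dce_v6_0_resume_mc_access() below
 * is meant to restore the saved state once memory-controller reprogramming
 * is done.
 */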
 438static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 439				      struct amdgpu_mode_mc_save *save)
 440{
 441	u32 tmp;
 442	int i, j;
 443
 444	/* update crtc base addresses */
 445	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 446		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 447		       upper_32_bits(adev->mc.vram_start));
 448		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 449		       upper_32_bits(adev->mc.vram_start));
 450		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 451		       (u32)adev->mc.vram_start);
 452		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
 453		       (u32)adev->mc.vram_start);
 454	}
 455
 456	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
 457	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
 458
 459	/* unlock regs and wait for update */
 460	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 461		if (save->crtc_enabled[i]) {
 462			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
 463			if ((tmp & 0x7) != 0) {
 464				tmp &= ~0x7;
 465				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 466			}
 467			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 468			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
 469				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
 470				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
 471			}
 472			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
 473			if (tmp & 1) {
 474				tmp &= ~1;
 475				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
 476			}
 477			for (j = 0; j < adev->usec_timeout; j++) {
 478				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 479				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
 480					break;
 481				udelay(1);
 482			}
 483		}
 484	}
 485
 486	/* Unlock vga access */
 487	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 488	mdelay(1);
 489	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 490
 491}
 492
 493static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 494					  bool render)
 495{
 496	if (!render)
 497		WREG32(mmVGA_RENDER_CONTROL,
 498			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 499
 500}
 501
 502static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 503{
 504	int num_crtc = 0;
 505
 506	switch (adev->asic_type) {
 507	case CHIP_TAHITI:
 508	case CHIP_PITCAIRN:
 509	case CHIP_VERDE:
 510		num_crtc = 6;
 511		break;
 512	case CHIP_OLAND:
 513		num_crtc = 2;
 514		break;
 515	default:
 516		num_crtc = 0;
 517	}
 518	return num_crtc;
 519}
 520
 521void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 522{
 523	/* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
 524	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 525		u32 tmp;
 526		int crtc_enabled, i;
 527
 528		dce_v6_0_set_vga_render_state(adev, false);
 529
 530		/* Disable enabled CRTCs */
 531		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
 532			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
 533				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 534			if (crtc_enabled) {
 535				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 536				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 537				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 538				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 539				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 540			}
 541		}
 542	}
 543}
 544
 545static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 546{
 547
 548	struct drm_device *dev = encoder->dev;
 549	struct amdgpu_device *adev = dev->dev_private;
 550	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 551	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 552	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 553	int bpc = 0;
 554	u32 tmp = 0;
 555	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 556
 557	if (connector) {
 558		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 559		bpc = amdgpu_connector_get_monitor_bpc(connector);
 560		dither = amdgpu_connector->dither;
 561	}
 562
 563	/* LVDS FMT is set up by atom */
 564	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 565		return;
 566
 567	if (bpc == 0)
 568		return;
 569
 570
 571	switch (bpc) {
 572	case 6:
 573		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 574			/* XXX sort out optimal dither settings */
 575			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 576				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 577				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 578		else
 579			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 580		break;
 581	case 8:
 582		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 583			/* XXX sort out optimal dither settings */
 584			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 585				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 586				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 587				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 588				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 589		else
 590			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 591				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 592		break;
 593	case 10:
 594	default:
 595		/* not needed */
 596		break;
 597	}
 598
 599	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 600}
 601
 602/**
 603 * cik_get_number_of_dram_channels - get the number of dram channels
 604 *
 605 * @adev: amdgpu_device pointer
 606 *
 607 * Look up the number of video ram channels (SI).
 608 * Used for display watermark bandwidth calculations
 609 * Returns the number of dram channels
 610 */
 611static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 612{
 613	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 614
 615	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 616	case 0:
 617	default:
 618		return 1;
 619	case 1:
 620		return 2;
 621	case 2:
 622		return 4;
 623	case 3:
 624		return 8;
 625	case 4:
 626		return 3;
 627	case 5:
 628		return 6;
 629	case 6:
 630		return 10;
 631	case 7:
 632		return 12;
 633	case 8:
 634		return 16;
 635	}
 636}
 637
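/*
 * Illustrative example (editorial): a NOOFCHAN field value of 3 decodes to 8
 * DRAM channels above.  The watermark helpers below treat each channel as 4
 * bytes wide (note the "dram_channels * 4" term), so 8 channels correspond to
 * a 256-bit memory interface.
 */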
 638struct dce6_wm_params {
 639	u32 dram_channels; /* number of dram channels */
 640	u32 yclk;          /* bandwidth per dram data pin in kHz */
 641	u32 sclk;          /* engine clock in kHz */
 642	u32 disp_clk;      /* display clock in kHz */
 643	u32 src_width;     /* viewport width */
 644	u32 active_time;   /* active display time in ns */
 645	u32 blank_time;    /* blank time in ns */
 646	bool interlaced;    /* mode is interlaced */
 647	fixed20_12 vsc;    /* vertical scale ratio */
 648	u32 num_heads;     /* number of active crtcs */
 649	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 650	u32 lb_size;       /* line buffer allocated to pipe */
 651	u32 vtaps;         /* vertical scaler taps */
 652};
 653
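/*
 * Hypothetical example (editorial, values are illustrative only) of how
 * dce_v6_0_program_watermarks() below fills this struct for a single
 * 1920x1080@60 head (148.5 MHz pixel clock, htotal 2200):
 *
 *   struct dce6_wm_params wm = {
 *       .yclk            = 1000000,   // per-pin DRAM clock in kHz
 *       .sclk            = 800000,    // engine clock in kHz
 *       .disp_clk        = 148500,    // mode->clock in kHz
 *       .src_width       = 1920,
 *       .active_time     = 11520,     // 1920 * (1000000 / 148500) ns
 *       .blank_time      = 1680,      // line_time - active_time
 *       .num_heads       = 1,
 *       .bytes_per_pixel = 4,
 *       .lb_size         = 8192 * 2,  // whole line buffer
 *       .dram_channels   = 8,
 *       .vtaps           = 1,
 *   };
 *
 * (.vsc/.interlaced omitted; .vsc is a fixed20_12 taken from the crtc state.)
 */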
 654/**
 655 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 656 *
 657 * @wm: watermark calculation data
 658 *
 659 * Calculate the raw dram bandwidth (CIK).
 660 * Used for display watermark bandwidth calculations
 661 * Returns the dram bandwidth in MBytes/s
 662 */
 663static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
 664{
 665	/* Calculate raw DRAM Bandwidth */
 666	fixed20_12 dram_efficiency; /* 0.7 */
 667	fixed20_12 yclk, dram_channels, bandwidth;
 668	fixed20_12 a;
 669
 670	a.full = dfixed_const(1000);
 671	yclk.full = dfixed_const(wm->yclk);
 672	yclk.full = dfixed_div(yclk, a);
 673	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 674	a.full = dfixed_const(10);
 675	dram_efficiency.full = dfixed_const(7);
 676	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 677	bandwidth.full = dfixed_mul(dram_channels, yclk);
 678	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 679
 680	return dfixed_trunc(bandwidth);
 681}
 682
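/*
 * Worked example (editorial): with yclk = 1000000 kHz and dram_channels = 8,
 * the fixed-point math above evaluates to
 *
 *   (1000000 / 1000) * (8 * 4) * (7 / 10) = 1000 * 32 * 0.7 = 22400 MB/s
 *
 * i.e. the raw bandwidth derated by the assumed 0.7 DRAM efficiency.
 */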
 683/**
 684 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 685 *
 686 * @wm: watermark calculation data
 687 *
 688 * Calculate the dram bandwidth used for display (CIK).
 689 * Used for display watermark bandwidth calculations
 690 * Returns the dram bandwidth for display in MBytes/s
 691 */
 692static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 693{
 694	/* Calculate DRAM Bandwidth and the part allocated to display. */
 695	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 696	fixed20_12 yclk, dram_channels, bandwidth;
 697	fixed20_12 a;
 698
 699	a.full = dfixed_const(1000);
 700	yclk.full = dfixed_const(wm->yclk);
 701	yclk.full = dfixed_div(yclk, a);
 702	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 703	a.full = dfixed_const(10);
 704	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 705	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 706	bandwidth.full = dfixed_mul(dram_channels, yclk);
 707	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 708
 709	return dfixed_trunc(bandwidth);
 710}
 711
 712/**
 713 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 714 *
 715 * @wm: watermark calculation data
 716 *
 717 * Calculate the data return bandwidth used for display (CIK).
 718 * Used for display watermark bandwidth calculations
 719 * Returns the data return bandwidth in MBytes/s
 720 */
 721static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
 722{
 723	/* Calculate the display Data return Bandwidth */
 724	fixed20_12 return_efficiency; /* 0.8 */
 725	fixed20_12 sclk, bandwidth;
 726	fixed20_12 a;
 727
 728	a.full = dfixed_const(1000);
 729	sclk.full = dfixed_const(wm->sclk);
 730	sclk.full = dfixed_div(sclk, a);
 731	a.full = dfixed_const(10);
 732	return_efficiency.full = dfixed_const(8);
 733	return_efficiency.full = dfixed_div(return_efficiency, a);
 734	a.full = dfixed_const(32);
 735	bandwidth.full = dfixed_mul(a, sclk);
 736	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 737
 738	return dfixed_trunc(bandwidth);
 739}
 740
 741/**
 742 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 743 *
 744 * @wm: watermark calculation data
 745 *
 746 * Calculate the dmif bandwidth used for display (CIK).
 747 * Used for display watermark bandwidth calculations
 748 * Returns the dmif bandwidth in MBytes/s
 749 */
 750static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
 751{
 752	/* Calculate the DMIF Request Bandwidth */
 753	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 754	fixed20_12 disp_clk, bandwidth;
 755	fixed20_12 a, b;
 756
 757	a.full = dfixed_const(1000);
 758	disp_clk.full = dfixed_const(wm->disp_clk);
 759	disp_clk.full = dfixed_div(disp_clk, a);
 760	a.full = dfixed_const(32);
 761	b.full = dfixed_mul(a, disp_clk);
 762
 763	a.full = dfixed_const(10);
 764	disp_clk_request_efficiency.full = dfixed_const(8);
 765	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 766
 767	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 768
 769	return dfixed_trunc(bandwidth);
 770}
 771
 772/**
 773 * dce_v6_0_available_bandwidth - get the min available bandwidth
 774 *
 775 * @wm: watermark calculation data
 776 *
 777 * Calculate the min available bandwidth used for display (CIK).
 778 * Used for display watermark bandwidth calculations
 779 * Returns the min available bandwidth in MBytes/s
 780 */
 781static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
 782{
 783	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 784	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
 785	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
 786	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
 787
 788	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 789}
 790
 791/**
 792 * dce_v6_0_average_bandwidth - get the average available bandwidth
 793 *
 794 * @wm: watermark calculation data
 795 *
 796 * Calculate the average available bandwidth used for display (CIK).
 797 * Used for display watermark bandwidth calculations
 798 * Returns the average available bandwidth in MBytes/s
 799 */
 800static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
 801{
 802	/* Calculate the display mode Average Bandwidth
 803	 * DisplayMode should contain the source and destination dimensions,
 804	 * timing, etc.
 805	 */
 806	fixed20_12 bpp;
 807	fixed20_12 line_time;
 808	fixed20_12 src_width;
 809	fixed20_12 bandwidth;
 810	fixed20_12 a;
 811
 812	a.full = dfixed_const(1000);
 813	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 814	line_time.full = dfixed_div(line_time, a);
 815	bpp.full = dfixed_const(wm->bytes_per_pixel);
 816	src_width.full = dfixed_const(wm->src_width);
 817	bandwidth.full = dfixed_mul(src_width, bpp);
 818	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 819	bandwidth.full = dfixed_div(bandwidth, line_time);
 820
 821	return dfixed_trunc(bandwidth);
 822}
 823
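/*
 * Worked example (editorial): a 1920 pixel wide source at 4 bytes per pixel,
 * 1:1 vertical scaling and a 13200 ns line time (active + blank) gives
 * 1920 * 4 / 13.2 us, roughly 580 MB/s of average scanout bandwidth for this
 * head.
 */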
 824/**
 825 * dce_v6_0_latency_watermark - get the latency watermark
 826 *
 827 * @wm: watermark calculation data
 828 *
 829 * Calculate the latency watermark (CIK).
 830 * Used for display watermark bandwidth calculations
 831 * Returns the latency watermark in ns
 832 */
 833static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 834{
 835	/* First calculate the latency in ns */
 836	u32 mc_latency = 2000; /* 2000 ns. */
 837	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
 838	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 839	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 840	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 841	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 842		(wm->num_heads * cursor_line_pair_return_time);
 843	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 844	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 845	u32 tmp, dmif_size = 12288;
 846	fixed20_12 a, b, c;
 847
 848	if (wm->num_heads == 0)
 849		return 0;
 850
 851	a.full = dfixed_const(2);
 852	b.full = dfixed_const(1);
 853	if ((wm->vsc.full > a.full) ||
 854	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 855	    (wm->vtaps >= 5) ||
 856	    ((wm->vsc.full >= a.full) && wm->interlaced))
 857		max_src_lines_per_dst_line = 4;
 858	else
 859		max_src_lines_per_dst_line = 2;
 860
 861	a.full = dfixed_const(available_bandwidth);
 862	b.full = dfixed_const(wm->num_heads);
 863	a.full = dfixed_div(a, b);
 864
 865	b.full = dfixed_const(mc_latency + 512);
 866	c.full = dfixed_const(wm->disp_clk);
 867	b.full = dfixed_div(b, c);
 868
 869	c.full = dfixed_const(dmif_size);
 870	b.full = dfixed_div(c, b);
 871
 872	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 873
 874	b.full = dfixed_const(1000);
 875	c.full = dfixed_const(wm->disp_clk);
 876	b.full = dfixed_div(c, b);
 877	c.full = dfixed_const(wm->bytes_per_pixel);
 878	b.full = dfixed_mul(b, c);
 879
 880	lb_fill_bw = min(tmp, dfixed_trunc(b));
 881
 882	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 883	b.full = dfixed_const(1000);
 884	c.full = dfixed_const(lb_fill_bw);
 885	b.full = dfixed_div(c, b);
 886	a.full = dfixed_div(a, b);
 887	line_fill_time = dfixed_trunc(a);
 888
 889	if (line_fill_time < wm->active_time)
 890		return latency;
 891	else
 892		return latency + (line_fill_time - wm->active_time);
 893
 894}
 895
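/*
 * Editorial summary: the watermark starts from a fixed 2000 ns memory latency,
 * adds the time other heads can occupy the return path plus the display pipe
 * latency of 40000000 / disp_clk ns (about 269 ns at a 148500 kHz display
 * clock), and is then stretched by however much the line buffer fill time
 * exceeds the active display time.
 */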
 896/**
 897 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 898 * average and available dram bandwidth
 899 *
 900 * @wm: watermark calculation data
 901 *
 902 * Check if the display average bandwidth fits in the display
 903 * dram bandwidth (CIK).
 904 * Used for display watermark bandwidth calculations
 905 * Returns true if the display fits, false if not.
 906 */
 907static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 908{
 909	if (dce_v6_0_average_bandwidth(wm) <=
 910	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 911		return true;
 912	else
 913		return false;
 914}
 915
 916/**
 917 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 918 * average and available bandwidth
 919 *
 920 * @wm: watermark calculation data
 921 *
 922 * Check if the display average bandwidth fits in the display
 923 * available bandwidth (CIK).
 924 * Used for display watermark bandwidth calculations
 925 * Returns true if the display fits, false if not.
 926 */
 927static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 928{
 929	if (dce_v6_0_average_bandwidth(wm) <=
 930	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
 931		return true;
 932	else
 933		return false;
 934}
 935
 936/**
 937 * dce_v6_0_check_latency_hiding - check latency hiding
 938 *
 939 * @wm: watermark calculation data
 940 *
 941 * Check latency hiding (CIK).
 942 * Used for display watermark bandwidth calculations
 943 * Returns true if the display fits, false if not.
 944 */
 945static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
 946{
 947	u32 lb_partitions = wm->lb_size / wm->src_width;
 948	u32 line_time = wm->active_time + wm->blank_time;
 949	u32 latency_tolerant_lines;
 950	u32 latency_hiding;
 951	fixed20_12 a;
 952
 953	a.full = dfixed_const(1);
 954	if (wm->vsc.full > a.full)
 955		latency_tolerant_lines = 1;
 956	else {
 957		if (lb_partitions <= (wm->vtaps + 1))
 958			latency_tolerant_lines = 1;
 959		else
 960			latency_tolerant_lines = 2;
 961	}
 962
 963	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 964
 965	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
 966		return true;
 967	else
 968		return false;
 969}
 970
 971/**
 972 * dce_v6_0_program_watermarks - program display watermarks
 973 *
 974 * @adev: amdgpu_device pointer
 975 * @amdgpu_crtc: the selected display controller
 976 * @lb_size: line buffer size
 977 * @num_heads: number of display controllers in use
 978 *
 979 * Calculate and program the display watermarks for the
 980 * selected display controller (CIK).
 981 */
 982static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 983					struct amdgpu_crtc *amdgpu_crtc,
 984					u32 lb_size, u32 num_heads)
 985{
 986	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 987	struct dce6_wm_params wm_low, wm_high;
 988	u32 dram_channels;
 989	u32 pixel_period;
 990	u32 line_time = 0;
 991	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 992	u32 priority_a_mark = 0, priority_b_mark = 0;
 993	u32 priority_a_cnt = PRIORITY_OFF;
 994	u32 priority_b_cnt = PRIORITY_OFF;
 995	u32 tmp, arb_control3;
 996	fixed20_12 a, b, c;
 997
 998	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 999		pixel_period = 1000000 / (u32)mode->clock;
1000		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1001		priority_a_cnt = 0;
1002		priority_b_cnt = 0;
1003
1004		dram_channels = si_get_number_of_dram_channels(adev);
1005
1006		/* watermark for high clocks */
1007		if (adev->pm.dpm_enabled) {
1008			wm_high.yclk =
1009				amdgpu_dpm_get_mclk(adev, false) * 10;
1010			wm_high.sclk =
1011				amdgpu_dpm_get_sclk(adev, false) * 10;
1012		} else {
1013			wm_high.yclk = adev->pm.current_mclk * 10;
1014			wm_high.sclk = adev->pm.current_sclk * 10;
1015		}
1016
1017		wm_high.disp_clk = mode->clock;
1018		wm_high.src_width = mode->crtc_hdisplay;
1019		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1020		wm_high.blank_time = line_time - wm_high.active_time;
1021		wm_high.interlaced = false;
1022		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1023			wm_high.interlaced = true;
1024		wm_high.vsc = amdgpu_crtc->vsc;
1025		wm_high.vtaps = 1;
1026		if (amdgpu_crtc->rmx_type != RMX_OFF)
1027			wm_high.vtaps = 2;
1028		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1029		wm_high.lb_size = lb_size;
1030		wm_high.dram_channels = dram_channels;
1031		wm_high.num_heads = num_heads;
1032
1033		if (adev->pm.dpm_enabled) {
1034		/* watermark for low clocks */
1035			wm_low.yclk =
1036				amdgpu_dpm_get_mclk(adev, true) * 10;
1037			wm_low.sclk =
1038				amdgpu_dpm_get_sclk(adev, true) * 10;
1039		} else {
1040			wm_low.yclk = adev->pm.current_mclk * 10;
1041			wm_low.sclk = adev->pm.current_sclk * 10;
1042		}
1043
1044		wm_low.disp_clk = mode->clock;
1045		wm_low.src_width = mode->crtc_hdisplay;
1046		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1047		wm_low.blank_time = line_time - wm_low.active_time;
1048		wm_low.interlaced = false;
1049		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1050			wm_low.interlaced = true;
1051		wm_low.vsc = amdgpu_crtc->vsc;
1052		wm_low.vtaps = 1;
1053		if (amdgpu_crtc->rmx_type != RMX_OFF)
1054			wm_low.vtaps = 2;
1055		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1056		wm_low.lb_size = lb_size;
1057		wm_low.dram_channels = dram_channels;
1058		wm_low.num_heads = num_heads;
1059
1060		/* set for high clocks */
1061		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
1062		/* set for low clocks */
1063		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
1064
1065		/* possibly force display priority to high */
1066		/* should really do this at mode validation time... */
1067		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1068		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1069		    !dce_v6_0_check_latency_hiding(&wm_high) ||
1070		    (adev->mode_info.disp_priority == 2)) {
1071			DRM_DEBUG_KMS("force priority to high\n");
1072			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1073			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1074		}
1075		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1076		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1077		    !dce_v6_0_check_latency_hiding(&wm_low) ||
1078		    (adev->mode_info.disp_priority == 2)) {
1079			DRM_DEBUG_KMS("force priority to high\n");
1080			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1081			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1082		}
1083
1084		a.full = dfixed_const(1000);
1085		b.full = dfixed_const(mode->clock);
1086		b.full = dfixed_div(b, a);
1087		c.full = dfixed_const(latency_watermark_a);
1088		c.full = dfixed_mul(c, b);
1089		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1090		c.full = dfixed_div(c, a);
1091		a.full = dfixed_const(16);
1092		c.full = dfixed_div(c, a);
1093		priority_a_mark = dfixed_trunc(c);
1094		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1095
1096		a.full = dfixed_const(1000);
1097		b.full = dfixed_const(mode->clock);
1098		b.full = dfixed_div(b, a);
1099		c.full = dfixed_const(latency_watermark_b);
1100		c.full = dfixed_mul(c, b);
1101		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1102		c.full = dfixed_div(c, a);
1103		a.full = dfixed_const(16);
1104		c.full = dfixed_div(c, a);
1105		priority_b_mark = dfixed_trunc(c);
1106		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1107	}
1108
1109	/* select wm A */
1110	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1111	tmp = arb_control3;
1112	tmp &= ~LATENCY_WATERMARK_MASK(3);
1113	tmp |= LATENCY_WATERMARK_MASK(1);
1114	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1115	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1116	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
1117		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1118	/* select wm B */
1119	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1120	tmp &= ~LATENCY_WATERMARK_MASK(3);
1121	tmp |= LATENCY_WATERMARK_MASK(2);
1122	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1123	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1124	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1125		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1126	/* restore original selection */
1127	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1128
1129	/* write the priority marks */
1130	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1131	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1132
1133	/* save values for DPM */
1134	amdgpu_crtc->line_time = line_time;
1135	amdgpu_crtc->wm_high = latency_watermark_a;
1136}
1137
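/*
 * Editorial note on the PRIORITY_*_MARK math above: the latency watermark is
 * converted from nanoseconds into units of 16 pixels of scanout, roughly
 * mark = wm_ns * pixel_clock_MHz * hsc / 1000 / 16.  For example a 4000 ns
 * watermark at 148.5 MHz with no horizontal scaling gives
 * 4000 * 148.5 / 1000 / 16, i.e. a mark of about 37.
 */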
1138/* watermark setup */
1139static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1140				   struct amdgpu_crtc *amdgpu_crtc,
1141				   struct drm_display_mode *mode,
1142				   struct drm_display_mode *other_mode)
1143{
1144	u32 tmp, buffer_alloc, i;
1145	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1146	/*
1147	 * Line Buffer Setup
1148	 * There are 3 line buffers, each one shared by 2 display controllers.
1149	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1150	 * the display controllers.  The partitioning is done via one of four
1151	 * preset allocations specified in bits 21:20; only two are used here:
1152	 *  0 - half lb
1153	 *  2 - whole lb, other crtc must be disabled
1154	 */
1155	/* this can get tricky if we have two large displays on a paired group
1156	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1157	 * non-linked crtcs for maximum line buffer allocation.
1158	 */
1159	if (amdgpu_crtc->base.enabled && mode) {
1160		if (other_mode) {
1161			tmp = 0; /* 1/2 */
1162			buffer_alloc = 1;
1163		} else {
1164			tmp = 2; /* whole */
1165			buffer_alloc = 2;
1166		}
1167	} else {
1168		tmp = 0;
1169		buffer_alloc = 0;
1170	}
1171
1172	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1173	       DC_LB_MEMORY_CONFIG(tmp));
1174
1175	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1176	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1177	for (i = 0; i < adev->usec_timeout; i++) {
1178		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1179		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1180			break;
1181		udelay(1);
1182	}
1183
1184	if (amdgpu_crtc->base.enabled && mode) {
1185		switch (tmp) {
1186		case 0:
1187		default:
1188			return 4096 * 2;
1189		case 2:
1190			return 8192 * 2;
1191		}
1192	}
1193
1194	/* controller not enabled, so no lb used */
1195	return 0;
1196}
1197
1198
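/*
 * Editorial note: the return values above mirror the DC_LB_MEMORY_CONFIG
 * presets just programmed -- 4096 * 2 line buffer entries when the paired
 * CRTC also drives a mode (half of the shared buffer) and 8192 * 2 when this
 * CRTC gets the whole buffer.  dce_v6_0_bandwidth_update() below walks the
 * CRTCs in pairs (i, i + 1) precisely because each pair shares one line
 * buffer.
 */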
1199/**
1200 *
1201 * dce_v6_0_bandwidth_update - program display watermarks
1202 *
1203 * @adev: amdgpu_device pointer
1204 *
1205 * Calculate and program the display watermarks and line
1206 * buffer allocation (CIK).
1207 */
1208static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1209{
1210	struct drm_display_mode *mode0 = NULL;
1211	struct drm_display_mode *mode1 = NULL;
1212	u32 num_heads = 0, lb_size;
1213	int i;
1214
1215	if (!adev->mode_info.mode_config_initialized)
1216		return;
1217
1218	amdgpu_update_display_priority(adev);
1219
1220	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1221		if (adev->mode_info.crtcs[i]->base.enabled)
1222			num_heads++;
1223	}
1224	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1225		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1226		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1227		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1228		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1229		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1230		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1231	}
1232}
1233/*
1234static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1235{
1236	int i;
1237	u32 offset, tmp;
1238
1239	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1240		offset = adev->mode_info.audio.pin[i].offset;
1241		tmp = RREG32_AUDIO_ENDPT(offset,
1242				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1243		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
1244			adev->mode_info.audio.pin[i].connected = false;
1245		else
1246			adev->mode_info.audio.pin[i].connected = true;
1247	}
1248
1249}
1250
1251static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1252{
1253	int i;
1254
1255	dce_v6_0_audio_get_connected_pins(adev);
1256
1257	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1258		if (adev->mode_info.audio.pin[i].connected)
1259			return &adev->mode_info.audio.pin[i];
1260	}
1261	DRM_ERROR("No connected audio pins found!\n");
1262	return NULL;
1263}
1264
1265static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1266{
1267	struct amdgpu_device *adev = encoder->dev->dev_private;
1268	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1269	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1270	u32 offset;
1271
1272	if (!dig || !dig->afmt || !dig->afmt->pin)
1273		return;
1274
1275	offset = dig->afmt->offset;
1276
1277	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
1278	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
1279
1280}
1281
1282static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1283						struct drm_display_mode *mode)
1284{
1285	DRM_INFO("dce_v6_0_audio_write_latency_fields: not implemented\n");
1286}
1287
1288static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1289{
1290	DRM_INFO("dce_v6_0_audio_write_speaker_allocation: not implemented\n");
1291}
1292
1293static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1294{
1295	DRM_INFO("dce_v6_0_audio_write_sad_regs: not implemented\n");
1296
1297}
1298*/
1299static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1300				  struct amdgpu_audio_pin *pin,
1301				  bool enable)
1302{
1303	DRM_INFO("dce_v6_0_audio_enable: not implemented\n");
1304}
1305
1306static const u32 pin_offsets[7] =
1307{
1308	(0x1780 - 0x1780),
1309	(0x1786 - 0x1780),
1310	(0x178c - 0x1780),
1311	(0x1792 - 0x1780),
1312	(0x1798 - 0x1780),
1313	(0x179d - 0x1780),
1314	(0x17a4 - 0x1780),
1315};
1316
1317static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1318{
1319	return 0;
1320}
1321
1322static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1323{
1324
1325}
1326
1327/*
1328static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1329{
1330	DRM_INFO("dce_v6_0_afmt_update_ACR: not implemented\n");
1331}
1332*/
1333/*
1334 * build a HDMI Video Info Frame
1335 */
1336/*
1337static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1338					       void *buffer, size_t size)
1339{
1340	DRM_INFO("dce_v6_0_afmt_update_avi_infoframe: not implemented\n");
1341}
1342
1343static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1344{
1345	DRM_INFO("dce_v6_0_audio_set_dto: not implemented\n");
1346}
1347*/
1348/*
1349 * update the info frames with the data from the current display mode
1350 */
1351static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1352				  struct drm_display_mode *mode)
1353{
1354	DRM_INFO("dce_v6_0_afmt_setmode: not implemented\n");
1355}
1356
1357static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1358{
1359	struct drm_device *dev = encoder->dev;
1360	struct amdgpu_device *adev = dev->dev_private;
1361	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1362	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1363
1364	if (!dig || !dig->afmt)
1365		return;
1366
1367	/* Silently return if the AFMT block is already in the requested state */
1368	if (enable && dig->afmt->enabled)
1369		return;
1370	if (!enable && !dig->afmt->enabled)
1371		return;
1372
1373	if (!enable && dig->afmt->pin) {
1374		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1375		dig->afmt->pin = NULL;
1376	}
1377
1378	dig->afmt->enabled = enable;
1379
1380	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1381		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1382}
1383
1384static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1385{
1386	int i, j;
1387
1388	for (i = 0; i < adev->mode_info.num_dig; i++)
1389		adev->mode_info.afmt[i] = NULL;
1390
1391	/* DCE6 has audio blocks tied to DIG encoders */
1392	for (i = 0; i < adev->mode_info.num_dig; i++) {
1393		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1394		if (adev->mode_info.afmt[i]) {
1395			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1396			adev->mode_info.afmt[i]->id = i;
1397		} else {
1398			for (j = 0; j < i; j++) {
1399				kfree(adev->mode_info.afmt[j]);
1400				adev->mode_info.afmt[j] = NULL;
1401			}
1402			DRM_ERROR("Out of memory allocating afmt table\n");
1403			return -ENOMEM;
1404		}
1405	}
1406	return 0;
1407}
1408
1409static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1410{
1411	int i;
1412
1413	for (i = 0; i < adev->mode_info.num_dig; i++) {
1414		kfree(adev->mode_info.afmt[i]);
1415		adev->mode_info.afmt[i] = NULL;
1416	}
1417}
1418
1419static const u32 vga_control_regs[6] =
1420{
1421	mmD1VGA_CONTROL,
1422	mmD2VGA_CONTROL,
1423	mmD3VGA_CONTROL,
1424	mmD4VGA_CONTROL,
1425	mmD5VGA_CONTROL,
1426	mmD6VGA_CONTROL,
1427};
1428
1429static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1430{
1431	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1432	struct drm_device *dev = crtc->dev;
1433	struct amdgpu_device *adev = dev->dev_private;
1434	u32 vga_control;
1435
1436	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1437	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1438}
1439
1440static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1441{
1442	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1443	struct drm_device *dev = crtc->dev;
1444	struct amdgpu_device *adev = dev->dev_private;
1445
1446	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1447}
1448
1449static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1450				     struct drm_framebuffer *fb,
1451				     int x, int y, int atomic)
1452{
1453	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1454	struct drm_device *dev = crtc->dev;
1455	struct amdgpu_device *adev = dev->dev_private;
1456	struct amdgpu_framebuffer *amdgpu_fb;
1457	struct drm_framebuffer *target_fb;
1458	struct drm_gem_object *obj;
1459	struct amdgpu_bo *abo;
1460	uint64_t fb_location, tiling_flags;
1461	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1462	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1463	u32 viewport_w, viewport_h;
1464	int r;
1465	bool bypass_lut = false;
1466	struct drm_format_name_buf format_name;
1467
1468	/* no fb bound */
1469	if (!atomic && !crtc->primary->fb) {
1470		DRM_DEBUG_KMS("No FB bound\n");
1471		return 0;
1472	}
1473
1474	if (atomic) {
1475		amdgpu_fb = to_amdgpu_framebuffer(fb);
1476		target_fb = fb;
1477	} else {
1478		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1479		target_fb = crtc->primary->fb;
1480	}
1481
1482	/* If atomic, assume fb object is pinned & idle & fenced and
1483	 * just update base pointers
1484	 */
1485	obj = amdgpu_fb->obj;
1486	abo = gem_to_amdgpu_bo(obj);
1487	r = amdgpu_bo_reserve(abo, false);
1488	if (unlikely(r != 0))
1489		return r;
1490
1491	if (atomic) {
1492		fb_location = amdgpu_bo_gpu_offset(abo);
1493	} else {
1494		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1495		if (unlikely(r != 0)) {
1496			amdgpu_bo_unreserve(abo);
1497			return -EINVAL;
1498		}
1499	}
1500
1501	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1502	amdgpu_bo_unreserve(abo);
1503
1504	switch (target_fb->pixel_format) {
1505	case DRM_FORMAT_C8:
1506		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1507			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1508		break;
1509	case DRM_FORMAT_XRGB4444:
1510	case DRM_FORMAT_ARGB4444:
1511		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1512			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1513#ifdef __BIG_ENDIAN
1514		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1515#endif
1516		break;
1517	case DRM_FORMAT_XRGB1555:
1518	case DRM_FORMAT_ARGB1555:
1519		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1520			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1521#ifdef __BIG_ENDIAN
1522		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1523#endif
1524		break;
1525	case DRM_FORMAT_BGRX5551:
1526	case DRM_FORMAT_BGRA5551:
1527		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1528			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1529#ifdef __BIG_ENDIAN
1530		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1531#endif
1532		break;
1533	case DRM_FORMAT_RGB565:
1534		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1535			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1536#ifdef __BIG_ENDIAN
1537		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1538#endif
1539		break;
1540	case DRM_FORMAT_XRGB8888:
1541	case DRM_FORMAT_ARGB8888:
1542		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1543			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1544#ifdef __BIG_ENDIAN
1545		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1546#endif
1547		break;
1548	case DRM_FORMAT_XRGB2101010:
1549	case DRM_FORMAT_ARGB2101010:
1550		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1551			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1552#ifdef __BIG_ENDIAN
1553		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1554#endif
1555		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1556		bypass_lut = true;
1557		break;
1558	case DRM_FORMAT_BGRX1010102:
1559	case DRM_FORMAT_BGRA1010102:
1560		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1561			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1562#ifdef __BIG_ENDIAN
1563		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1564#endif
1565		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1566		bypass_lut = true;
1567		break;
1568	default:
1569		DRM_ERROR("Unsupported screen format %s\n",
1570		          drm_get_format_name(target_fb->pixel_format, &format_name));
1571		return -EINVAL;
1572	}
1573
1574	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1575		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1576
1577		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1578		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1579		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1580		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1581		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1582
1583		fb_format |= GRPH_NUM_BANKS(num_banks);
1584		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1585		fb_format |= GRPH_TILE_SPLIT(tile_split);
1586		fb_format |= GRPH_BANK_WIDTH(bankw);
1587		fb_format |= GRPH_BANK_HEIGHT(bankh);
1588		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1589	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1590		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1591	}
1592
1593	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1594	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1595
1596	dce_v6_0_vga_enable(crtc, false);
1597
1598	/* Make sure surface address is updated at vertical blank rather than
1599	 * horizontal blank
1600	 */
1601	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1602
1603	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1604	       upper_32_bits(fb_location));
1605	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1606	       upper_32_bits(fb_location));
1607	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1608	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1609	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1610	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1611	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1612	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1613
1614	/*
1615	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1616	 * for > 8 bpc scanout to avoid truncation of fb indices to their 8 MSBs, to
1617	 * retain the full precision throughout the pipeline.
1618	 */
1619	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1620		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1621		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1622
1623	if (bypass_lut)
1624		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1625
1626	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1627	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1628	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1629	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1630	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1631	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1632
1633	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1634	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1635
1636	dce_v6_0_grph_enable(crtc, true);
1637
1638	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1639		       target_fb->height);
1640	x &= ~3;
1641	y &= ~1;
1642	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1643	       (x << 16) | y);
1644	viewport_w = crtc->mode.hdisplay;
1645	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1646
1647	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1648	       (viewport_w << 16) | viewport_h);
1649
1650	/* set pageflip to happen anywhere in vblank interval */
1651	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1652
1653	if (!atomic && fb && fb != crtc->primary->fb) {
1654		amdgpu_fb = to_amdgpu_framebuffer(fb);
1655		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1656		r = amdgpu_bo_reserve(abo, false);
1657		if (unlikely(r != 0))
1658			return r;
1659		amdgpu_bo_unpin(abo);
1660		amdgpu_bo_unreserve(abo);
1661	}
1662
1663	/* Bytes per pixel may have changed */
1664	dce_v6_0_bandwidth_update(adev);
1665
1666	return 0;
1667
1668}
1669
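/**
 * dce_v6_0_set_interleave - program interlaced scan out for a crtc
 *
 * @crtc: drm crtc
 * @mode: the mode being set
 *
 * Sets the INTERLEAVE_EN bit in DATA_FORMAT when the mode is
 * interlaced, clears it otherwise.
 */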
1670static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1671				    struct drm_display_mode *mode)
1672{
1673	struct drm_device *dev = crtc->dev;
1674	struct amdgpu_device *adev = dev->dev_private;
1675	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1676
1677	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1678		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
1679		       INTERLEAVE_EN);
1680	else
1681		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1682}
1683
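/**
 * dce_v6_0_crtc_load_lut - load the gamma LUT for a crtc
 *
 * @crtc: drm crtc
 *
 * Bypasses the input CSC, prescale and input gamma blocks, writes the
 * 256-entry 10-bit gamma ramp from lut_r/g/b into DC_LUT_30_COLOR and
 * leaves the degamma, gamut remap, regamma and output CSC blocks in
 * bypass mode.
 */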
1684static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1685{
1686
1687	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1688	struct drm_device *dev = crtc->dev;
1689	struct amdgpu_device *adev = dev->dev_private;
1690	int i;
1691
1692	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1693
1694	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1695	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1696		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
1697	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1698	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
1699	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1700	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
1701	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1702	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
1703		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
1704
1705	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1706
1707	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1708	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1709	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1710
1711	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1712	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1713	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1714
1715	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1716	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1717
1718	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
1719	for (i = 0; i < 256; i++) {
1720		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1721		       (amdgpu_crtc->lut_r[i] << 20) |
1722		       (amdgpu_crtc->lut_g[i] << 10) |
1723		       (amdgpu_crtc->lut_b[i] << 0));
1724	}
1725
1726	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1727	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
1728		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
1729		ICON_DEGAMMA_MODE(0) |
1730		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
1731	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1732	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
1733		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
1734	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1735	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
1736		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
1737	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1738	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
1739		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
1740	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
1741	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1742
1743
1744}
1745
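/**
 * dce_v6_0_pick_dig_encoder - pick a DIG block for an encoder
 *
 * @encoder: drm encoder
 *
 * Returns the index of the DIG block to use based on the UNIPHY
 * instance and link (A or B) driven by the encoder.
 */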
1746static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1747{
1748	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1749	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1750
1751	switch (amdgpu_encoder->encoder_id) {
1752	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1753		return dig->linkb ? 1 : 0;
1754	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1755		return dig->linkb ? 3 : 2;
1756	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1757		return dig->linkb ? 5 : 4;
1758	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1759		return 6;
1760	default:
1761		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1762		return 0;
1763	}
1764}
1765
1766/**
1767 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1768 *
1769 * @crtc: drm crtc
1770 *
1771 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
1772 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
1773 * monitors a dedicated PPLL must be used.  If a particular board has
1774 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1775 * as there is no need to program the PLL itself.  If we are not able to
1776 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1777 * avoid messing up an existing monitor.
1778 *
1779 *
1780 */
1781static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1782{
1783	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1784	struct drm_device *dev = crtc->dev;
1785	struct amdgpu_device *adev = dev->dev_private;
1786	u32 pll_in_use;
1787	int pll;
1788
1789	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1790		if (adev->clock.dp_extclk)
1791			/* skip PPLL programming if using ext clock */
1792			return ATOM_PPLL_INVALID;
1793		else
1794			return ATOM_PPLL0;
1795	} else {
1796		/* use the same PPLL for all monitors with the same clock */
1797		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1798		if (pll != ATOM_PPLL_INVALID)
1799			return pll;
1800	}
1801
1802	/* PPLL1 and PPLL2 */
1803	pll_in_use = amdgpu_pll_get_use_mask(crtc);
1804	if (!(pll_in_use & (1 << ATOM_PPLL2)))
1805		return ATOM_PPLL2;
1806	if (!(pll_in_use & (1 << ATOM_PPLL1)))
1807		return ATOM_PPLL1;
1808	DRM_ERROR("unable to allocate a PPLL\n");
1809	return ATOM_PPLL_INVALID;
1810}
1811
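/**
 * dce_v6_0_lock_cursor - lock or unlock cursor register updates
 *
 * @crtc: drm crtc
 * @lock: true to lock, false to unlock
 *
 * Toggles the CURSOR_UPDATE_LOCK bit in CUR_UPDATE so that cursor
 * position and address changes are held off until the lock is
 * released.
 */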
1812static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1813{
1814	struct amdgpu_device *adev = crtc->dev->dev_private;
1815	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1816	uint32_t cur_lock;
1817
1818	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
1819	if (lock)
1820		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1821	else
1822		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1823	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1824}
1825
1826static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1827{
1828	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1829	struct amdgpu_device *adev = crtc->dev->dev_private;
1830
1831	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1832		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1833		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1834
1835
1836}
1837
1838static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1839{
1840	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1841	struct amdgpu_device *adev = crtc->dev->dev_private;
1842
1843	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1844	       upper_32_bits(amdgpu_crtc->cursor_addr));
1845	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1846	       lower_32_bits(amdgpu_crtc->cursor_addr));
1847
1848	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1849		   CUR_CONTROL__CURSOR_EN_MASK |
1850		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1851		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1852
1853}
1854
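/**
 * dce_v6_0_cursor_move_locked - move the hardware cursor
 *
 * @crtc: drm crtc
 * @x: new x position
 * @y: new y position
 *
 * Programs CUR_POSITION, CUR_HOT_SPOT and CUR_SIZE.  Negative
 * coordinates are clamped to zero and folded into the hot spot so the
 * cursor can move partially off the top/left edge of the screen.
 */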
1855static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1856				       int x, int y)
1857{
1858	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1859	struct amdgpu_device *adev = crtc->dev->dev_private;
1860	int xorigin = 0, yorigin = 0;
1861
1862	int w = amdgpu_crtc->cursor_width;
1863
1864	amdgpu_crtc->cursor_x = x;
1865	amdgpu_crtc->cursor_y = y;
1866
1867	/* avivo cursors are offset into the total surface */
1868	x += crtc->x;
1869	y += crtc->y;
1870	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1871
1872	if (x < 0) {
1873		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1874		x = 0;
1875	}
1876	if (y < 0) {
1877		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1878		y = 0;
1879	}
1880
1881	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1882	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1883	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1884	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1885
1886	return 0;
1887}
1888
1889static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1890				     int x, int y)
1891{
1892	int ret;
1893
1894	dce_v6_0_lock_cursor(crtc, true);
1895	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1896	dce_v6_0_lock_cursor(crtc, false);
1897
1898	return ret;
1899}
1900
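/**
 * dce_v6_0_crtc_cursor_set2 - set the cursor image for a crtc
 *
 * @crtc: drm crtc
 * @file_priv: drm file
 * @handle: GEM handle of the cursor BO (0 hides the cursor)
 * @width: cursor width
 * @height: cursor height
 * @hot_x: cursor hot spot x
 * @hot_y: cursor hot spot y
 *
 * Pins the new cursor BO in VRAM, updates the cursor size and hot
 * spot, shows the cursor, then unpins and drops the reference on the
 * previous cursor BO.
 */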
1901static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1902				     struct drm_file *file_priv,
1903				     uint32_t handle,
1904				     uint32_t width,
1905				     uint32_t height,
1906				     int32_t hot_x,
1907				     int32_t hot_y)
1908{
1909	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1910	struct drm_gem_object *obj;
1911	struct amdgpu_bo *aobj;
1912	int ret;
1913
1914	if (!handle) {
1915		/* turn off cursor */
1916		dce_v6_0_hide_cursor(crtc);
1917		obj = NULL;
1918		goto unpin;
1919	}
1920
1921	if ((width > amdgpu_crtc->max_cursor_width) ||
1922	    (height > amdgpu_crtc->max_cursor_height)) {
1923		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1924		return -EINVAL;
1925	}
1926
1927	obj = drm_gem_object_lookup(file_priv, handle);
1928	if (!obj) {
1929		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1930		return -ENOENT;
1931	}
1932
1933	aobj = gem_to_amdgpu_bo(obj);
1934	ret = amdgpu_bo_reserve(aobj, false);
1935	if (ret != 0) {
1936		drm_gem_object_unreference_unlocked(obj);
1937		return ret;
1938	}
1939
1940	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
1941	amdgpu_bo_unreserve(aobj);
1942	if (ret) {
1943		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
1944		drm_gem_object_unreference_unlocked(obj);
1945		return ret;
1946	}
1947
1948	dce_v6_0_lock_cursor(crtc, true);
1949
1950	if (width != amdgpu_crtc->cursor_width ||
1951	    height != amdgpu_crtc->cursor_height ||
1952	    hot_x != amdgpu_crtc->cursor_hot_x ||
1953	    hot_y != amdgpu_crtc->cursor_hot_y) {
1954		int x, y;
1955
1956		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
1957		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
1958
1959		dce_v6_0_cursor_move_locked(crtc, x, y);
1960
1961		amdgpu_crtc->cursor_width = width;
1962		amdgpu_crtc->cursor_height = height;
1963		amdgpu_crtc->cursor_hot_x = hot_x;
1964		amdgpu_crtc->cursor_hot_y = hot_y;
1965	}
1966
1967	dce_v6_0_show_cursor(crtc);
1968	dce_v6_0_lock_cursor(crtc, false);
1969
1970unpin:
1971	if (amdgpu_crtc->cursor_bo) {
1972		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1973		ret = amdgpu_bo_reserve(aobj, false);
1974		if (likely(ret == 0)) {
1975			amdgpu_bo_unpin(aobj);
1976			amdgpu_bo_unreserve(aobj);
1977		}
1978		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
1979	}
1980
1981	amdgpu_crtc->cursor_bo = obj;
1982	return 0;
1983}
1984
1985static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1986{
1987	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1988
1989	if (amdgpu_crtc->cursor_bo) {
1990		dce_v6_0_lock_cursor(crtc, true);
1991
1992		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1993					    amdgpu_crtc->cursor_y);
1994
1995		dce_v6_0_show_cursor(crtc);
1996		dce_v6_0_lock_cursor(crtc, false);
1997	}
1998}
1999
2000static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2001				   u16 *blue, uint32_t size)
2002{
2003	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2004	int i;
2005
2006	/* userspace palettes are always correct as is */
2007	for (i = 0; i < size; i++) {
2008		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2009		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2010		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2011	}
2012	dce_v6_0_crtc_load_lut(crtc);
2013
2014	return 0;
2015}
2016
2017static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2018{
2019	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2020
2021	drm_crtc_cleanup(crtc);
2022	kfree(amdgpu_crtc);
2023}
2024
2025static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2026	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2027	.cursor_move = dce_v6_0_crtc_cursor_move,
2028	.gamma_set = dce_v6_0_crtc_gamma_set,
2029	.set_config = amdgpu_crtc_set_config,
2030	.destroy = dce_v6_0_crtc_destroy,
2031	.page_flip_target = amdgpu_crtc_page_flip_target,
2032};
2033
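/**
 * dce_v6_0_crtc_dpms - set the crtc power state
 *
 * @crtc: drm crtc
 * @mode: DPMS mode
 *
 * Enables or blanks/disables the crtc via atombios, keeps the vblank
 * and pageflip interrupts in sync with the new state and recomputes
 * the power management clocks.
 */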
2034static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2035{
2036	struct drm_device *dev = crtc->dev;
2037	struct amdgpu_device *adev = dev->dev_private;
2038	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039	unsigned type;
2040
2041	switch (mode) {
2042	case DRM_MODE_DPMS_ON:
2043		amdgpu_crtc->enabled = true;
2044		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2045		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2046		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2047		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2048		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2049		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2050		drm_crtc_vblank_on(crtc);
2051		dce_v6_0_crtc_load_lut(crtc);
2052		break;
2053	case DRM_MODE_DPMS_STANDBY:
2054	case DRM_MODE_DPMS_SUSPEND:
2055	case DRM_MODE_DPMS_OFF:
2056		drm_crtc_vblank_off(crtc);
2057		if (amdgpu_crtc->enabled)
2058			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2059		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2060		amdgpu_crtc->enabled = false;
2061		break;
2062	}
2063	/* adjust pm to dpms */
2064	amdgpu_pm_compute_clocks(adev);
2065}
2066
2067static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2068{
2069	/* disable crtc pair power gating before programming */
2070	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2071	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2072	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2073}
2074
2075static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2076{
2077	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2078	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2079}
2080
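/**
 * dce_v6_0_crtc_disable - shut down a crtc
 *
 * @crtc: drm crtc
 *
 * Turns the crtc off, unpins the scanout buffer, powergates the crtc
 * pair and disables the PPLL if no other enabled crtc is still using
 * it.
 */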
2081static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2082{
2083
2084	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2085	struct drm_device *dev = crtc->dev;
2086	struct amdgpu_device *adev = dev->dev_private;
2087	struct amdgpu_atom_ss ss;
2088	int i;
2089
2090	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2091	if (crtc->primary->fb) {
2092		int r;
2093		struct amdgpu_framebuffer *amdgpu_fb;
2094		struct amdgpu_bo *abo;
2095
2096		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2097		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2098		r = amdgpu_bo_reserve(abo, false);
2099		if (unlikely(r))
2100			DRM_ERROR("failed to reserve abo before unpin\n");
2101		else {
2102			amdgpu_bo_unpin(abo);
2103			amdgpu_bo_unreserve(abo);
2104		}
2105	}
2106	/* disable the GRPH */
2107	dce_v6_0_grph_enable(crtc, false);
2108
2109	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2110
2111	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2112		if (adev->mode_info.crtcs[i] &&
2113		    adev->mode_info.crtcs[i]->enabled &&
2114		    i != amdgpu_crtc->crtc_id &&
2115		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2116			/* one other crtc is using this pll, don't turn
2117			 * off the pll
2118			 */
2119			goto done;
2120		}
2121	}
2122
2123	switch (amdgpu_crtc->pll_id) {
2124	case ATOM_PPLL1:
2125	case ATOM_PPLL2:
2126		/* disable the ppll */
2127		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2128						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2129		break;
2130	default:
2131		break;
2132	}
2133done:
2134	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2135	amdgpu_crtc->adjusted_clock = 0;
2136	amdgpu_crtc->encoder = NULL;
2137	amdgpu_crtc->connector = NULL;
2138}
2139
2140static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2141				  struct drm_display_mode *mode,
2142				  struct drm_display_mode *adjusted_mode,
2143				  int x, int y, struct drm_framebuffer *old_fb)
2144{
2145	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2146
2147	if (!amdgpu_crtc->adjusted_clock)
2148		return -EINVAL;
2149
2150	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2151	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2152	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2153	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2154	amdgpu_atombios_crtc_scaler_setup(crtc);
2155	dce_v6_0_cursor_reset(crtc);
2156	/* update the hw mode for dpm */
2157	amdgpu_crtc->hw_mode = *adjusted_mode;
2158
2159	return 0;
2160}
2161
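/**
 * dce_v6_0_crtc_mode_fixup - validate and prepare a mode set
 *
 * @crtc: drm crtc
 * @mode: requested mode
 * @adjusted_mode: mode to be programmed
 *
 * Caches the encoder/connector driving the crtc, applies the scaler
 * fixup, prepares the PLL parameters and picks a PPLL.  Returns false
 * if any of these steps fail.
 */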
2162static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2163				     const struct drm_display_mode *mode,
2164				     struct drm_display_mode *adjusted_mode)
2165{
2166
2167	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2168	struct drm_device *dev = crtc->dev;
2169	struct drm_encoder *encoder;
2170
2171	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2172	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2173		if (encoder->crtc == crtc) {
2174			amdgpu_crtc->encoder = encoder;
2175			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2176			break;
2177		}
2178	}
2179	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2180		amdgpu_crtc->encoder = NULL;
2181		amdgpu_crtc->connector = NULL;
2182		return false;
2183	}
2184	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2185		return false;
2186	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2187		return false;
2188	/* pick pll */
2189	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2190	/* if we can't get a PPLL for a non-DP encoder, fail */
2191	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2192	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2193		return false;
2194
2195	return true;
2196}
2197
2198static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2199				  struct drm_framebuffer *old_fb)
2200{
2201	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2202}
2203
2204static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2205					 struct drm_framebuffer *fb,
2206					 int x, int y, enum mode_set_atomic state)
2207{
2208	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2209}
2210
2211static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2212	.dpms = dce_v6_0_crtc_dpms,
2213	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2214	.mode_set = dce_v6_0_crtc_mode_set,
2215	.mode_set_base = dce_v6_0_crtc_set_base,
2216	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2217	.prepare = dce_v6_0_crtc_prepare,
2218	.commit = dce_v6_0_crtc_commit,
2219	.load_lut = dce_v6_0_crtc_load_lut,
2220	.disable = dce_v6_0_crtc_disable,
2221};
2222
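/**
 * dce_v6_0_crtc_init - allocate and register a crtc
 *
 * @adev: amdgpu_device pointer
 * @index: crtc index
 *
 * Allocates the amdgpu_crtc, registers it with the DRM core, sets the
 * maximum cursor size, initializes a linear default gamma ramp and
 * records the per-crtc register offset.
 */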
2223static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2224{
2225	struct amdgpu_crtc *amdgpu_crtc;
2226	int i;
2227
2228	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2229			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2230	if (amdgpu_crtc == NULL)
2231		return -ENOMEM;
2232
2233	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2234
2235	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2236	amdgpu_crtc->crtc_id = index;
2237	adev->mode_info.crtcs[index] = amdgpu_crtc;
2238
2239	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2240	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2241	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2242	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2243
2244	for (i = 0; i < 256; i++) {
2245		amdgpu_crtc->lut_r[i] = i << 2;
2246		amdgpu_crtc->lut_g[i] = i << 2;
2247		amdgpu_crtc->lut_b[i] = i << 2;
2248	}
2249
2250	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2251
2252	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2253	amdgpu_crtc->adjusted_clock = 0;
2254	amdgpu_crtc->encoder = NULL;
2255	amdgpu_crtc->connector = NULL;
2256	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2257
2258	return 0;
2259}
2260
2261static int dce_v6_0_early_init(void *handle)
2262{
2263	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2264
2265	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2266	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2267
2268	dce_v6_0_set_display_funcs(adev);
2269	dce_v6_0_set_irq_funcs(adev);
2270
2271	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2272
2273	switch (adev->asic_type) {
2274	case CHIP_TAHITI:
2275	case CHIP_PITCAIRN:
2276	case CHIP_VERDE:
2277		adev->mode_info.num_hpd = 6;
2278		adev->mode_info.num_dig = 6;
2279		break;
2280	case CHIP_OLAND:
2281		adev->mode_info.num_hpd = 2;
2282		adev->mode_info.num_dig = 2;
2283		break;
2284	default:
2285		return -EINVAL;
2286	}
2287	}
2288	return 0;
2289}
2290
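/**
 * dce_v6_0_sw_init - software init for the DCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the crtc, pageflip and hpd interrupt sources, sets up the
 * DRM mode config, creates the crtcs, encoders and connectors and
 * initializes AFMT and audio support.
 */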
2291static int dce_v6_0_sw_init(void *handle)
2292{
2293	int r, i;
2294	bool ret;
2295	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296
2297	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2298		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2299		if (r)
2300			return r;
2301	}
2302
2303	for (i = 8; i < 20; i += 2) {
2304		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2305		if (r)
2306			return r;
2307	}
2308
2309	/* HPD hotplug */
2310	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2311	if (r)
2312		return r;
2313
2314	adev->mode_info.mode_config_initialized = true;
2315
2316	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2317	adev->ddev->mode_config.async_page_flip = true;
2318	adev->ddev->mode_config.max_width = 16384;
2319	adev->ddev->mode_config.max_height = 16384;
2320	adev->ddev->mode_config.preferred_depth = 24;
2321	adev->ddev->mode_config.prefer_shadow = 1;
2322	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2323
2324	r = amdgpu_modeset_create_props(adev);
2325	if (r)
2326		return r;
2327
2328	adev->ddev->mode_config.max_width = 16384;
2329	adev->ddev->mode_config.max_height = 16384;
2330
2331	/* allocate crtcs */
2332	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2333		r = dce_v6_0_crtc_init(adev, i);
2334		if (r)
2335			return r;
2336	}
2337
2338	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2339	if (ret)
2340		amdgpu_print_display_setup(adev->ddev);
2341	else
2342		return -EINVAL;
2343
2344	/* setup afmt */
2345	r = dce_v6_0_afmt_init(adev);
2346	if (r)
2347		return r;
2348
2349	r = dce_v6_0_audio_init(adev);
2350	if (r)
2351		return r;
2352
2353	drm_kms_helper_poll_init(adev->ddev);
2354
2355	return r;
2356}
2357
2358static int dce_v6_0_sw_fini(void *handle)
2359{
2360	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2361
2362	kfree(adev->mode_info.bios_hardcoded_edid);
2363
2364	drm_kms_helper_poll_fini(adev->ddev);
2365
2366	dce_v6_0_audio_fini(adev);
2367	dce_v6_0_afmt_fini(adev);
2368
2369	drm_mode_config_cleanup(adev->ddev);
2370	adev->mode_info.mode_config_initialized = false;
2371
2372	return 0;
2373}
2374
2375static int dce_v6_0_hw_init(void *handle)
2376{
2377	int i;
2378	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2379	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2380	/* init dig PHYs, disp eng pll */
2381	amdgpu_atombios_encoder_init_dig(adev);
2382	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2383
2384	/* initialize hpd */
2385	dce_v6_0_hpd_init(adev);
2386
2387	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2388		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2389	}
2390
2391	dce_v6_0_pageflip_interrupt_init(adev);
2392
2393	return 0;
2394}
2395
2396static int dce_v6_0_hw_fini(void *handle)
2397{
2398	int i;
2399	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2400
2401	dce_v6_0_hpd_fini(adev);
2402
2403	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2404		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2405	}
2406
2407	dce_v6_0_pageflip_interrupt_fini(adev);
2408
2409	return 0;
2410}
2411
2412static int dce_v6_0_suspend(void *handle)
2413{
2414	return dce_v6_0_hw_fini(handle);
2415}
2416
2417static int dce_v6_0_resume(void *handle)
2418{
2419	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2420	int ret;
2421
2422	ret = dce_v6_0_hw_init(handle);
2423
2424	/* turn on the BL */
2425	if (adev->mode_info.bl_encoder) {
2426		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2427								  adev->mode_info.bl_encoder);
2428		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2429						    bl_level);
2430	}
2431
2432	return ret;
2433}
2434
2435static bool dce_v6_0_is_idle(void *handle)
2436{
2437	return true;
2438}
2439
2440static int dce_v6_0_wait_for_idle(void *handle)
2441{
2442	return 0;
2443}
2444
2445static int dce_v6_0_soft_reset(void *handle)
2446{
2447	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
2448	return 0;
2449}
2450
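/**
 * dce_v6_0_set_crtc_vblank_interrupt_state - enable/disable vblank irq
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc index
 * @state: enable or disable
 *
 * Sets or clears VBLANK_INT_MASK in the per-crtc INT_MASK register.
 */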
2451static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2452						     int crtc,
2453						     enum amdgpu_interrupt_state state)
2454{
2455	u32 reg_block, interrupt_mask;
2456
2457	if (crtc >= adev->mode_info.num_crtc) {
2458		DRM_DEBUG("invalid crtc %d\n", crtc);
2459		return;
2460	}
2461
2462	switch (crtc) {
2463	case 0:
2464		reg_block = SI_CRTC0_REGISTER_OFFSET;
2465		break;
2466	case 1:
2467		reg_block = SI_CRTC1_REGISTER_OFFSET;
2468		break;
2469	case 2:
2470		reg_block = SI_CRTC2_REGISTER_OFFSET;
2471		break;
2472	case 3:
2473		reg_block = SI_CRTC3_REGISTER_OFFSET;
2474		break;
2475	case 4:
2476		reg_block = SI_CRTC4_REGISTER_OFFSET;
2477		break;
2478	case 5:
2479		reg_block = SI_CRTC5_REGISTER_OFFSET;
2480		break;
2481	default:
2482		DRM_DEBUG("invalid crtc %d\n", crtc);
2483		return;
2484	}
2485
2486	switch (state) {
2487	case AMDGPU_IRQ_STATE_DISABLE:
2488		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2489		interrupt_mask &= ~VBLANK_INT_MASK;
2490		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2491		break;
2492	case AMDGPU_IRQ_STATE_ENABLE:
2493		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2494		interrupt_mask |= VBLANK_INT_MASK;
2495		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2496		break;
2497	default:
2498		break;
2499	}
2500}
2501
2502static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2503						    int crtc,
2504						    enum amdgpu_interrupt_state state)
2505{
2506
2507}
2508
2509static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2510					    struct amdgpu_irq_src *src,
2511					    unsigned type,
2512					    enum amdgpu_interrupt_state state)
2513{
2514	u32 dc_hpd_int_cntl;
2515
2516	if (type >= adev->mode_info.num_hpd) {
2517		DRM_DEBUG("invalid hpd %d\n", type);
2518		return 0;
2519	}
2520
2521	switch (state) {
2522	case AMDGPU_IRQ_STATE_DISABLE:
2523		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2524		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2525		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2526		break;
2527	case AMDGPU_IRQ_STATE_ENABLE:
2528		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2529		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2530		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2531		break;
2532	default:
2533		break;
2534	}
2535
2536	return 0;
2537}
2538
2539static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2540					     struct amdgpu_irq_src *src,
2541					     unsigned type,
2542					     enum amdgpu_interrupt_state state)
2543{
2544	switch (type) {
2545	case AMDGPU_CRTC_IRQ_VBLANK1:
2546		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2547		break;
2548	case AMDGPU_CRTC_IRQ_VBLANK2:
2549		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2550		break;
2551	case AMDGPU_CRTC_IRQ_VBLANK3:
2552		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2553		break;
2554	case AMDGPU_CRTC_IRQ_VBLANK4:
2555		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2556		break;
2557	case AMDGPU_CRTC_IRQ_VBLANK5:
2558		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2559		break;
2560	case AMDGPU_CRTC_IRQ_VBLANK6:
2561		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2562		break;
2563	case AMDGPU_CRTC_IRQ_VLINE1:
2564		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2565		break;
2566	case AMDGPU_CRTC_IRQ_VLINE2:
2567		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2568		break;
2569	case AMDGPU_CRTC_IRQ_VLINE3:
2570		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2571		break;
2572	case AMDGPU_CRTC_IRQ_VLINE4:
2573		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2574		break;
2575	case AMDGPU_CRTC_IRQ_VLINE5:
2576		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2577		break;
2578	case AMDGPU_CRTC_IRQ_VLINE6:
2579		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2580		break;
2581	default:
2582		break;
2583	}
2584	return 0;
2585}
2586
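/**
 * dce_v6_0_crtc_irq - crtc interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acknowledges vblank and vline interrupts for the crtc that raised
 * them and forwards vblank events to the DRM core.
 */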
2587static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2588			     struct amdgpu_irq_src *source,
2589			     struct amdgpu_iv_entry *entry)
2590{
2591	unsigned crtc = entry->src_id - 1;
2592	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2593	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2594
2595	switch (entry->src_data) {
2596	case 0: /* vblank */
2597		if (disp_int & interrupt_status_offsets[crtc].vblank)
2598			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2599		else
2600			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2601
2602		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2603			drm_handle_vblank(adev->ddev, crtc);
2604		}
2605		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2606		break;
2607	case 1: /* vline */
2608		if (disp_int & interrupt_status_offsets[crtc].vline)
2609			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2610		else
2611			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2612
2613		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2614		break;
2615	default:
2616		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2617		break;
2618	}
2619
2620	return 0;
2621}
2622
2623static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2624						 struct amdgpu_irq_src *src,
2625						 unsigned type,
2626						 enum amdgpu_interrupt_state state)
2627{
2628	u32 reg;
2629
2630	if (type >= adev->mode_info.num_crtc) {
2631		DRM_ERROR("invalid pageflip crtc %d\n", type);
2632		return -EINVAL;
2633	}
2634
2635	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2636	if (state == AMDGPU_IRQ_STATE_DISABLE)
2637		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2638		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2639	else
2640		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2641		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2642
2643	return 0;
2644}
2645
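/**
 * dce_v6_0_pageflip_irq - pageflip completion interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acknowledges the pageflip interrupt, sends the vblank event for the
 * completed flip to userspace and schedules the work that unpins the
 * old framebuffer.
 */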
2646static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2647				 struct amdgpu_irq_src *source,
2648				 struct amdgpu_iv_entry *entry)
2649{
2650	unsigned long flags;
2651	unsigned crtc_id;
2652	struct amdgpu_crtc *amdgpu_crtc;
2653	struct amdgpu_flip_work *works;
2654
2655	crtc_id = (entry->src_id - 8) >> 1;
2656	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2657
2658	if (crtc_id >= adev->mode_info.num_crtc) {
2659		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2660		return -EINVAL;
2661	}
2662
2663	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
2664	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2665		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
2666		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2667
2668	/* IRQ could occur during early initialization */
2669	if (amdgpu_crtc == NULL)
2670		return 0;
2671
2672	spin_lock_irqsave(&adev->ddev->event_lock, flags);
2673	works = amdgpu_crtc->pflip_works;
2674	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
2675		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2676						"AMDGPU_FLIP_SUBMITTED(%d)\n",
2677						amdgpu_crtc->pflip_status,
2678						AMDGPU_FLIP_SUBMITTED);
2679		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2680		return 0;
2681	}
2682
2683	/* page flip completed. clean up */
2684	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2685	amdgpu_crtc->pflip_works = NULL;
2686
2687	/* wake up userspace */
2688	if (works->event)
2689		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2690
2691	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2692
2693	drm_crtc_vblank_put(&amdgpu_crtc->base);
2694	schedule_work(&works->unpin_work);
2695
2696	return 0;
2697}
2698
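/**
 * dce_v6_0_hpd_irq - hotplug detect interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acknowledges the hpd interrupt and schedules the hotplug work.
 */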
2699static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2700			    struct amdgpu_irq_src *source,
2701			    struct amdgpu_iv_entry *entry)
2702{
2703	uint32_t disp_int, mask, tmp;
2704	unsigned hpd;
2705
2706	if (entry->src_data >= adev->mode_info.num_hpd) {
2707		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2708		return 0;
2709	}
2710
2711	hpd = entry->src_data;
2712	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2713	mask = interrupt_status_offsets[hpd].hpd;
2714
2715	if (disp_int & mask) {
2716		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
2717		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2718		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
2719		schedule_work(&adev->hotplug_work);
2720		DRM_INFO("IH: HPD%d\n", hpd + 1);
2721	}
2722
2723	return 0;
2724
2725}
2726
2727static int dce_v6_0_set_clockgating_state(void *handle,
2728					  enum amd_clockgating_state state)
2729{
2730	return 0;
2731}
2732
2733static int dce_v6_0_set_powergating_state(void *handle,
2734					  enum amd_powergating_state state)
2735{
2736	return 0;
2737}
2738
2739static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2740	.name = "dce_v6_0",
2741	.early_init = dce_v6_0_early_init,
2742	.late_init = NULL,
2743	.sw_init = dce_v6_0_sw_init,
2744	.sw_fini = dce_v6_0_sw_fini,
2745	.hw_init = dce_v6_0_hw_init,
2746	.hw_fini = dce_v6_0_hw_fini,
2747	.suspend = dce_v6_0_suspend,
2748	.resume = dce_v6_0_resume,
2749	.is_idle = dce_v6_0_is_idle,
2750	.wait_for_idle = dce_v6_0_wait_for_idle,
2751	.soft_reset = dce_v6_0_soft_reset,
2752	.set_clockgating_state = dce_v6_0_set_clockgating_state,
2753	.set_powergating_state = dce_v6_0_set_powergating_state,
2754};
2755
2756static void
2757dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2758			  struct drm_display_mode *mode,
2759			  struct drm_display_mode *adjusted_mode)
2760{
2761
2762	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2763
2764	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2765
2766	/* need to call this here rather than in prepare() since we need some crtc info */
2767	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2768
2769	/* set scaler clears this on some chips */
2770	dce_v6_0_set_interleave(encoder->crtc, mode);
2771
2772	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2773		dce_v6_0_afmt_enable(encoder, true);
2774		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2775	}
2776}
2777
2778static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2779{
2780
2781	struct amdgpu_device *adev = encoder->dev->dev_private;
2782	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2783	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2784
2785	if ((amdgpu_encoder->active_device &
2786	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2787	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2788	     ENCODER_OBJECT_ID_NONE)) {
2789		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2790		if (dig) {
2791			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2792			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2793				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2794		}
2795	}
2796
2797	amdgpu_atombios_scratch_regs_lock(adev, true);
2798
2799	if (connector) {
2800		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2801
2802		/* select the clock/data port if it uses a router */
2803		if (amdgpu_connector->router.cd_valid)
2804			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2805
2806		/* turn eDP panel on for mode set */
2807		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2808			amdgpu_atombios_encoder_set_edp_panel_power(connector,
2809							     ATOM_TRANSMITTER_ACTION_POWER_ON);
2810	}
2811
2812	/* this is needed for the pll/ss setup to work correctly in some cases */
2813	amdgpu_atombios_encoder_set_crtc_source(encoder);
2814	/* set up the FMT blocks */
2815	dce_v6_0_program_fmt(encoder);
2816}
2817
2818static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2819{
2820
2821	struct drm_device *dev = encoder->dev;
2822	struct amdgpu_device *adev = dev->dev_private;
2823
2824	/* need to call this here as we need the crtc set up */
2825	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2826	amdgpu_atombios_scratch_regs_lock(adev, false);
2827}
2828
2829static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2830{
2831
2832	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2833	struct amdgpu_encoder_atom_dig *dig;
2834
2835	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2836
2837	if (amdgpu_atombios_encoder_is_digital(encoder)) {
2838		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2839			dce_v6_0_afmt_enable(encoder, false);
2840		dig = amdgpu_encoder->enc_priv;
2841		dig->dig_encoder = -1;
2842	}
2843	amdgpu_encoder->active_device = 0;
2844}
2845
2846/* these are handled by the primary encoders */
2847static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2848{
2849
2850}
2851
2852static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2853{
2854
2855}
2856
2857static void
2858dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2859		      struct drm_display_mode *mode,
2860		      struct drm_display_mode *adjusted_mode)
2861{
2862
2863}
2864
2865static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2866{
2867
2868}
2869
2870static void
2871dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2872{
2873
2874}
2875
2876static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2877				    const struct drm_display_mode *mode,
2878				    struct drm_display_mode *adjusted_mode)
2879{
2880	return true;
2881}
2882
2883static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2884	.dpms = dce_v6_0_ext_dpms,
2885	.mode_fixup = dce_v6_0_ext_mode_fixup,
2886	.prepare = dce_v6_0_ext_prepare,
2887	.mode_set = dce_v6_0_ext_mode_set,
2888	.commit = dce_v6_0_ext_commit,
2889	.disable = dce_v6_0_ext_disable,
2890	/* no detect for TMDS/LVDS yet */
2891};
2892
2893static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2894	.dpms = amdgpu_atombios_encoder_dpms,
2895	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2896	.prepare = dce_v6_0_encoder_prepare,
2897	.mode_set = dce_v6_0_encoder_mode_set,
2898	.commit = dce_v6_0_encoder_commit,
2899	.disable = dce_v6_0_encoder_disable,
2900	.detect = amdgpu_atombios_encoder_dig_detect,
2901};
2902
2903static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2904	.dpms = amdgpu_atombios_encoder_dpms,
2905	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2906	.prepare = dce_v6_0_encoder_prepare,
2907	.mode_set = dce_v6_0_encoder_mode_set,
2908	.commit = dce_v6_0_encoder_commit,
2909	.detect = amdgpu_atombios_encoder_dac_detect,
2910};
2911
2912static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
2913{
2914	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2915	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2916		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
2917	kfree(amdgpu_encoder->enc_priv);
2918	drm_encoder_cleanup(encoder);
2919	kfree(amdgpu_encoder);
2920}
2921
2922static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
2923	.destroy = dce_v6_0_encoder_destroy,
2924};
2925
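/**
 * dce_v6_0_encoder_add - register an encoder from the BIOS tables
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: ATOM encoder enum
 * @supported_device: bitmask of supported devices
 * @caps: encoder capabilities
 *
 * If the encoder has already been added, just ORs in the new devices;
 * otherwise allocates an amdgpu_encoder, picks the possible crtcs and
 * registers it with the DAC, DIG or external helper funcs that match
 * the encoder object id.
 */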
2926static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
2927				 uint32_t encoder_enum,
2928				 uint32_t supported_device,
2929				 u16 caps)
2930{
2931	struct drm_device *dev = adev->ddev;
2932	struct drm_encoder *encoder;
2933	struct amdgpu_encoder *amdgpu_encoder;
2934
2935	/* see if we already added it */
2936	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2937		amdgpu_encoder = to_amdgpu_encoder(encoder);
2938		if (amdgpu_encoder->encoder_enum == encoder_enum) {
2939			amdgpu_encoder->devices |= supported_device;
2940			return;
2941		}
2942
2943	}
2944
2945	/* add a new one */
2946	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
2947	if (!amdgpu_encoder)
2948		return;
2949
2950	encoder = &amdgpu_encoder->base;
2951	switch (adev->mode_info.num_crtc) {
2952	case 1:
2953		encoder->possible_crtcs = 0x1;
2954		break;
2955	case 2:
2956	default:
2957		encoder->possible_crtcs = 0x3;
2958		break;
2959	case 4:
2960		encoder->possible_crtcs = 0xf;
2961		break;
2962	case 6:
2963		encoder->possible_crtcs = 0x3f;
2964		break;
2965	}
2966
2967	amdgpu_encoder->enc_priv = NULL;
2968	amdgpu_encoder->encoder_enum = encoder_enum;
2969	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
2970	amdgpu_encoder->devices = supported_device;
2971	amdgpu_encoder->rmx_type = RMX_OFF;
2972	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
2973	amdgpu_encoder->is_ext_encoder = false;
2974	amdgpu_encoder->caps = caps;
2975
2976	switch (amdgpu_encoder->encoder_id) {
2977	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2978	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2979		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2980				 DRM_MODE_ENCODER_DAC, NULL);
2981		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
2982		break;
2983	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
2984	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2985	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2986	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2987	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2988		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2989			amdgpu_encoder->rmx_type = RMX_FULL;
2990			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2991					 DRM_MODE_ENCODER_LVDS, NULL);
2992			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
2993		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2994			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2995					 DRM_MODE_ENCODER_DAC, NULL);
2996			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
2997		} else {
2998			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2999					 DRM_MODE_ENCODER_TMDS, NULL);
3000			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3001		}
3002		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3003		break;
3004	case ENCODER_OBJECT_ID_SI170B:
3005	case ENCODER_OBJECT_ID_CH7303:
3006	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3007	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3008	case ENCODER_OBJECT_ID_TITFP513:
3009	case ENCODER_OBJECT_ID_VT1623:
3010	case ENCODER_OBJECT_ID_HDMI_SI1930:
3011	case ENCODER_OBJECT_ID_TRAVIS:
3012	case ENCODER_OBJECT_ID_NUTMEG:
3013		/* these are handled by the primary encoders */
3014		amdgpu_encoder->is_ext_encoder = true;
3015		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3016			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3017					 DRM_MODE_ENCODER_LVDS, NULL);
3018		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3019			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3020					 DRM_MODE_ENCODER_DAC, NULL);
3021		else
3022			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3023					 DRM_MODE_ENCODER_TMDS, NULL);
3024		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3025		break;
3026	}
3027}
3028
3029static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3030	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
3031	.bandwidth_update = &dce_v6_0_bandwidth_update,
3032	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3033	.vblank_wait = &dce_v6_0_vblank_wait,
3034	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3035	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3036	.hpd_sense = &dce_v6_0_hpd_sense,
3037	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3038	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3039	.page_flip = &dce_v6_0_page_flip,
3040	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3041	.add_encoder = &dce_v6_0_encoder_add,
3042	.add_connector = &amdgpu_connector_add,
3043	.stop_mc_access = &dce_v6_0_stop_mc_access,
3044	.resume_mc_access = &dce_v6_0_resume_mc_access,
3045};
3046
3047static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3048{
3049	if (adev->mode_info.funcs == NULL)
3050		adev->mode_info.funcs = &dce_v6_0_display_funcs;
3051}
3052
3053static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3054	.set = dce_v6_0_set_crtc_interrupt_state,
3055	.process = dce_v6_0_crtc_irq,
3056};
3057
3058static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3059	.set = dce_v6_0_set_pageflip_interrupt_state,
3060	.process = dce_v6_0_pageflip_irq,
3061};
3062
3063static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3064	.set = dce_v6_0_set_hpd_interrupt_state,
3065	.process = dce_v6_0_hpd_irq,
3066};
3067
3068static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3069{
3070	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3071	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3072
3073	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3074	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3075
3076	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3077	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3078}
3079
3080const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3081{
3082	.type = AMD_IP_BLOCK_TYPE_DCE,
3083	.major = 6,
3084	.minor = 0,
3085	.rev = 0,
3086	.funcs = &dce_v6_0_ip_funcs,
3087};
3088
3089const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3090{
3091	.type = AMD_IP_BLOCK_TYPE_DCE,
3092	.major = 6,
3093	.minor = 4,
3094	.rev = 0,
3095	.funcs = &dce_v6_0_ip_funcs,
3096};
 333	drm_for_each_connector_iter(connector, &iter) {
 334		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 335
 336		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 337			continue;
 338
 339		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 340		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 341		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
 342
 343		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 344	}
 345	drm_connector_list_iter_end(&iter);
 346}
 347
 348static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 349{
 350	return mmDC_GPIO_HPD_A;
 351}
 352
 353static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 354					  bool render)
 355{
 356	if (!render)
 357		WREG32(mmVGA_RENDER_CONTROL,
 358			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 359
 360}
 361
 362static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 363{
 364	switch (adev->asic_type) {
 365	case CHIP_TAHITI:
 366	case CHIP_PITCAIRN:
 367	case CHIP_VERDE:
 368		return 6;
 369	case CHIP_OLAND:
 370		return 2;
 371	default:
 372		return 0;
 373	}
 374}
 375
 376void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 377{
 378	/* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
 379	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 380		u32 tmp;
 381		int crtc_enabled, i;
 382
 383		dce_v6_0_set_vga_render_state(adev, false);
 384
 385		/* Disable the CRTCs */
 386		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
 387			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
 388				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 389			if (crtc_enabled) {
 390				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 391				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 392				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 393				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 394				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 395			}
 396		}
 397	}
 398}
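/* Note on the sequence above: CRTC_UPDATE_LOCK is set around the
 * MASTER_EN clear so the double-buffered CRTC registers latch the
 * disable as a single update rather than mid-scanout.
 */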
 399
 400static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 401{
 402
 403	struct drm_device *dev = encoder->dev;
 404	struct amdgpu_device *adev = dev->dev_private;
 405	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 406	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 407	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 408	int bpc = 0;
 409	u32 tmp = 0;
 410	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 411
 412	if (connector) {
 413		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 414		bpc = amdgpu_connector_get_monitor_bpc(connector);
 415		dither = amdgpu_connector->dither;
 416	}
 417
 418	/* LVDS FMT is set up by atom */
 419	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 420		return;
 421
 422	if (bpc == 0)
 423		return;
 424
 426	switch (bpc) {
 427	case 6:
 428		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 429			/* XXX sort out optimal dither settings */
 430			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 431				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 432				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 433		else
 434			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 435		break;
 436	case 8:
 437		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 438			/* XXX sort out optimal dither settings */
 439			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 440				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 441				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 442				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 443				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 444		else
 445			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 446				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 447		break;
 448	case 10:
 449	default:
 450		/* not needed */
 451		break;
 452	}
 453
 454	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 455}
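/* Rough intent of the FMT programming above: when the sink is shallower
 * than the pipe output, either truncate (cheap, but can band) or dither
 * (trades banding for noise).  E.g., a 6 bpc panel with dithering
 * requested gets the spatial/random dither enables, while one without
 * gets plain truncation.
 */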
 456
 457/**
 458 * si_get_number_of_dram_channels - get the number of dram channels
 459 *
 460 * @adev: amdgpu_device pointer
 461 *
 462 * Look up the number of video ram channels (SI).
 463 * Used for display watermark bandwidth calculations
 464 * Returns the number of dram channels
 465 */
 466static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 467{
 468	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 469
 470	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 471	case 0:
 472	default:
 473		return 1;
 474	case 1:
 475		return 2;
 476	case 2:
 477		return 4;
 478	case 3:
 479		return 8;
 480	case 4:
 481		return 3;
 482	case 5:
 483		return 6;
 484	case 6:
 485		return 10;
 486	case 7:
 487		return 12;
 488	case 8:
 489		return 16;
 490	}
 491}
 492
 493struct dce6_wm_params {
 494	u32 dram_channels; /* number of dram channels */
 495	u32 yclk;          /* bandwidth per dram data pin in kHz */
 496	u32 sclk;          /* engine clock in kHz */
 497	u32 disp_clk;      /* display clock in kHz */
 498	u32 src_width;     /* viewport width */
 499	u32 active_time;   /* active display time in ns */
 500	u32 blank_time;    /* blank time in ns */
 501	bool interlaced;    /* mode is interlaced */
 502	fixed20_12 vsc;    /* vertical scale ratio */
 503	u32 num_heads;     /* number of active crtcs */
 504	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 505	u32 lb_size;       /* line buffer allocated to pipe */
 506	u32 vtaps;         /* vertical scaler taps */
 507};
 508
 509/**
 510 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 511 *
 512 * @wm: watermark calculation data
 513 *
 514 * Calculate the raw dram bandwidth (SI).
 515 * Used for display watermark bandwidth calculations
 516 * Returns the dram bandwidth in MBytes/s
 517 */
 518static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
 519{
 520	/* Calculate raw DRAM Bandwidth */
 521	fixed20_12 dram_efficiency; /* 0.7 */
 522	fixed20_12 yclk, dram_channels, bandwidth;
 523	fixed20_12 a;
 524
 525	a.full = dfixed_const(1000);
 526	yclk.full = dfixed_const(wm->yclk);
 527	yclk.full = dfixed_div(yclk, a);
 528	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 529	a.full = dfixed_const(10);
 530	dram_efficiency.full = dfixed_const(7);
 531	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 532	bandwidth.full = dfixed_mul(dram_channels, yclk);
 533	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 534
 535	return dfixed_trunc(bandwidth);
 536}
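/* Worked example with illustrative numbers: wm->yclk = 1000000 and
 * wm->dram_channels = 2 give (1000000 / 1000) * (2 * 4) * 0.7 = 5600
 * MBytes/s of raw DRAM bandwidth.
 */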
 537
 538/**
 539 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 540 *
 541 * @wm: watermark calculation data
 542 *
 543 * Calculate the dram bandwidth used for display (SI).
 544 * Used for display watermark bandwidth calculations
 545 * Returns the dram bandwidth for display in MBytes/s
 546 */
 547static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 548{
 549	/* Calculate DRAM Bandwidth and the part allocated to display. */
 550	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 551	fixed20_12 yclk, dram_channels, bandwidth;
 552	fixed20_12 a;
 553
 554	a.full = dfixed_const(1000);
 555	yclk.full = dfixed_const(wm->yclk);
 556	yclk.full = dfixed_div(yclk, a);
 557	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 558	a.full = dfixed_const(10);
 559	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 560	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 561	bandwidth.full = dfixed_mul(dram_channels, yclk);
 562	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 563
 564	return dfixed_trunc(bandwidth);
 565}
 566
 567/**
 568 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 569 *
 570 * @wm: watermark calculation data
 571 *
 572 * Calculate the data return bandwidth used for display (SI).
 573 * Used for display watermark bandwidth calculations
 574 * Returns the data return bandwidth in MBytes/s
 575 */
 576static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
 577{
 578	/* Calculate the display Data return Bandwidth */
 579	fixed20_12 return_efficiency; /* 0.8 */
 580	fixed20_12 sclk, bandwidth;
 581	fixed20_12 a;
 582
 583	a.full = dfixed_const(1000);
 584	sclk.full = dfixed_const(wm->sclk);
 585	sclk.full = dfixed_div(sclk, a);
 586	a.full = dfixed_const(10);
 587	return_efficiency.full = dfixed_const(8);
 588	return_efficiency.full = dfixed_div(return_efficiency, a);
 589	a.full = dfixed_const(32);
 590	bandwidth.full = dfixed_mul(a, sclk);
 591	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 592
 593	return dfixed_trunc(bandwidth);
 594}
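/* E.g. (illustrative): wm->sclk = 800000 kHz yields 800 MHz * 32 bytes
 * per cycle * 0.8 efficiency = 20480 MBytes/s.
 */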
 595
 596/**
 597 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 598 *
 599 * @wm: watermark calculation data
 600 *
 601 * Calculate the dmif bandwidth used for display (SI).
 602 * Used for display watermark bandwidth calculations
 603 * Returns the dmif bandwidth in MBytes/s
 604 */
 605static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
 606{
 607	/* Calculate the DMIF Request Bandwidth */
 608	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 609	fixed20_12 disp_clk, bandwidth;
 610	fixed20_12 a, b;
 611
 612	a.full = dfixed_const(1000);
 613	disp_clk.full = dfixed_const(wm->disp_clk);
 614	disp_clk.full = dfixed_div(disp_clk, a);
 615	a.full = dfixed_const(32);
 616	b.full = dfixed_mul(a, disp_clk);
 617
 618	a.full = dfixed_const(10);
 619	disp_clk_request_efficiency.full = dfixed_const(8);
 620	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 621
 622	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 623
 624	return dfixed_trunc(bandwidth);
 625}
 626
 627/**
 628 * dce_v6_0_available_bandwidth - get the min available bandwidth
 629 *
 630 * @wm: watermark calculation data
 631 *
 632 * Calculate the min available bandwidth used for display (SI).
 633 * Used for display watermark bandwidth calculations
 634 * Returns the min available bandwidth in MBytes/s
 635 */
 636static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
 637{
 638	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 639	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
 640	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
 641	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
 642
 643	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 644}
 645
 646/**
 647 * dce_v6_0_average_bandwidth - get the average available bandwidth
 648 *
 649 * @wm: watermark calculation data
 650 *
 651 * Calculate the average available bandwidth used for display (SI).
 652 * Used for display watermark bandwidth calculations
 653 * Returns the average available bandwidth in MBytes/s
 654 */
 655static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
 656{
 657	/* Calculate the display mode Average Bandwidth
 658	 * DisplayMode should contain the source and destination dimensions,
 659	 * timing, etc.
 660	 */
 661	fixed20_12 bpp;
 662	fixed20_12 line_time;
 663	fixed20_12 src_width;
 664	fixed20_12 bandwidth;
 665	fixed20_12 a;
 666
 667	a.full = dfixed_const(1000);
 668	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 669	line_time.full = dfixed_div(line_time, a);
 670	bpp.full = dfixed_const(wm->bytes_per_pixel);
 671	src_width.full = dfixed_const(wm->src_width);
 672	bandwidth.full = dfixed_mul(src_width, bpp);
 673	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 674	bandwidth.full = dfixed_div(bandwidth, line_time);
 675
 676	return dfixed_trunc(bandwidth);
 677}
 678
 679/**
 680 * dce_v6_0_latency_watermark - get the latency watermark
 681 *
 682 * @wm: watermark calculation data
 683 *
 684 * Calculate the latency watermark (SI).
 685 * Used for display watermark bandwidth calculations
 686 * Returns the latency watermark in ns
 687 */
 688static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 689{
 690	/* First calculate the latency in ns */
 691	u32 mc_latency = 2000; /* 2000 ns. */
 692	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
 693	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 694	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 695	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 696	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 697		(wm->num_heads * cursor_line_pair_return_time);
 698	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 699	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 700	u32 tmp, dmif_size = 12288;
 701	fixed20_12 a, b, c;
 702
 703	if (wm->num_heads == 0)
 704		return 0;
 705
 706	a.full = dfixed_const(2);
 707	b.full = dfixed_const(1);
 708	if ((wm->vsc.full > a.full) ||
 709	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 710	    (wm->vtaps >= 5) ||
 711	    ((wm->vsc.full >= a.full) && wm->interlaced))
 712		max_src_lines_per_dst_line = 4;
 713	else
 714		max_src_lines_per_dst_line = 2;
 715
 716	a.full = dfixed_const(available_bandwidth);
 717	b.full = dfixed_const(wm->num_heads);
 718	a.full = dfixed_div(a, b);
 719	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
 720	tmp = min(dfixed_trunc(a), tmp);
 721
 722	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 723
 724	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 725	b.full = dfixed_const(1000);
 726	c.full = dfixed_const(lb_fill_bw);
 727	b.full = dfixed_div(c, b);
 728	a.full = dfixed_div(a, b);
 729	line_fill_time = dfixed_trunc(a);
 730
 731	if (line_fill_time < wm->active_time)
 732		return latency;
 733	else
 734		return latency + (line_fill_time - wm->active_time);
 735
 736}
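/* Scale of the terms above, with illustrative numbers: at 10240 MBytes/s
 * of available bandwidth a worst-case chunk of 512 * 8 bytes returns in
 * 512 * 8 * 1000 / 10240 = 400 ns, and a disp_clk of 100000 kHz makes
 * dc_latency 40000000 / 100000 = 400 ns on top of the assumed 2000 ns
 * mc_latency.
 */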
 737
 738/**
 739 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 740 * average and available dram bandwidth
 741 *
 742 * @wm: watermark calculation data
 743 *
 744 * Check if the display average bandwidth fits in the display
 745 * dram bandwidth (SI).
 746 * Used for display watermark bandwidth calculations
 747 * Returns true if the display fits, false if not.
 748 */
 749static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 750{
 751	if (dce_v6_0_average_bandwidth(wm) <=
 752	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 753		return true;
 754	else
 755		return false;
 756}
 757
 758/**
 759 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 760 * average and available bandwidth
 761 *
 762 * @wm: watermark calculation data
 763 *
 764 * Check if the display average bandwidth fits in the display
 765 * available bandwidth (SI).
 766 * Used for display watermark bandwidth calculations
 767 * Returns true if the display fits, false if not.
 768 */
 769static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 770{
 771	if (dce_v6_0_average_bandwidth(wm) <=
 772	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
 773		return true;
 774	else
 775		return false;
 776}
 777
 778/**
 779 * dce_v6_0_check_latency_hiding - check latency hiding
 780 *
 781 * @wm: watermark calculation data
 782 *
 783 * Check latency hiding (SI).
 784 * Used for display watermark bandwidth calculations
 785 * Returns true if the display fits, false if not.
 786 */
 787static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
 788{
 789	u32 lb_partitions = wm->lb_size / wm->src_width;
 790	u32 line_time = wm->active_time + wm->blank_time;
 791	u32 latency_tolerant_lines;
 792	u32 latency_hiding;
 793	fixed20_12 a;
 794
 795	a.full = dfixed_const(1);
 796	if (wm->vsc.full > a.full)
 797		latency_tolerant_lines = 1;
 798	else {
 799		if (lb_partitions <= (wm->vtaps + 1))
 800			latency_tolerant_lines = 1;
 801		else
 802			latency_tolerant_lines = 2;
 803	}
 804
 805	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 806
 807	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
 808		return true;
 809	else
 810		return false;
 811}
 812
 813/**
 814 * dce_v6_0_program_watermarks - program display watermarks
 815 *
 816 * @adev: amdgpu_device pointer
 817 * @amdgpu_crtc: the selected display controller
 818 * @lb_size: line buffer size
 819 * @num_heads: number of display controllers in use
 820 *
 821 * Calculate and program the display watermarks for the
 822 * selected display controller (SI).
 823 */
 824static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 825					struct amdgpu_crtc *amdgpu_crtc,
 826					u32 lb_size, u32 num_heads)
 827{
 828	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 829	struct dce6_wm_params wm_low, wm_high;
 830	u32 dram_channels;
 831	u32 active_time;
 832	u32 line_time = 0;
 833	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 834	u32 priority_a_mark = 0, priority_b_mark = 0;
 835	u32 priority_a_cnt = PRIORITY_OFF;
 836	u32 priority_b_cnt = PRIORITY_OFF;
 837	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
 838	fixed20_12 a, b, c;
 839
 840	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 841		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
 842					    (u32)mode->clock);
 843		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
 844					  (u32)mode->clock);
 845		line_time = min(line_time, (u32)65535);
 846		priority_a_cnt = 0;
 847		priority_b_cnt = 0;
 848
 849		dram_channels = si_get_number_of_dram_channels(adev);
 850
 851		/* watermark for high clocks */
 852		if (adev->pm.dpm_enabled) {
 853			wm_high.yclk =
 854				amdgpu_dpm_get_mclk(adev, false) * 10;
 855			wm_high.sclk =
 856				amdgpu_dpm_get_sclk(adev, false) * 10;
 857		} else {
 858			wm_high.yclk = adev->pm.current_mclk * 10;
 859			wm_high.sclk = adev->pm.current_sclk * 10;
 860		}
 861
 862		wm_high.disp_clk = mode->clock;
 863		wm_high.src_width = mode->crtc_hdisplay;
 864		wm_high.active_time = active_time;
 865		wm_high.blank_time = line_time - wm_high.active_time;
 866		wm_high.interlaced = false;
 867		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 868			wm_high.interlaced = true;
 869		wm_high.vsc = amdgpu_crtc->vsc;
 870		wm_high.vtaps = 1;
 871		if (amdgpu_crtc->rmx_type != RMX_OFF)
 872			wm_high.vtaps = 2;
 873		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
 874		wm_high.lb_size = lb_size;
 875		wm_high.dram_channels = dram_channels;
 876		wm_high.num_heads = num_heads;
 877
 878		/* watermark for low clocks */
 879		if (adev->pm.dpm_enabled) {
 880			wm_low.yclk =
 881				amdgpu_dpm_get_mclk(adev, true) * 10;
 882			wm_low.sclk =
 883				amdgpu_dpm_get_sclk(adev, true) * 10;
 884		} else {
 885			wm_low.yclk = adev->pm.current_mclk * 10;
 886			wm_low.sclk = adev->pm.current_sclk * 10;
 887		}
 888
 889		wm_low.disp_clk = mode->clock;
 890		wm_low.src_width = mode->crtc_hdisplay;
 891		wm_low.active_time = active_time;
 892		wm_low.blank_time = line_time - wm_low.active_time;
 893		wm_low.interlaced = false;
 894		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 895			wm_low.interlaced = true;
 896		wm_low.vsc = amdgpu_crtc->vsc;
 897		wm_low.vtaps = 1;
 898		if (amdgpu_crtc->rmx_type != RMX_OFF)
 899			wm_low.vtaps = 2;
 900		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
 901		wm_low.lb_size = lb_size;
 902		wm_low.dram_channels = dram_channels;
 903		wm_low.num_heads = num_heads;
 904
 905		/* set for high clocks */
 906		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
 907		/* set for low clocks */
 908		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
 909
 910		/* possibly force display priority to high */
 911		/* should really do this at mode validation time... */
 912		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
 913		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
 914		    !dce_v6_0_check_latency_hiding(&wm_high) ||
 915		    (adev->mode_info.disp_priority == 2)) {
 916			DRM_DEBUG_KMS("force priority to high\n");
 917			priority_a_cnt |= PRIORITY_ALWAYS_ON;
 918			priority_b_cnt |= PRIORITY_ALWAYS_ON;
 919		}
 920		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
 921		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
 922		    !dce_v6_0_check_latency_hiding(&wm_low) ||
 923		    (adev->mode_info.disp_priority == 2)) {
 924			DRM_DEBUG_KMS("force priority to high\n");
 925			priority_a_cnt |= PRIORITY_ALWAYS_ON;
 926			priority_b_cnt |= PRIORITY_ALWAYS_ON;
 927		}
 928
 929		a.full = dfixed_const(1000);
 930		b.full = dfixed_const(mode->clock);
 931		b.full = dfixed_div(b, a);
 932		c.full = dfixed_const(latency_watermark_a);
 933		c.full = dfixed_mul(c, b);
 934		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
 935		c.full = dfixed_div(c, a);
 936		a.full = dfixed_const(16);
 937		c.full = dfixed_div(c, a);
 938		priority_a_mark = dfixed_trunc(c);
 939		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
 940
 941		a.full = dfixed_const(1000);
 942		b.full = dfixed_const(mode->clock);
 943		b.full = dfixed_div(b, a);
 944		c.full = dfixed_const(latency_watermark_b);
 945		c.full = dfixed_mul(c, b);
 946		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
 947		c.full = dfixed_div(c, a);
 948		a.full = dfixed_const(16);
 949		c.full = dfixed_div(c, a);
 950		priority_b_mark = dfixed_trunc(c);
 951		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
 952
 953		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 954	}
 955
 956	/* select wm A */
 957	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 958	tmp = arb_control3;
 959	tmp &= ~LATENCY_WATERMARK_MASK(3);
 960	tmp |= LATENCY_WATERMARK_MASK(1);
 961	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
 962	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
 963	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
 964		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 965	/* select wm B */
 966	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 967	tmp &= ~LATENCY_WATERMARK_MASK(3);
 968	tmp |= LATENCY_WATERMARK_MASK(2);
 969	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
 970	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
 971	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
 972		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 973	/* restore original selection */
 974	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
 975
 976	/* write the priority marks */
 977	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
 978	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
 979
 980	/* save values for DPM */
 981	amdgpu_crtc->line_time = line_time;
 982	amdgpu_crtc->wm_high = latency_watermark_a;
 983
 984	/* Save number of lines the linebuffer leads before the scanout */
 985	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 986}
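/* Sketch of the priority mark math above, with illustrative numbers:
 * latency_watermark_a = 4000 ns at mode->clock = 148500 kHz and a 1.0
 * horizontal scale ratio is 4000 * 148.5 / 1000 = 594 pixel clocks of
 * latency, programmed as trunc(594 / 16) = 37.
 */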
 987
 988/* watermark setup */
 989static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
 990				   struct amdgpu_crtc *amdgpu_crtc,
 991				   struct drm_display_mode *mode,
 992				   struct drm_display_mode *other_mode)
 993{
 994	u32 tmp, buffer_alloc, i;
 995	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 996	/*
 997	 * Line Buffer Setup
 998	 * There are 3 line buffers, each one shared by 2 display controllers.
 999	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1000	 * the display controllers.  The partitioning is done via one of four
1001	 * preset allocations specified in bits 21:20:
1002	 *  0 - half lb
1003	 *  2 - whole lb, other crtc must be disabled
1004	 */
1005	/* this can get tricky if we have two large displays on a paired group
1006	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1007	 * non-linked crtcs for maximum line buffer allocation.
1008	 */
1009	if (amdgpu_crtc->base.enabled && mode) {
1010		if (other_mode) {
1011			tmp = 0; /* 1/2 */
1012			buffer_alloc = 1;
1013		} else {
1014			tmp = 2; /* whole */
1015			buffer_alloc = 2;
1016		}
1017	} else {
1018		tmp = 0;
1019		buffer_alloc = 0;
1020	}
1021
1022	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1023	       DC_LB_MEMORY_CONFIG(tmp));
1024
1025	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1026	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1027	for (i = 0; i < adev->usec_timeout; i++) {
1028		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1029		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1030			break;
1031		udelay(1);
1032	}
1033
1034	if (amdgpu_crtc->base.enabled && mode) {
1035		switch (tmp) {
1036		case 0:
1037		default:
1038			return 4096 * 2;
1039		case 2:
1040			return 8192 * 2;
1041		}
1042	}
1043
1044	/* controller not enabled, so no lb used */
1045	return 0;
1046}
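/* E.g., two enabled displays on a paired crtc group each get half a
 * line buffer (4096 * 2), while a display whose partner crtc is idle
 * gets the whole buffer (8192 * 2); the returned lb_size then feeds the
 * watermark calculation above.
 */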
1047
1048
1049/**
1050 * dce_v6_0_bandwidth_update - program display watermarks
1052 *
1053 * @adev: amdgpu_device pointer
1054 *
1055 * Calculate and program the display watermarks and line
1056 * buffer allocation (SI).
1057 */
1058static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1059{
1060	struct drm_display_mode *mode0 = NULL;
1061	struct drm_display_mode *mode1 = NULL;
1062	u32 num_heads = 0, lb_size;
1063	int i;
1064
1065	if (!adev->mode_info.mode_config_initialized)
1066		return;
1067
1068	amdgpu_display_update_priority(adev);
1069
1070	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1071		if (adev->mode_info.crtcs[i]->base.enabled)
1072			num_heads++;
1073	}
1074	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1075		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1076		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1077		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1078		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1079		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1080		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1081	}
1082}
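/* Crtcs are walked in pairs (i, i + 1) above because each line buffer
 * is shared by two display controllers; see
 * dce_v6_0_line_buffer_adjust().
 */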
1083
1084static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1085{
1086	int i;
1087	u32 tmp;
1088
1089	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1090		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1091				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1092		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1093					PORT_CONNECTIVITY))
1094			adev->mode_info.audio.pin[i].connected = false;
1095		else
1096			adev->mode_info.audio.pin[i].connected = true;
1097	}
1098
1099}
1100
1101static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1102{
1103	int i;
1104
1105	dce_v6_0_audio_get_connected_pins(adev);
1106
1107	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1108		if (adev->mode_info.audio.pin[i].connected)
1109			return &adev->mode_info.audio.pin[i];
1110	}
1111	DRM_ERROR("No connected audio pins found!\n");
1112	return NULL;
1113}
1114
1115static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1116{
1117	struct amdgpu_device *adev = encoder->dev->dev_private;
1118	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1119	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1120
1121	if (!dig || !dig->afmt || !dig->afmt->pin)
1122		return;
1123
1124	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1125	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1126		             dig->afmt->pin->id));
1127}
1128
1129static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1130						struct drm_display_mode *mode)
1131{
1132	struct drm_device *dev = encoder->dev;
1133	struct amdgpu_device *adev = dev->dev_private;
1134	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1135	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1136	struct drm_connector *connector;
1137	struct drm_connector_list_iter iter;
1138	struct amdgpu_connector *amdgpu_connector = NULL;
1139	int interlace = 0;
1140	u32 tmp;
1141
1142	drm_connector_list_iter_begin(dev, &iter);
1143	drm_for_each_connector_iter(connector, &iter) {
1144		if (connector->encoder == encoder) {
1145			amdgpu_connector = to_amdgpu_connector(connector);
1146			break;
1147		}
1148	}
1149	drm_connector_list_iter_end(&iter);
1150
1151	if (!amdgpu_connector) {
1152		DRM_ERROR("Couldn't find encoder's connector\n");
1153		return;
1154	}
1155
1156	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1157		interlace = 1;
1158
1159	if (connector->latency_present[interlace]) {
1160		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1161				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1162		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1163				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1164	} else {
1165		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1166				VIDEO_LIPSYNC, 0);
1167		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1168				AUDIO_LIPSYNC, 0);
1169	}
1170	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1171			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1172}
1173
1174static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1175{
1176	struct drm_device *dev = encoder->dev;
1177	struct amdgpu_device *adev = dev->dev_private;
1178	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1179	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1180	struct drm_connector *connector;
1181	struct drm_connector_list_iter iter;
1182	struct amdgpu_connector *amdgpu_connector = NULL;
1183	u8 *sadb = NULL;
1184	int sad_count;
1185	u32 tmp;
1186
1187	drm_connector_list_iter_begin(dev, &iter);
1188	drm_for_each_connector_iter(connector, &iter) {
1189		if (connector->encoder == encoder) {
1190			amdgpu_connector = to_amdgpu_connector(connector);
1191			break;
1192		}
1193	}
1194	drm_connector_list_iter_end(&iter);
1195
1196	if (!amdgpu_connector) {
1197		DRM_ERROR("Couldn't find encoder's connector\n");
1198		return;
1199	}
1200
1201	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1202	if (sad_count < 0) {
1203		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1204		sad_count = 0;
1205	}
1206
1207	/* program the speaker allocation */
1208	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1209			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1210	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1211			HDMI_CONNECTION, 0);
1212	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1213			DP_CONNECTION, 0);
1214
1215	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1216		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1217				DP_CONNECTION, 1);
1218	else
1219		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1220				HDMI_CONNECTION, 1);
1221
1222	if (sad_count)
1223		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1224				SPEAKER_ALLOCATION, sadb[0]);
1225	else
1226		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1227				SPEAKER_ALLOCATION, 5); /* stereo */
1228
1229	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1230			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1231
1232	kfree(sadb);
1233}
1234
1235static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1236{
1237	struct drm_device *dev = encoder->dev;
1238	struct amdgpu_device *adev = dev->dev_private;
1239	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1240	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1241	struct drm_connector *connector;
1242	struct drm_connector_list_iter iter;
1243	struct amdgpu_connector *amdgpu_connector = NULL;
1244	struct cea_sad *sads;
1245	int i, sad_count;
1246
1247	static const u16 eld_reg_to_type[][2] = {
1248		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1249		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1250		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1251		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1252		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1253		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1254		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1255		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1256		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1257		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1258		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1259		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1260	};
1261
1262	drm_connector_list_iter_begin(dev, &iter);
1263	drm_for_each_connector_iter(connector, &iter) {
1264		if (connector->encoder == encoder) {
1265			amdgpu_connector = to_amdgpu_connector(connector);
1266			break;
1267		}
1268	}
1269	drm_connector_list_iter_end(&iter);
1270
1271	if (!amdgpu_connector) {
1272		DRM_ERROR("Couldn't find encoder's connector\n");
1273		return;
1274	}
1275
1276	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1277	if (sad_count < 0)
1278		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1279	if (sad_count <= 0)
1280		return;
1281
1282	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1283		u32 tmp = 0;
1284		u8 stereo_freqs = 0;
1285		int max_channels = -1;
1286		int j;
1287
1288		for (j = 0; j < sad_count; j++) {
1289			struct cea_sad *sad = &sads[j];
1290
1291			if (sad->format == eld_reg_to_type[i][1]) {
1292				if (sad->channels > max_channels) {
1293					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1294							MAX_CHANNELS, sad->channels);
1295					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1296							DESCRIPTOR_BYTE_2, sad->byte2);
1297					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1298							SUPPORTED_FREQUENCIES, sad->freq);
1299					max_channels = sad->channels;
1300				}
1301
1302				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1303					stereo_freqs |= sad->freq;
1304				else
1305					break;
1306			}
1307		}
1308
1309		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1310				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1311		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1312	}
1313
1314	kfree(sads);
1315
1316}
1317
1318static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1319				  struct amdgpu_audio_pin *pin,
1320				  bool enable)
1321{
1322	if (!pin)
1323		return;
1324
1325	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1326			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1327}
1328
1329static const u32 pin_offsets[7] =
1330{
1331	(0x1780 - 0x1780),
1332	(0x1786 - 0x1780),
1333	(0x178c - 0x1780),
1334	(0x1792 - 0x1780),
1335	(0x1798 - 0x1780),
1336	(0x179d - 0x1780),
1337	(0x17a4 - 0x1780),
1338};
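/* The offsets above are register-index deltas relative to audio pin 0
 * at 0x1780; they become pin->offset and are used as the block_offset
 * in the RREG32_AUDIO_ENDPT()/WREG32_AUDIO_ENDPT() accessors.
 */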
1339
1340static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1341{
1342	int i;
1343
1344	if (!amdgpu_audio)
1345		return 0;
1346
1347	adev->mode_info.audio.enabled = true;
1348
1349	switch (adev->asic_type) {
1350	case CHIP_TAHITI:
1351	case CHIP_PITCAIRN:
1352	case CHIP_VERDE:
1353	default:
1354		adev->mode_info.audio.num_pins = 6;
1355		break;
1356	case CHIP_OLAND:
1357		adev->mode_info.audio.num_pins = 2;
1358		break;
1359	}
1360
1361	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1362		adev->mode_info.audio.pin[i].channels = -1;
1363		adev->mode_info.audio.pin[i].rate = -1;
1364		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1365		adev->mode_info.audio.pin[i].status_bits = 0;
1366		adev->mode_info.audio.pin[i].category_code = 0;
1367		adev->mode_info.audio.pin[i].connected = false;
1368		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1369		adev->mode_info.audio.pin[i].id = i;
1370		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1371	}
1372
1373	return 0;
1374}
1375
1376static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1377{
1378	int i;
1379
1380	if (!amdgpu_audio)
1381		return;
1382
1383	if (!adev->mode_info.audio.enabled)
1384		return;
1385
1386	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1387		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1388
1389	adev->mode_info.audio.enabled = false;
1390}
1391
1392static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1393{
1394	struct drm_device *dev = encoder->dev;
1395	struct amdgpu_device *adev = dev->dev_private;
1396	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1397	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1398	u32 tmp;
1399
1400	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1401	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1402	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1403	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1404	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1405}
1406
1407static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1408				   uint32_t clock, int bpc)
1409{
1410	struct drm_device *dev = encoder->dev;
1411	struct amdgpu_device *adev = dev->dev_private;
1412	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1413	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1414	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1415	u32 tmp;
1416
1417	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1418	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1419	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1420			bpc > 8 ? 0 : 1);
1421	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1422
1423	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1424	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1425	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1426	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1427	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1428	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1429
1430	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1431	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1432	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1433	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1434	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1435	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1436
1437	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1438	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1439	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1440	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1441	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1442	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1443}
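/* The N/CTS pairs written above implement HDMI audio clock
 * regeneration, f_audio = f_pixel * N / (128 * CTS).  E.g.
 * (illustrative): 48 kHz audio on a 25.2 MHz pixel clock with the
 * spec-recommended N = 6144 needs CTS = 25200000 * 6144 /
 * (128 * 48000) = 25200.
 */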
1444
1445static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1446					       struct drm_display_mode *mode)
1447{
1448	struct drm_device *dev = encoder->dev;
1449	struct amdgpu_device *adev = dev->dev_private;
1450	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1451	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1452	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1453	struct hdmi_avi_infoframe frame;
1454	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1455	uint8_t *payload = buffer + 3;
1456	uint8_t *header = buffer;
1457	ssize_t err;
1458	u32 tmp;
1459
1460	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1461	if (err < 0) {
1462		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1463		return;
1464	}
1465
1466	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1467	if (err < 0) {
1468		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1469		return;
1470	}
1471
1472	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1473	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1474	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1475	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1476	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1477	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1478	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1479	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1480
1481	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1482	/* anything other than 0 */
1483	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1484			HDMI_AUDIO_INFO_LINE, 2);
1485	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1486}
1487
1488static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1489{
1490	struct drm_device *dev = encoder->dev;
1491	struct amdgpu_device *adev = dev->dev_private;
1492	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1493	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1494	u32 tmp;
1495
1496	/*
1497	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1498	 * Express [24MHz / target pixel clock] as an exact rational
1499	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE
1500	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1501	 */
1502	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1503	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1504			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1505	if (em == ATOM_ENCODER_MODE_HDMI) {
1506		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1507				DCCG_AUDIO_DTO_SEL, 0);
1508	} else if (ENCODER_MODE_IS_DP(em)) {
1509		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1510				DCCG_AUDIO_DTO_SEL, 1);
1511	}
1512	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1513	if (em == ATOM_ENCODER_MODE_HDMI) {
1514		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1515		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1516	} else if (ENCODER_MODE_IS_DP(em)) {
1517		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1518		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1519	}
1520}
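/* E.g., for a 148500 kHz pixel clock the DTO above divides
 * PHASE = 24000 by MODULE = 148500, regenerating the 24 MHz audio
 * reference as the exact ratio 24000 / 148500 of the pixel clock.
 */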
1521
1522static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1523{
1524	struct drm_device *dev = encoder->dev;
1525	struct amdgpu_device *adev = dev->dev_private;
1526	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1527	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1528	u32 tmp;
1529
1530	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1531	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1532	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1533
1534	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1535	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1536	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1537
1538	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1539	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1540	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1541
1542	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1543	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1544	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1545	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1546	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1547	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1548	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1549	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1550
1551	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1552	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1553	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1554
1555	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1556	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1557	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1558	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1559
1560	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1561	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1562	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1563	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1564}
1565
1566static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1567{
1568	struct drm_device *dev = encoder->dev;
1569	struct amdgpu_device *adev = dev->dev_private;
1570	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1571	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1572	u32 tmp;
1573
1574	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1575	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1576	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1577}
1578
1579static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1580{
1581	struct drm_device *dev = encoder->dev;
1582	struct amdgpu_device *adev = dev->dev_private;
1583	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1584	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1585	u32 tmp;
1586
1587	if (enable) {
1588		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1589		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1590		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1591		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1592		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1593		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1594
1595		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1596		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1597		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1598
1599		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1600		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1601		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1602	} else {
1603		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1604		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1605		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1606		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1607		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1608		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1609
1610		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1611		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1612		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1613	}
1614}
1615
1616static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1617{
1618	struct drm_device *dev = encoder->dev;
1619	struct amdgpu_device *adev = dev->dev_private;
1620	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1621	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1622	u32 tmp;
1623
1624	if (enable) {
1625		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1626		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1627		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1628
1629		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1630		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1631		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1632
1633		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1634		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1635		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1636		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1637		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1638		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1639	} else {
1640		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1641	}
1642}
1643
1644static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1645				  struct drm_display_mode *mode)
1646{
1647	struct drm_device *dev = encoder->dev;
1648	struct amdgpu_device *adev = dev->dev_private;
1649	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1650	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1651	struct drm_connector *connector;
1652	struct drm_connector_list_iter iter;
1653	struct amdgpu_connector *amdgpu_connector = NULL;
1654	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1655	int bpc = 8;
1656
1657	if (!dig || !dig->afmt)
1658		return;
1659
1660	drm_connector_list_iter_begin(dev, &iter);
1661	drm_for_each_connector_iter(connector, &iter) {
1662		if (connector->encoder == encoder) {
1663			amdgpu_connector = to_amdgpu_connector(connector);
1664			break;
1665		}
1666	}
1667	drm_connector_list_iter_end(&iter);
1668
1669	if (!amdgpu_connector) {
1670		DRM_ERROR("Couldn't find encoder's connector\n");
1671		return;
1672	}
1673
1674	if (!dig->afmt->enabled)
1675		return;
1676
1677	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1678	if (!dig->afmt->pin)
1679		return;
1680
1681	if (encoder->crtc) {
1682		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1683		bpc = amdgpu_crtc->bpc;
1684	}
1685
1686	/* disable audio before setting up hw */
1687	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1688
1689	dce_v6_0_audio_set_mute(encoder, true);
1690	dce_v6_0_audio_write_speaker_allocation(encoder);
1691	dce_v6_0_audio_write_sad_regs(encoder);
1692	dce_v6_0_audio_write_latency_fields(encoder, mode);
1693	if (em == ATOM_ENCODER_MODE_HDMI) {
1694		dce_v6_0_audio_set_dto(encoder, mode->clock);
1695		dce_v6_0_audio_set_vbi_packet(encoder);
1696		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1697	} else if (ENCODER_MODE_IS_DP(em)) {
1698		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1699	}
1700	dce_v6_0_audio_set_packet(encoder);
1701	dce_v6_0_audio_select_pin(encoder);
1702	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1703	dce_v6_0_audio_set_mute(encoder, false);
1704	if (em == ATOM_ENCODER_MODE_HDMI) {
1705		dce_v6_0_audio_hdmi_enable(encoder, true);
1706	} else if (ENCODER_MODE_IS_DP(em)) {
1707		dce_v6_0_audio_dp_enable(encoder, true);
1708	}
1709
1710	/* enable audio after setting up hw */
1711	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1712}
1713
1714static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1715{
1716	struct drm_device *dev = encoder->dev;
1717	struct amdgpu_device *adev = dev->dev_private;
1718	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1719	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1720
1721	if (!dig || !dig->afmt)
1722		return;
1723
1724	/* Silently skip if already in the requested state; no WARN needed */
1725	if (enable && dig->afmt->enabled)
1726		return;
1727
1728	if (!enable && !dig->afmt->enabled)
1729		return;
1730
1731	if (!enable && dig->afmt->pin) {
1732		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1733		dig->afmt->pin = NULL;
1734	}
1735
1736	dig->afmt->enabled = enable;
1737
1738	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1739		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1740}
1741
1742static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1743{
1744	int i, j;
1745
1746	for (i = 0; i < adev->mode_info.num_dig; i++)
1747		adev->mode_info.afmt[i] = NULL;
1748
1749	/* DCE6 has audio blocks tied to DIG encoders */
1750	for (i = 0; i < adev->mode_info.num_dig; i++) {
1751		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1752		if (adev->mode_info.afmt[i]) {
1753			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1754			adev->mode_info.afmt[i]->id = i;
1755		} else {
1756			for (j = 0; j < i; j++) {
1757				kfree(adev->mode_info.afmt[j]);
1758				adev->mode_info.afmt[j] = NULL;
1759			}
1760			DRM_ERROR("Out of memory allocating afmt table\n");
1761			return -ENOMEM;
1762		}
1763	}
1764	return 0;
1765}
1766
1767static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1768{
1769	int i;
1770
1771	for (i = 0; i < adev->mode_info.num_dig; i++) {
1772		kfree(adev->mode_info.afmt[i]);
1773		adev->mode_info.afmt[i] = NULL;
1774	}
1775}
1776
1777static const u32 vga_control_regs[6] =
1778{
1779	mmD1VGA_CONTROL,
1780	mmD2VGA_CONTROL,
1781	mmD3VGA_CONTROL,
1782	mmD4VGA_CONTROL,
1783	mmD5VGA_CONTROL,
1784	mmD6VGA_CONTROL,
1785};
1786
1787static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1788{
1789	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1790	struct drm_device *dev = crtc->dev;
1791	struct amdgpu_device *adev = dev->dev_private;
1792	u32 vga_control;
1793
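	/* bit 0 of the per-display DxVGA_CONTROL register enables legacy VGA output on this crtc */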
1794	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1795	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1796}
1797
1798static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1799{
1800	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1801	struct drm_device *dev = crtc->dev;
1802	struct amdgpu_device *adev = dev->dev_private;
1803
1804	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1805}
1806
1807static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1808				     struct drm_framebuffer *fb,
1809				     int x, int y, int atomic)
1810{
1811	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1812	struct drm_device *dev = crtc->dev;
1813	struct amdgpu_device *adev = dev->dev_private;
1814	struct drm_framebuffer *target_fb;
1815	struct drm_gem_object *obj;
1816	struct amdgpu_bo *abo;
1817	uint64_t fb_location, tiling_flags;
1818	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1819	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1820	u32 viewport_w, viewport_h;
1821	int r;
1822	bool bypass_lut = false;
1823	struct drm_format_name_buf format_name;
1824
1825	/* no fb bound */
1826	if (!atomic && !crtc->primary->fb) {
1827		DRM_DEBUG_KMS("No FB bound\n");
1828		return 0;
1829	}
1830
1831	if (atomic)
1832		target_fb = fb;
1833	else
1834		target_fb = crtc->primary->fb;
1835
1836	/* If atomic, assume fb object is pinned & idle & fenced and
1837	 * just update base pointers
1838	 */
1839	obj = target_fb->obj[0];
1840	abo = gem_to_amdgpu_bo(obj);
1841	r = amdgpu_bo_reserve(abo, false);
1842	if (unlikely(r != 0))
1843		return r;
1844
1845	if (!atomic) {
1846		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1847		if (unlikely(r != 0)) {
1848			amdgpu_bo_unreserve(abo);
1849			return -EINVAL;
1850		}
1851	}
1852	fb_location = amdgpu_bo_gpu_offset(abo);
1853
1854	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1855	amdgpu_bo_unreserve(abo);
1856
1857	switch (target_fb->format->format) {
1858	case DRM_FORMAT_C8:
1859		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1860			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1861		break;
1862	case DRM_FORMAT_XRGB4444:
1863	case DRM_FORMAT_ARGB4444:
1864		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1865			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1866#ifdef __BIG_ENDIAN
1867		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1868#endif
1869		break;
1870	case DRM_FORMAT_XRGB1555:
1871	case DRM_FORMAT_ARGB1555:
1872		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1873			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1874#ifdef __BIG_ENDIAN
1875		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1876#endif
1877		break;
1878	case DRM_FORMAT_BGRX5551:
1879	case DRM_FORMAT_BGRA5551:
1880		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1881			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1882#ifdef __BIG_ENDIAN
1883		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1884#endif
1885		break;
1886	case DRM_FORMAT_RGB565:
1887		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1888			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1889#ifdef __BIG_ENDIAN
1890		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1891#endif
1892		break;
1893	case DRM_FORMAT_XRGB8888:
1894	case DRM_FORMAT_ARGB8888:
1895		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1896			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1897#ifdef __BIG_ENDIAN
1898		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1899#endif
1900		break;
1901	case DRM_FORMAT_XRGB2101010:
1902	case DRM_FORMAT_ARGB2101010:
1903		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1904			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1905#ifdef __BIG_ENDIAN
1906		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1907#endif
1908		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1909		bypass_lut = true;
1910		break;
1911	case DRM_FORMAT_BGRX1010102:
1912	case DRM_FORMAT_BGRA1010102:
1913		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1914			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1915#ifdef __BIG_ENDIAN
1916		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1917#endif
1918		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1919		bypass_lut = true;
1920		break;
1921	case DRM_FORMAT_XBGR8888:
1922	case DRM_FORMAT_ABGR8888:
1923		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1924			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1925		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1926			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1927#ifdef __BIG_ENDIAN
1928		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1929#endif
1930		break;
1931	default:
1932		DRM_ERROR("Unsupported screen format %s\n",
1933		          drm_get_format_name(target_fb->format->format, &format_name));
1934		return -EINVAL;
1935	}
1936
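	/* macro (2D) tiled surfaces need the bank/tile-split geometry stored in the BO's tiling flags */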
1937	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1938		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1939
1940		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1941		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1942		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1943		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1944		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1945
1946		fb_format |= GRPH_NUM_BANKS(num_banks);
1947		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1948		fb_format |= GRPH_TILE_SPLIT(tile_split);
1949		fb_format |= GRPH_BANK_WIDTH(bankw);
1950		fb_format |= GRPH_BANK_HEIGHT(bankh);
1951		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1952	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1953		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1954	}
1955
1956	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1957	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1958
1959	dce_v6_0_vga_enable(crtc, false);
1960
1961	/* Make sure surface address is updated at vertical blank rather than
1962	 * horizontal blank
1963	 */
1964	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1965
1966	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1967	       upper_32_bits(fb_location));
1968	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1969	       upper_32_bits(fb_location));
1970	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1971	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1972	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1973	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1974	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1975	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1976
1977	/*
1978	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1979	 * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs, so
1980	 * the full precision is retained throughout the pipeline.
1981	 */
1982	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1983		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1984		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1985
1986	if (bypass_lut)
1987		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1988
1989	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1990	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1991	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1992	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1993	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1994	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1995
1996	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1997	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1998
1999	dce_v6_0_grph_enable(crtc, true);
2000
2001	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2002		       target_fb->height);
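	/* align the viewport start: x to a multiple of 4 pixels, y to a multiple of 2 lines */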
2003	x &= ~3;
2004	y &= ~1;
2005	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2006	       (x << 16) | y);
2007	viewport_w = crtc->mode.hdisplay;
2008	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2009
2010	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2011	       (viewport_w << 16) | viewport_h);
2012
2013	/* set pageflip to happen anywhere in vblank interval */
2014	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2015
2016	if (!atomic && fb && fb != crtc->primary->fb) {
2017		abo = gem_to_amdgpu_bo(fb->obj[0]);
2018		r = amdgpu_bo_reserve(abo, true);
2019		if (unlikely(r != 0))
2020			return r;
2021		amdgpu_bo_unpin(abo);
2022		amdgpu_bo_unreserve(abo);
2023	}
2024
2025	/* Bytes per pixel may have changed */
2026	dce_v6_0_bandwidth_update(adev);
2027
2028	return 0;
2029
2030}
2031
2032static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2033				    struct drm_display_mode *mode)
2034{
2035	struct drm_device *dev = crtc->dev;
2036	struct amdgpu_device *adev = dev->dev_private;
2037	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2038
2039	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2040		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2041		       INTERLEAVE_EN);
2042	else
2043		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2044}
2045
2046static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2047{
2048
2049	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2050	struct drm_device *dev = crtc->dev;
2051	struct amdgpu_device *adev = dev->dev_private;
2052	u16 *r, *g, *b;
2053	int i;
2054
2055	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2056
2057	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2058	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2059		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2060	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2061	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2062	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2063	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2064	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2065	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2066		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2067
2068	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2069
2070	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2071	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2072	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2073
2074	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2075	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2076	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2077
2078	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2079	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2080
2081	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2082	r = crtc->gamma_store;
2083	g = r + crtc->gamma_size;
2084	b = g + crtc->gamma_size;
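	/* pack the top 10 bits of each 16-bit gamma entry into a single 30-bit DC_LUT_30_COLOR word */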
2085	for (i = 0; i < 256; i++) {
2086		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2087		       ((*r++ & 0xffc0) << 14) |
2088		       ((*g++ & 0xffc0) << 4) |
2089		       (*b++ >> 6));
2090	}
2091
2092	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2093	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2094		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2095		ICON_DEGAMMA_MODE(0) |
2096		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2097	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2098	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2099		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2100	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2101	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2102		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2103	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2104	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2105		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2106	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2107	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2108
2109
2110}
2111
2112static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2113{
2114	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2115	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2116
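	/* each UNIPHY block drives two links (A/B) that map to consecutive DIG encoders */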
2117	switch (amdgpu_encoder->encoder_id) {
2118	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2119		return dig->linkb ? 1 : 0;
2120	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2121		return dig->linkb ? 3 : 2;
2122	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2123		return dig->linkb ? 5 : 4;
2124	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2125		return 6;
2126	default:
2127		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2128		return 0;
2129	}
2130}
2131
2132/**
2133 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2134 *
2135 * @crtc: drm crtc
2136 *
2137 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2138 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2139 * monitors a dedicated PPLL must be used.  If a particular board has
2140 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2141 * as there is no need to program the PLL itself.  If we are not able to
2142 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2143 * avoid messing up an existing monitor.
2144 *
2146 */
2147static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2148{
2149	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2150	struct drm_device *dev = crtc->dev;
2151	struct amdgpu_device *adev = dev->dev_private;
2152	u32 pll_in_use;
2153	int pll;
2154
2155	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2156		if (adev->clock.dp_extclk)
2157			/* skip PPLL programming if using ext clock */
2158			return ATOM_PPLL_INVALID;
2159		else
2160			return ATOM_PPLL0;
2161	} else {
2162		/* use the same PPLL for all monitors with the same clock */
2163		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2164		if (pll != ATOM_PPLL_INVALID)
2165			return pll;
2166	}
2167
2168	/* otherwise pick whichever of PPLL1 and PPLL2 is still free */
2169	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2170	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2171		return ATOM_PPLL2;
2172	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2173		return ATOM_PPLL1;
2174	DRM_ERROR("unable to allocate a PPLL\n");
2175	return ATOM_PPLL_INVALID;
2176}
2177
2178static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2179{
2180	struct amdgpu_device *adev = crtc->dev->dev_private;
2181	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2182	uint32_t cur_lock;
2183
2184	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2185	if (lock)
2186		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2187	else
2188		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2189	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2190}
2191
2192static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2193{
2194	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2195	struct amdgpu_device *adev = crtc->dev->dev_private;
2196
2197	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2198	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2199	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2200
2201
2202}
2203
2204static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2205{
2206	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2207	struct amdgpu_device *adev = crtc->dev->dev_private;
2208
2209	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2210	       upper_32_bits(amdgpu_crtc->cursor_addr));
2211	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2212	       lower_32_bits(amdgpu_crtc->cursor_addr));
2213
2214	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2215	       CUR_CONTROL__CURSOR_EN_MASK |
2216	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2217	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2218
2219}
2220
2221static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2222				       int x, int y)
2223{
2224	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2225	struct amdgpu_device *adev = crtc->dev->dev_private;
2226	int xorigin = 0, yorigin = 0;
2227
2228	int w = amdgpu_crtc->cursor_width;
2229
2230	amdgpu_crtc->cursor_x = x;
2231	amdgpu_crtc->cursor_y = y;
2232
2233	/* avivo cursors are offset into the total surface */
2234	x += crtc->x;
2235	y += crtc->y;
2236	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2237
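	/* the cursor position registers cannot hold negative values; clamp to zero and shift the hotspot instead */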
2238	if (x < 0) {
2239		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2240		x = 0;
2241	}
2242	if (y < 0) {
2243		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2244		y = 0;
2245	}
2246
2247	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2248	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2249	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2250	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2251
2252	return 0;
2253}
2254
2255static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2256				     int x, int y)
2257{
2258	int ret;
2259
2260	dce_v6_0_lock_cursor(crtc, true);
2261	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2262	dce_v6_0_lock_cursor(crtc, false);
2263
2264	return ret;
2265}
2266
2267static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2268				     struct drm_file *file_priv,
2269				     uint32_t handle,
2270				     uint32_t width,
2271				     uint32_t height,
2272				     int32_t hot_x,
2273				     int32_t hot_y)
2274{
2275	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2276	struct drm_gem_object *obj;
2277	struct amdgpu_bo *aobj;
2278	int ret;
2279
2280	if (!handle) {
2281		/* turn off cursor */
2282		dce_v6_0_hide_cursor(crtc);
2283		obj = NULL;
2284		goto unpin;
2285	}
2286
2287	if ((width > amdgpu_crtc->max_cursor_width) ||
2288	    (height > amdgpu_crtc->max_cursor_height)) {
2289		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2290		return -EINVAL;
2291	}
2292
2293	obj = drm_gem_object_lookup(file_priv, handle);
2294	if (!obj) {
2295		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2296		return -ENOENT;
2297	}
2298
2299	aobj = gem_to_amdgpu_bo(obj);
2300	ret = amdgpu_bo_reserve(aobj, false);
2301	if (ret != 0) {
2302		drm_gem_object_put(obj);
2303		return ret;
2304	}
2305
2306	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2307	amdgpu_bo_unreserve(aobj);
2308	if (ret) {
2309		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2310		drm_gem_object_put(obj);
2311		return ret;
2312	}
2313	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2314
2315	dce_v6_0_lock_cursor(crtc, true);
2316
2317	if (width != amdgpu_crtc->cursor_width ||
2318	    height != amdgpu_crtc->cursor_height ||
2319	    hot_x != amdgpu_crtc->cursor_hot_x ||
2320	    hot_y != amdgpu_crtc->cursor_hot_y) {
2321		int x, y;
2322
2323		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2324		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2325
2326		dce_v6_0_cursor_move_locked(crtc, x, y);
2327
2328		amdgpu_crtc->cursor_width = width;
2329		amdgpu_crtc->cursor_height = height;
2330		amdgpu_crtc->cursor_hot_x = hot_x;
2331		amdgpu_crtc->cursor_hot_y = hot_y;
2332	}
2333
2334	dce_v6_0_show_cursor(crtc);
2335	dce_v6_0_lock_cursor(crtc, false);
2336
2337unpin:
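	/* drop the pin and the reference on the previous cursor BO, if any */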
2338	if (amdgpu_crtc->cursor_bo) {
2339		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2340		ret = amdgpu_bo_reserve(aobj, true);
2341		if (likely(ret == 0)) {
2342			amdgpu_bo_unpin(aobj);
2343			amdgpu_bo_unreserve(aobj);
2344		}
2345		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2346	}
2347
2348	amdgpu_crtc->cursor_bo = obj;
2349	return 0;
2350}
2351
2352static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2353{
2354	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2355
2356	if (amdgpu_crtc->cursor_bo) {
2357		dce_v6_0_lock_cursor(crtc, true);
2358
2359		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2360					    amdgpu_crtc->cursor_y);
2361
2362		dce_v6_0_show_cursor(crtc);
2363		dce_v6_0_lock_cursor(crtc, false);
2364	}
2365}
2366
2367static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2368				   u16 *blue, uint32_t size,
2369				   struct drm_modeset_acquire_ctx *ctx)
2370{
2371	dce_v6_0_crtc_load_lut(crtc);
2372
2373	return 0;
2374}
2375
2376static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2377{
2378	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2379
2380	drm_crtc_cleanup(crtc);
2381	kfree(amdgpu_crtc);
2382}
2383
2384static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2385	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2386	.cursor_move = dce_v6_0_crtc_cursor_move,
2387	.gamma_set = dce_v6_0_crtc_gamma_set,
2388	.set_config = amdgpu_display_crtc_set_config,
2389	.destroy = dce_v6_0_crtc_destroy,
2390	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2391	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2392	.enable_vblank = amdgpu_enable_vblank_kms,
2393	.disable_vblank = amdgpu_disable_vblank_kms,
2394	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2395};
2396
2397static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2398{
2399	struct drm_device *dev = crtc->dev;
2400	struct amdgpu_device *adev = dev->dev_private;
2401	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2402	unsigned type;
2403
2404	switch (mode) {
2405	case DRM_MODE_DPMS_ON:
2406		amdgpu_crtc->enabled = true;
2407		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2408		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2409		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2410		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2411						amdgpu_crtc->crtc_id);
2412		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2413		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2414		drm_crtc_vblank_on(crtc);
2415		dce_v6_0_crtc_load_lut(crtc);
2416		break;
2417	case DRM_MODE_DPMS_STANDBY:
2418	case DRM_MODE_DPMS_SUSPEND:
2419	case DRM_MODE_DPMS_OFF:
2420		drm_crtc_vblank_off(crtc);
2421		if (amdgpu_crtc->enabled)
2422			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2423		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2424		amdgpu_crtc->enabled = false;
2425		break;
2426	}
2427	/* adjust pm to dpms */
2428	amdgpu_pm_compute_clocks(adev);
2429}
2430
2431static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2432{
2433	/* disable crtc pair power gating before programming */
2434	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2435	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2436	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2437}
2438
2439static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2440{
2441	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2442	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2443}
2444
2445static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2446{
2447
2448	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2449	struct drm_device *dev = crtc->dev;
2450	struct amdgpu_device *adev = dev->dev_private;
2451	struct amdgpu_atom_ss ss;
2452	int i;
2453
2454	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2455	if (crtc->primary->fb) {
2456		int r;
2457		struct amdgpu_bo *abo;
2458
2459		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2460		r = amdgpu_bo_reserve(abo, true);
2461		if (unlikely(r))
2462			DRM_ERROR("failed to reserve abo before unpin\n");
2463		else {
2464			amdgpu_bo_unpin(abo);
2465			amdgpu_bo_unreserve(abo);
2466		}
2467	}
2468	/* disable the GRPH */
2469	dce_v6_0_grph_enable(crtc, false);
2470
2471	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2472
2473	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2474		if (adev->mode_info.crtcs[i] &&
2475		    adev->mode_info.crtcs[i]->enabled &&
2476		    i != amdgpu_crtc->crtc_id &&
2477		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2478			/* another crtc is still using this pll, so don't
2479			 * turn it off
2480			 */
2481			goto done;
2482		}
2483	}
2484
2485	switch (amdgpu_crtc->pll_id) {
2486	case ATOM_PPLL1:
2487	case ATOM_PPLL2:
2488		/* disable the ppll */
2489		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2490						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2491		break;
2492	default:
2493		break;
2494	}
2495done:
2496	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2497	amdgpu_crtc->adjusted_clock = 0;
2498	amdgpu_crtc->encoder = NULL;
2499	amdgpu_crtc->connector = NULL;
2500}
2501
2502static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2503				  struct drm_display_mode *mode,
2504				  struct drm_display_mode *adjusted_mode,
2505				  int x, int y, struct drm_framebuffer *old_fb)
2506{
2507	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2508
2509	if (!amdgpu_crtc->adjusted_clock)
2510		return -EINVAL;
2511
2512	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2513	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2514	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2515	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2516	amdgpu_atombios_crtc_scaler_setup(crtc);
2517	dce_v6_0_cursor_reset(crtc);
2518	/* update the hw mode for dpm */
2519	amdgpu_crtc->hw_mode = *adjusted_mode;
2520
2521	return 0;
2522}
2523
2524static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2525				     const struct drm_display_mode *mode,
2526				     struct drm_display_mode *adjusted_mode)
2527{
2528
2529	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2530	struct drm_device *dev = crtc->dev;
2531	struct drm_encoder *encoder;
2532
2533	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2534	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2535		if (encoder->crtc == crtc) {
2536			amdgpu_crtc->encoder = encoder;
2537			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2538			break;
2539		}
2540	}
2541	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2542		amdgpu_crtc->encoder = NULL;
2543		amdgpu_crtc->connector = NULL;
2544		return false;
2545	}
2546	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2547		return false;
2548	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2549		return false;
2550	/* pick pll */
2551	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2552	/* if we can't get a PPLL for a non-DP encoder, fail */
2553	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2554	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2555		return false;
2556
2557	return true;
2558}
2559
2560static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2561				  struct drm_framebuffer *old_fb)
2562{
2563	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2564}
2565
2566static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2567					 struct drm_framebuffer *fb,
2568					 int x, int y, enum mode_set_atomic state)
2569{
2570	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2571}
2572
2573static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2574	.dpms = dce_v6_0_crtc_dpms,
2575	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2576	.mode_set = dce_v6_0_crtc_mode_set,
2577	.mode_set_base = dce_v6_0_crtc_set_base,
2578	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2579	.prepare = dce_v6_0_crtc_prepare,
2580	.commit = dce_v6_0_crtc_commit,
2581	.disable = dce_v6_0_crtc_disable,
2582	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2583};
2584
2585static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2586{
2587	struct amdgpu_crtc *amdgpu_crtc;
2588
2589	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2590			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2591	if (amdgpu_crtc == NULL)
2592		return -ENOMEM;
2593
2594	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2595
2596	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2597	amdgpu_crtc->crtc_id = index;
2598	adev->mode_info.crtcs[index] = amdgpu_crtc;
2599
2600	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2601	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2602	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2603	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2604
2605	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2606
2607	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2608	amdgpu_crtc->adjusted_clock = 0;
2609	amdgpu_crtc->encoder = NULL;
2610	amdgpu_crtc->connector = NULL;
2611	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2612
2613	return 0;
2614}
2615
2616static int dce_v6_0_early_init(void *handle)
2617{
2618	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2619
2620	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2621	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2622
2623	dce_v6_0_set_display_funcs(adev);
2624
2625	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2626
2627	switch (adev->asic_type) {
2628	case CHIP_TAHITI:
2629	case CHIP_PITCAIRN:
2630	case CHIP_VERDE:
2631		adev->mode_info.num_hpd = 6;
2632		adev->mode_info.num_dig = 6;
2633		break;
2634	case CHIP_OLAND:
2635		adev->mode_info.num_hpd = 2;
2636		adev->mode_info.num_dig = 2;
2637		break;
2638	default:
2639		return -EINVAL;
2640	}
2641
2642	dce_v6_0_set_irq_funcs(adev);
2643
2644	return 0;
2645}
2646
2647static int dce_v6_0_sw_init(void *handle)
2648{
2649	int r, i;
2650	bool ret;
2651	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2652
2653	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2654		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2655		if (r)
2656			return r;
2657	}
2658
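	/* pageflip interrupts use legacy src_ids 8, 10, ..., 18, one per crtc (decoded in dce_v6_0_pageflip_irq) */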
2659	for (i = 8; i < 20; i += 2) {
2660		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2661		if (r)
2662			return r;
2663	}
2664
2665	/* HPD hotplug */
2666	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2667	if (r)
2668		return r;
2669
2670	adev->mode_info.mode_config_initialized = true;
2671
2672	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2673	adev->ddev->mode_config.async_page_flip = true;
2674	adev->ddev->mode_config.max_width = 16384;
2675	adev->ddev->mode_config.max_height = 16384;
2676	adev->ddev->mode_config.preferred_depth = 24;
2677	adev->ddev->mode_config.prefer_shadow = 1;
2678	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2679
2680	r = amdgpu_display_modeset_create_props(adev);
2681	if (r)
2682		return r;
2683
2684	adev->ddev->mode_config.max_width = 16384;
2685	adev->ddev->mode_config.max_height = 16384;
2686
2687	/* allocate crtcs */
2688	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2689		r = dce_v6_0_crtc_init(adev, i);
2690		if (r)
2691			return r;
2692	}
2693
2694	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2695	if (ret)
2696		amdgpu_display_print_display_setup(adev->ddev);
2697	else
2698		return -EINVAL;
2699
2700	/* setup afmt */
2701	r = dce_v6_0_afmt_init(adev);
2702	if (r)
2703		return r;
2704
2705	r = dce_v6_0_audio_init(adev);
2706	if (r)
2707		return r;
2708
2709	drm_kms_helper_poll_init(adev->ddev);
2710
2711	return r;
2712}
2713
2714static int dce_v6_0_sw_fini(void *handle)
2715{
2716	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2717
2718	kfree(adev->mode_info.bios_hardcoded_edid);
2719
2720	drm_kms_helper_poll_fini(adev->ddev);
2721
2722	dce_v6_0_audio_fini(adev);
2723	dce_v6_0_afmt_fini(adev);
2724
2725	drm_mode_config_cleanup(adev->ddev);
2726	adev->mode_info.mode_config_initialized = false;
2727
2728	return 0;
2729}
2730
2731static int dce_v6_0_hw_init(void *handle)
2732{
2733	int i;
2734	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2735
2736	/* disable vga render */
2737	dce_v6_0_set_vga_render_state(adev, false);
2738	/* init dig PHYs, disp eng pll */
2739	amdgpu_atombios_encoder_init_dig(adev);
2740	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2741
2742	/* initialize hpd */
2743	dce_v6_0_hpd_init(adev);
2744
2745	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2746		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2747	}
2748
2749	dce_v6_0_pageflip_interrupt_init(adev);
2750
2751	return 0;
2752}
2753
2754static int dce_v6_0_hw_fini(void *handle)
2755{
2756	int i;
2757	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2758
2759	dce_v6_0_hpd_fini(adev);
2760
2761	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2762		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2763	}
2764
2765	dce_v6_0_pageflip_interrupt_fini(adev);
2766
2767	return 0;
2768}
2769
2770static int dce_v6_0_suspend(void *handle)
2771{
2772	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2773
2774	adev->mode_info.bl_level =
2775		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2776
2777	return dce_v6_0_hw_fini(handle);
2778}
2779
2780static int dce_v6_0_resume(void *handle)
2781{
2782	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2783	int ret;
2784
2785	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2786							   adev->mode_info.bl_level);
2787
2788	ret = dce_v6_0_hw_init(handle);
2789
2790	/* turn on the BL */
2791	if (adev->mode_info.bl_encoder) {
2792		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2793								  adev->mode_info.bl_encoder);
2794		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2795						    bl_level);
2796	}
2797
2798	return ret;
2799}
2800
2801static bool dce_v6_0_is_idle(void *handle)
2802{
2803	return true;
2804}
2805
2806static int dce_v6_0_wait_for_idle(void *handle)
2807{
2808	return 0;
2809}
2810
2811static int dce_v6_0_soft_reset(void *handle)
2812{
2813	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2814	return 0;
2815}
2816
2817static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2818						     int crtc,
2819						     enum amdgpu_interrupt_state state)
2820{
2821	u32 reg_block, interrupt_mask;
2822
2823	if (crtc >= adev->mode_info.num_crtc) {
2824		DRM_DEBUG("invalid crtc %d\n", crtc);
2825		return;
2826	}
2827
2828	switch (crtc) {
2829	case 0:
2830		reg_block = SI_CRTC0_REGISTER_OFFSET;
2831		break;
2832	case 1:
2833		reg_block = SI_CRTC1_REGISTER_OFFSET;
2834		break;
2835	case 2:
2836		reg_block = SI_CRTC2_REGISTER_OFFSET;
2837		break;
2838	case 3:
2839		reg_block = SI_CRTC3_REGISTER_OFFSET;
2840		break;
2841	case 4:
2842		reg_block = SI_CRTC4_REGISTER_OFFSET;
2843		break;
2844	case 5:
2845		reg_block = SI_CRTC5_REGISTER_OFFSET;
2846		break;
2847	default:
2848		DRM_DEBUG("invalid crtc %d\n", crtc);
2849		return;
2850	}
2851
2852	switch (state) {
2853	case AMDGPU_IRQ_STATE_DISABLE:
2854		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2855		interrupt_mask &= ~VBLANK_INT_MASK;
2856		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2857		break;
2858	case AMDGPU_IRQ_STATE_ENABLE:
2859		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2860		interrupt_mask |= VBLANK_INT_MASK;
2861		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2862		break;
2863	default:
2864		break;
2865	}
2866}
2867
2868static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2869						    int crtc,
2870						    enum amdgpu_interrupt_state state)
2871{
2872
2873}
2874
2875static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2876					    struct amdgpu_irq_src *src,
2877					    unsigned type,
2878					    enum amdgpu_interrupt_state state)
2879{
2880	u32 dc_hpd_int_cntl;
2881
2882	if (type >= adev->mode_info.num_hpd) {
2883		DRM_DEBUG("invalid hpd %d\n", type);
2884		return 0;
2885	}
2886
2887	switch (state) {
2888	case AMDGPU_IRQ_STATE_DISABLE:
2889		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2890		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2891		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2892		break;
2893	case AMDGPU_IRQ_STATE_ENABLE:
2894		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2895		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2896		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2897		break;
2898	default:
2899		break;
2900	}
2901
2902	return 0;
2903}
2904
2905static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2906					     struct amdgpu_irq_src *src,
2907					     unsigned type,
2908					     enum amdgpu_interrupt_state state)
2909{
2910	switch (type) {
2911	case AMDGPU_CRTC_IRQ_VBLANK1:
2912		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2913		break;
2914	case AMDGPU_CRTC_IRQ_VBLANK2:
2915		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2916		break;
2917	case AMDGPU_CRTC_IRQ_VBLANK3:
2918		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2919		break;
2920	case AMDGPU_CRTC_IRQ_VBLANK4:
2921		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2922		break;
2923	case AMDGPU_CRTC_IRQ_VBLANK5:
2924		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2925		break;
2926	case AMDGPU_CRTC_IRQ_VBLANK6:
2927		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2928		break;
2929	case AMDGPU_CRTC_IRQ_VLINE1:
2930		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2931		break;
2932	case AMDGPU_CRTC_IRQ_VLINE2:
2933		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2934		break;
2935	case AMDGPU_CRTC_IRQ_VLINE3:
2936		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2937		break;
2938	case AMDGPU_CRTC_IRQ_VLINE4:
2939		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2940		break;
2941	case AMDGPU_CRTC_IRQ_VLINE5:
2942		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2943		break;
2944	case AMDGPU_CRTC_IRQ_VLINE6:
2945		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2946		break;
2947	default:
2948		break;
2949	}
2950	return 0;
2951}
2952
2953static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2954			     struct amdgpu_irq_src *source,
2955			     struct amdgpu_iv_entry *entry)
2956{
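	/* crtc interrupt src_ids start at 1, so src_id - 1 is the crtc index */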
2957	unsigned crtc = entry->src_id - 1;
2958	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2959	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2960								    crtc);
2961
2962	switch (entry->src_data[0]) {
2963	case 0: /* vblank */
2964		if (disp_int & interrupt_status_offsets[crtc].vblank)
2965			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2966		else
2967			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2968
2969		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2970			drm_handle_vblank(adev->ddev, crtc);
2971		}
2972		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2973		break;
2974	case 1: /* vline */
2975		if (disp_int & interrupt_status_offsets[crtc].vline)
2976			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2977		else
2978			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2979
2980		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2981		break;
2982	default:
2983		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2984		break;
2985	}
2986
2987	return 0;
2988}
2989
2990static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2991						 struct amdgpu_irq_src *src,
2992						 unsigned type,
2993						 enum amdgpu_interrupt_state state)
2994{
2995	u32 reg;
2996
2997	if (type >= adev->mode_info.num_crtc) {
2998		DRM_ERROR("invalid pageflip crtc %d\n", type);
2999		return -EINVAL;
3000	}
3001
3002	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3003	if (state == AMDGPU_IRQ_STATE_DISABLE)
3004		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3005		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3006	else
3007		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3008		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3009
3010	return 0;
3011}
3012
3013static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3014				 struct amdgpu_irq_src *source,
3015				 struct amdgpu_iv_entry *entry)
3016{
3017	unsigned long flags;
3018	unsigned crtc_id;
3019	struct amdgpu_crtc *amdgpu_crtc;
3020	struct amdgpu_flip_work *works;
3021
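	/* pageflip src_ids start at 8 and step by 2 per crtc; recover the crtc index from the src_id */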
3022	crtc_id = (entry->src_id - 8) >> 1;
3023	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3024
3025	if (crtc_id >= adev->mode_info.num_crtc) {
3026		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3027		return -EINVAL;
3028	}
3029
3030	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3031	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3032		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3033		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3034
3035	/* the IRQ can fire during early bring-up, before the crtc is set up */
3036	if (amdgpu_crtc == NULL)
3037		return 0;
3038
3039	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3040	works = amdgpu_crtc->pflip_works;
3041	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3042		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3043						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3044						amdgpu_crtc->pflip_status,
3045						AMDGPU_FLIP_SUBMITTED);
3046		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3047		return 0;
3048	}
3049
3050	/* page flip completed. clean up */
3051	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3052	amdgpu_crtc->pflip_works = NULL;
3053
3054	/* wake up userspace */
3055	if (works->event)
3056		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3057
3058	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3059
3060	drm_crtc_vblank_put(&amdgpu_crtc->base);
3061	schedule_work(&works->unpin_work);
3062
3063	return 0;
3064}
3065
3066static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3067			    struct amdgpu_irq_src *source,
3068			    struct amdgpu_iv_entry *entry)
3069{
3070	uint32_t disp_int, mask, tmp;
3071	unsigned hpd;
3072
3073	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3074		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3075		return 0;
3076	}
3077
3078	hpd = entry->src_data[0];
3079	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3080	mask = interrupt_status_offsets[hpd].hpd;
3081
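	/* on a pending HPD interrupt, write the ACK bit and schedule connector re-detection */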
3082	if (disp_int & mask) {
3083		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3084		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3085		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3086		schedule_work(&adev->hotplug_work);
3087		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3088	}
3089
3090	return 0;
3091
3092}
3093
3094static int dce_v6_0_set_clockgating_state(void *handle,
3095					  enum amd_clockgating_state state)
3096{
3097	return 0;
3098}
3099
3100static int dce_v6_0_set_powergating_state(void *handle,
3101					  enum amd_powergating_state state)
3102{
3103	return 0;
3104}
3105
3106static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3107	.name = "dce_v6_0",
3108	.early_init = dce_v6_0_early_init,
3109	.late_init = NULL,
3110	.sw_init = dce_v6_0_sw_init,
3111	.sw_fini = dce_v6_0_sw_fini,
3112	.hw_init = dce_v6_0_hw_init,
3113	.hw_fini = dce_v6_0_hw_fini,
3114	.suspend = dce_v6_0_suspend,
3115	.resume = dce_v6_0_resume,
3116	.is_idle = dce_v6_0_is_idle,
3117	.wait_for_idle = dce_v6_0_wait_for_idle,
3118	.soft_reset = dce_v6_0_soft_reset,
3119	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3120	.set_powergating_state = dce_v6_0_set_powergating_state,
3121};
3122
3123static void
3124dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3125			  struct drm_display_mode *mode,
3126			  struct drm_display_mode *adjusted_mode)
3127{
3128
3129	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3130	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3131
3132	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3133
3134	/* need to call this here rather than in prepare() since we need some crtc info */
3135	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3136
3137	/* setting up the scaler clears this register on some chips */
3138	dce_v6_0_set_interleave(encoder->crtc, mode);
3139
3140	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3141		dce_v6_0_afmt_enable(encoder, true);
3142		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3143	}
3144}
3145
3146static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3147{
3148
3149	struct amdgpu_device *adev = encoder->dev->dev_private;
3150	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3151	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3152
3153	if ((amdgpu_encoder->active_device &
3154	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3155	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3156	     ENCODER_OBJECT_ID_NONE)) {
3157		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3158		if (dig) {
3159			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3160			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3161				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3162		}
3163	}
3164
3165	amdgpu_atombios_scratch_regs_lock(adev, true);
3166
3167	if (connector) {
3168		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3169
3170		/* select the clock/data port if it uses a router */
3171		if (amdgpu_connector->router.cd_valid)
3172			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3173
3174		/* turn eDP panel on for mode set */
3175		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3176			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3177							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3178	}
3179
3180	/* this is needed for the pll/ss setup to work correctly in some cases */
3181	amdgpu_atombios_encoder_set_crtc_source(encoder);
3182	/* set up the FMT blocks */
3183	dce_v6_0_program_fmt(encoder);
3184}
3185
3186static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3187{
3188
3189	struct drm_device *dev = encoder->dev;
3190	struct amdgpu_device *adev = dev->dev_private;
3191
3192	/* need to call this here as we need the crtc set up */
3193	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3194	amdgpu_atombios_scratch_regs_lock(adev, false);
3195}
3196
3197static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3198{
3199
3200	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3201	struct amdgpu_encoder_atom_dig *dig;
3202	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3203
3204	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3205
3206	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3207		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3208			dce_v6_0_afmt_enable(encoder, false);
3209		dig = amdgpu_encoder->enc_priv;
3210		dig->dig_encoder = -1;
3211	}
3212	amdgpu_encoder->active_device = 0;
3213}
3214
3215/* these are handled by the primary encoders */
3216static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3217{
3218
3219}
3220
3221static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3222{
3223
3224}
3225
3226static void
3227dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3228		      struct drm_display_mode *mode,
3229		      struct drm_display_mode *adjusted_mode)
3230{
3231
3232}
3233
3234static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3235{
3236
3237}
3238
3239static void
3240dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3241{
3242
3243}
3244
3245static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3246				    const struct drm_display_mode *mode,
3247				    struct drm_display_mode *adjusted_mode)
3248{
3249	return true;
3250}
3251
3252static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3253	.dpms = dce_v6_0_ext_dpms,
3254	.mode_fixup = dce_v6_0_ext_mode_fixup,
3255	.prepare = dce_v6_0_ext_prepare,
3256	.mode_set = dce_v6_0_ext_mode_set,
3257	.commit = dce_v6_0_ext_commit,
3258	.disable = dce_v6_0_ext_disable,
3259	/* no detect for TMDS/LVDS yet */
3260};
3261
3262static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3263	.dpms = amdgpu_atombios_encoder_dpms,
3264	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3265	.prepare = dce_v6_0_encoder_prepare,
3266	.mode_set = dce_v6_0_encoder_mode_set,
3267	.commit = dce_v6_0_encoder_commit,
3268	.disable = dce_v6_0_encoder_disable,
3269	.detect = amdgpu_atombios_encoder_dig_detect,
3270};
3271
3272static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3273	.dpms = amdgpu_atombios_encoder_dpms,
3274	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3275	.prepare = dce_v6_0_encoder_prepare,
3276	.mode_set = dce_v6_0_encoder_mode_set,
3277	.commit = dce_v6_0_encoder_commit,
3278	.detect = amdgpu_atombios_encoder_dac_detect,
3279};
3280
3281static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3282{
3283	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3284	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3285		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3286	kfree(amdgpu_encoder->enc_priv);
3287	drm_encoder_cleanup(encoder);
3288	kfree(amdgpu_encoder);
3289}
3290
3291static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3292	.destroy = dce_v6_0_encoder_destroy,
3293};
3294
3295static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3296				 uint32_t encoder_enum,
3297				 uint32_t supported_device,
3298				 u16 caps)
3299{
3300	struct drm_device *dev = adev->ddev;
3301	struct drm_encoder *encoder;
3302	struct amdgpu_encoder *amdgpu_encoder;
3303
3304	/* see if we already added it */
3305	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3306		amdgpu_encoder = to_amdgpu_encoder(encoder);
3307		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3308			amdgpu_encoder->devices |= supported_device;
3309			return;
3310		}
3311
3312	}
3313
3314	/* add a new one */
3315	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3316	if (!amdgpu_encoder)
3317		return;
3318
3319	encoder = &amdgpu_encoder->base;
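	/* advertise which crtcs this encoder can drive as a bitmask based on the ASIC's crtc count */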
3320	switch (adev->mode_info.num_crtc) {
3321	case 1:
3322		encoder->possible_crtcs = 0x1;
3323		break;
3324	case 2:
3325	default:
3326		encoder->possible_crtcs = 0x3;
3327		break;
3328	case 4:
3329		encoder->possible_crtcs = 0xf;
3330		break;
3331	case 6:
3332		encoder->possible_crtcs = 0x3f;
3333		break;
3334	}
3335
3336	amdgpu_encoder->enc_priv = NULL;
3337	amdgpu_encoder->encoder_enum = encoder_enum;
3338	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3339	amdgpu_encoder->devices = supported_device;
3340	amdgpu_encoder->rmx_type = RMX_OFF;
3341	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3342	amdgpu_encoder->is_ext_encoder = false;
3343	amdgpu_encoder->caps = caps;
3344
3345	switch (amdgpu_encoder->encoder_id) {
3346	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3347	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3348		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3349				 DRM_MODE_ENCODER_DAC, NULL);
3350		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3351		break;
3352	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3353	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3354	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3355	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3356	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3357		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3358			amdgpu_encoder->rmx_type = RMX_FULL;
3359			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3360					 DRM_MODE_ENCODER_LVDS, NULL);
3361			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3362		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3363			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3364					 DRM_MODE_ENCODER_DAC, NULL);
3365			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3366		} else {
3367			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3368					 DRM_MODE_ENCODER_TMDS, NULL);
3369			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3370		}
3371		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3372		break;
3373	case ENCODER_OBJECT_ID_SI170B:
3374	case ENCODER_OBJECT_ID_CH7303:
3375	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3376	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3377	case ENCODER_OBJECT_ID_TITFP513:
3378	case ENCODER_OBJECT_ID_VT1623:
3379	case ENCODER_OBJECT_ID_HDMI_SI1930:
3380	case ENCODER_OBJECT_ID_TRAVIS:
3381	case ENCODER_OBJECT_ID_NUTMEG:
3382		/* these are handled by the primary encoders */
3383		amdgpu_encoder->is_ext_encoder = true;
3384		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3385			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3386					 DRM_MODE_ENCODER_LVDS, NULL);
3387		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3388			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3389					 DRM_MODE_ENCODER_DAC, NULL);
3390		else
3391			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3392					 DRM_MODE_ENCODER_TMDS, NULL);
3393		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3394		break;
3395	}
3396}
3397
3398static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3399	.bandwidth_update = &dce_v6_0_bandwidth_update,
3400	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3401	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3402	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3403	.hpd_sense = &dce_v6_0_hpd_sense,
3404	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3405	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3406	.page_flip = &dce_v6_0_page_flip,
3407	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3408	.add_encoder = &dce_v6_0_encoder_add,
3409	.add_connector = &amdgpu_connector_add,
3410};
3411
3412static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3413{
3414	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3415}
3416
3417static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3418	.set = dce_v6_0_set_crtc_interrupt_state,
3419	.process = dce_v6_0_crtc_irq,
3420};
3421
3422static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3423	.set = dce_v6_0_set_pageflip_interrupt_state,
3424	.process = dce_v6_0_pageflip_irq,
3425};
3426
3427static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3428	.set = dce_v6_0_set_hpd_interrupt_state,
3429	.process = dce_v6_0_hpd_irq,
3430};
3431
3432static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3433{
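	/* crtc irq types cover the fixed vblank entries plus one vline entry per crtc */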
3434	if (adev->mode_info.num_crtc > 0)
3435		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3436	else
3437		adev->crtc_irq.num_types = 0;
3438	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3439
3440	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3441	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3442
3443	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3444	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3445}
3446
3447const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3448{
3449	.type = AMD_IP_BLOCK_TYPE_DCE,
3450	.major = 6,
3451	.minor = 0,
3452	.rev = 0,
3453	.funcs = &dce_v6_0_ip_funcs,
3454};
3455
3456const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3457{
3458	.type = AMD_IP_BLOCK_TYPE_DCE,
3459	.major = 6,
3460	.minor = 4,
3461	.rev = 0,
3462	.funcs = &dce_v6_0_ip_funcs,
3463};