   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/pci.h>
  25
  26#include <drm/drm_fourcc.h>
  27#include <drm/drm_vblank.h>
  28
  29#include "amdgpu.h"
  30#include "amdgpu_pm.h"
  31#include "amdgpu_i2c.h"
  32#include "atom.h"
  33#include "amdgpu_atombios.h"
  34#include "atombios_crtc.h"
  35#include "atombios_encoders.h"
  36#include "amdgpu_pll.h"
  37#include "amdgpu_connectors.h"
  38#include "amdgpu_display.h"
  39
  40#include "bif/bif_3_0_d.h"
  41#include "bif/bif_3_0_sh_mask.h"
  42#include "oss/oss_1_0_d.h"
  43#include "oss/oss_1_0_sh_mask.h"
  44#include "gca/gfx_6_0_d.h"
  45#include "gca/gfx_6_0_sh_mask.h"
  46#include "gmc/gmc_6_0_d.h"
  47#include "gmc/gmc_6_0_sh_mask.h"
  48#include "dce/dce_6_0_d.h"
  49#include "dce/dce_6_0_sh_mask.h"
  50#include "gca/gfx_7_2_enum.h"
  51#include "dce_v6_0.h"
  52#include "si_enums.h"
  53
  54static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
  55static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  56
  57static const u32 crtc_offsets[6] =
  58{
  59	SI_CRTC0_REGISTER_OFFSET,
  60	SI_CRTC1_REGISTER_OFFSET,
  61	SI_CRTC2_REGISTER_OFFSET,
  62	SI_CRTC3_REGISTER_OFFSET,
  63	SI_CRTC4_REGISTER_OFFSET,
  64	SI_CRTC5_REGISTER_OFFSET
  65};
  66
  67static const u32 hpd_offsets[] =
  68{
  69	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
  70	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
  71	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
  72	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
  73	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
  74	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
  75};
  76
  77static const uint32_t dig_offsets[] = {
  78	SI_CRTC0_REGISTER_OFFSET,
  79	SI_CRTC1_REGISTER_OFFSET,
  80	SI_CRTC2_REGISTER_OFFSET,
  81	SI_CRTC3_REGISTER_OFFSET,
  82	SI_CRTC4_REGISTER_OFFSET,
  83	SI_CRTC5_REGISTER_OFFSET,
  84	(0x13830 - 0x7030) >> 2,
  85};
  86
  87static const struct {
  88	uint32_t	reg;
  89	uint32_t	vblank;
  90	uint32_t	vline;
  91	uint32_t	hpd;
  92
  93} interrupt_status_offsets[6] = { {
  94	.reg = mmDISP_INTERRUPT_STATUS,
  95	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  96	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  97	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  98}, {
  99	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
 100	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
 101	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
 102	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
 103}, {
 104	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
 105	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
 106	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 107	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 108}, {
 109	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 110	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 111	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 112	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 113}, {
 114	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 115	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 116	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 117	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 118}, {
 119	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 120	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 121	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 122	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 123} };
 124
 125static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 126				     u32 block_offset, u32 reg)
 127{
 128	unsigned long flags;
 129	u32 r;
 130
 131	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 132	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 133	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 134	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 135
 136	return r;
 137}
 138
 139static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 140				      u32 block_offset, u32 reg, u32 v)
 141{
 142	unsigned long flags;
 143
 144	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 145	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
 146		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
 147	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 148	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 149}
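/*
 * Editor's note: the two helpers above are the classic index/data
 * register-pair pattern: the endpoint register number goes into an INDEX
 * register and the payload moves through the adjacent DATA register; the
 * spinlock keeps that two-step sequence atomic against other accessors.
 * A hedged usage sketch (pin is a hypothetical amdgpu_audio_pin):
 *
 *   u32 v = dce_v6_0_audio_endpt_rreg(adev, pin->offset,
 *                  ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 */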
 150
 151static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 152{
 153	if (crtc >= adev->mode_info.num_crtc)
 154		return 0;
 155	else
 156		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 157}
 158
 159static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 160{
 161	unsigned i;
 162
 163	/* Enable pflip interrupts */
 164	for (i = 0; i < adev->mode_info.num_crtc; i++)
 165		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 166}
 167
 168static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 169{
 170	unsigned i;
 171
 172	/* Disable pflip interrupts */
 173	for (i = 0; i < adev->mode_info.num_crtc; i++)
 174		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 175}
 176
 177/**
 178 * dce_v6_0_page_flip - pageflip callback.
 179 *
 180 * @adev: amdgpu_device pointer
 181 * @crtc_id: crtc to pageflip on
 182 * @crtc_base: new address of the crtc (GPU MC address)
 183 * @async: use an asynchronous (hsync-latched) flip
 184 *
 185 * Does the actual pageflip (evergreen+): programs the new pitch and
 186 * scanout addresses for the crtc, and selects whether the double
 187 * buffered surface registers latch at vsync (the default) or at
 188 * hsync (async), then posts the writes.
 189 */
 190static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 191			       int crtc_id, u64 crtc_base, bool async)
 192{
 193	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 194	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 195
 196	/* flip at hsync for async, default is vsync */
 197	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 198	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 199	/* update pitch */
 200	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
 201	       fb->pitches[0] / fb->format->cpp[0]);
 202	/* update the scanout addresses */
 203	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 204	       upper_32_bits(crtc_base));
 205	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 206	       (u32)crtc_base);
 207
 208	/* post the write */
 209	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 210}
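/*
 * Editor's note: the 64-bit GPU MC address is split across two 32-bit
 * surface registers.  Worked example with a hypothetical crtc_base of
 * 0x123456000:
 *
 *   upper_32_bits(0x123456000ULL) == 0x00000001  -> ..._ADDRESS_HIGH
 *   (u32)0x123456000ULL           == 0x23456000  -> ..._ADDRESS
 *
 * The trailing read of the low-address register posts the writes so the
 * flip is committed before this function returns.
 */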
 211
 212static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 213					u32 *vbl, u32 *position)
 214{
 215	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 216		return -EINVAL;
 217	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 218	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 219
 220	return 0;
 221
 222}
 223
 224/**
 225 * dce_v6_0_hpd_sense - hpd sense callback.
 226 *
 227 * @adev: amdgpu_device pointer
 228 * @hpd: hpd (hotplug detect) pin
 229 *
 230 * Checks if a digital monitor is connected (evergreen+).
 231 * Returns true if connected, false if not connected.
 232 */
 233static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 234			       enum amdgpu_hpd_id hpd)
 235{
 236	bool connected = false;
 237
 238	if (hpd >= adev->mode_info.num_hpd)
 239		return connected;
 240
 241	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 242		connected = true;
 243
 244	return connected;
 245}
 246
 247/**
 248 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 249 *
 250 * @adev: amdgpu_device pointer
 251 * @hpd: hpd (hotplug detect) pin
 252 *
 253 * Set the polarity of the hpd pin (evergreen+).
 254 */
 255static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 256				      enum amdgpu_hpd_id hpd)
 257{
 258	u32 tmp;
 259	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 260
 261	if (hpd >= adev->mode_info.num_hpd)
 262		return;
 263
 264	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 265	if (connected)
 266		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 267	else
 268		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 269	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 270}
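/*
 * Editor's note: the polarity bit is set opposite to the current sense so
 * that a single-edge interrupt line catches both events: while a sink is
 * connected the pin is armed for the unplug transition, and once empty it
 * is re-armed for the plug transition.  E.g. connected == true clears
 * DC_HPD1_INT_POLARITY above, so the next hpd interrupt means "removed".
 */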
 271
 272/**
 273 * dce_v6_0_hpd_init - hpd setup callback.
 274 *
 275 * @adev: amdgpu_device pointer
 276 *
 277 * Setup the hpd pins used by the card (evergreen+).
 278 * Enable the pin, set the polarity, and enable the hpd interrupts.
 279 */
 280static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 281{
 282	struct drm_device *dev = adev->ddev;
 283	struct drm_connector *connector;
 284	u32 tmp;
 285
 286	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 287		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 288
 289		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 290			continue;
 291
 292		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 293		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 294		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 295
 296		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 297		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 298			/* don't try to enable hpd on eDP or LVDS; this avoids breaking
 299			 * the aux dp channel on imac and helps (but does not completely
 300			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143,
 301			 * and also avoids interrupt storms during dpms.
 302			 */
 303			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 304			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 305			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 306			continue;
 307		}
 308
 309		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 310		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 311	}
 312
 313}
 314
 315/**
 316 * dce_v6_0_hpd_fini - hpd tear down callback.
 317 *
 318 * @adev: amdgpu_device pointer
 319 *
 320 * Tear down the hpd pins used by the card (evergreen+).
 321 * Disable the hpd interrupts.
 322 */
 323static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 324{
 325	struct drm_device *dev = adev->ddev;
 326	struct drm_connector *connector;
 327	u32 tmp;
 328
 329	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 330		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 331
 332		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 333			continue;
 334
 335		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 336		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 337		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 338
 339		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 340	}
 341}
 342
 343static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 344{
 345	return mmDC_GPIO_HPD_A;
 346}
 347
 348static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 349					  bool render)
 350{
 351	if (!render)
 352		WREG32(mmVGA_RENDER_CONTROL,
 353			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 354
 355}
 356
 357static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 358{
 359	switch (adev->asic_type) {
 360	case CHIP_TAHITI:
 361	case CHIP_PITCAIRN:
 362	case CHIP_VERDE:
 363		return 6;
 364	case CHIP_OLAND:
 365		return 2;
 366	default:
 367		return 0;
 368	}
 369}
 370
 371void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 372{
 373	/* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
 374	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 375		u32 tmp;
 376		int crtc_enabled, i;
 377
 378		dce_v6_0_set_vga_render_state(adev, false);
 379
 380		/* Disable the CRTCs */
 381		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
 382			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
 383				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 384			if (crtc_enabled) {
 385				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 386				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 387				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 388				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 389				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 390			}
 391		}
 392	}
 393}
 394
 395static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 396{
 397
 398	struct drm_device *dev = encoder->dev;
 399	struct amdgpu_device *adev = dev->dev_private;
 400	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 401	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 402	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 403	int bpc = 0;
 404	u32 tmp = 0;
 405	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 406
 407	if (connector) {
 408		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 409		bpc = amdgpu_connector_get_monitor_bpc(connector);
 410		dither = amdgpu_connector->dither;
 411	}
 412
 413	/* LVDS FMT is set up by atom */
 414	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 415		return;
 416
 417	if (bpc == 0)
 418		return;
 419
 420
 421	switch (bpc) {
 422	case 6:
 423		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 424			/* XXX sort out optimal dither settings */
 425			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 426				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 427				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 428		else
 429			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 430		break;
 431	case 8:
 432		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 433			/* XXX sort out optimal dither settings */
 434			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 435				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 436				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 437				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 438				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 439		else
 440			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 441				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 442		break;
 443	case 10:
 444	default:
 445		/* not needed */
 446		break;
 447	}
 448
 449	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 450}
 451
 452/**
 453 * si_get_number_of_dram_channels - get the number of dram channels
 454 *
 455 * @adev: amdgpu_device pointer
 456 *
 457 * Look up the number of video ram channels (SI).
 458 * Used for display watermark bandwidth calculations
 459 * Returns the number of dram channels
 460 */
 461static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 462{
 463	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 464
 465	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 466	case 0:
 467	default:
 468		return 1;
 469	case 1:
 470		return 2;
 471	case 2:
 472		return 4;
 473	case 3:
 474		return 8;
 475	case 4:
 476		return 3;
 477	case 5:
 478		return 6;
 479	case 6:
 480		return 10;
 481	case 7:
 482		return 12;
 483	case 8:
 484		return 16;
 485	}
 486}
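/*
 * Editor's note: worked example of the decode above.  If MC_SHARED_CHMAP
 * reads back with NOOFCHAN == 3, the switch returns 8 dram channels, and
 * the watermark math below treats the memory interface as
 * 8 channels * 4 bytes = 32 bytes moved per yclk transfer.
 */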
 487
 488struct dce6_wm_params {
 489	u32 dram_channels; /* number of dram channels */
 490	u32 yclk;          /* bandwidth per dram data pin in kHz */
 491	u32 sclk;          /* engine clock in kHz */
 492	u32 disp_clk;      /* display clock in kHz */
 493	u32 src_width;     /* viewport width */
 494	u32 active_time;   /* active display time in ns */
 495	u32 blank_time;    /* blank time in ns */
 496	bool interlaced;    /* mode is interlaced */
 497	fixed20_12 vsc;    /* vertical scale ratio */
 498	u32 num_heads;     /* number of active crtcs */
 499	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 500	u32 lb_size;       /* line buffer allocated to pipe */
 501	u32 vtaps;         /* vertical scaler taps */
 502};
 503
 504/**
 505 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 506 *
 507 * @wm: watermark calculation data
 508 *
 509 * Calculate the raw dram bandwidth (SI).
 510 * Used for display watermark bandwidth calculations
 511 * Returns the dram bandwidth in MBytes/s
 512 */
 513static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
 514{
 515	/* Calculate raw DRAM Bandwidth */
 516	fixed20_12 dram_efficiency; /* 0.7 */
 517	fixed20_12 yclk, dram_channels, bandwidth;
 518	fixed20_12 a;
 519
 520	a.full = dfixed_const(1000);
 521	yclk.full = dfixed_const(wm->yclk);
 522	yclk.full = dfixed_div(yclk, a);
 523	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 524	a.full = dfixed_const(10);
 525	dram_efficiency.full = dfixed_const(7);
 526	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 527	bandwidth.full = dfixed_mul(dram_channels, yclk);
 528	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 529
 530	return dfixed_trunc(bandwidth);
 531}
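/*
 * Editor's note: a worked pass through the fixed-point math above, with
 * hypothetical numbers.  For yclk = 800000 (800 MHz per pin, in kHz) and
 * dram_channels = 2:
 *
 *   yclk / 1000       = 800   (MHz)
 *   dram_channels * 4 = 8     (bytes per transfer)
 *   800 * 8 * 0.7     = 4480  (MBytes/s of raw dram bandwidth)
 *
 * dfixed_const/dfixed_mul/dfixed_div evaluate this in 20.12 fixed point
 * to avoid floating point in the kernel.
 */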
 532
 533/**
 534 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 535 *
 536 * @wm: watermark calculation data
 537 *
 538 * Calculate the dram bandwidth used for display (SI).
 539 * Used for display watermark bandwidth calculations
 540 * Returns the dram bandwidth for display in MBytes/s
 541 */
 542static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 543{
 544	/* Calculate DRAM Bandwidth and the part allocated to display. */
 545	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 546	fixed20_12 yclk, dram_channels, bandwidth;
 547	fixed20_12 a;
 548
 549	a.full = dfixed_const(1000);
 550	yclk.full = dfixed_const(wm->yclk);
 551	yclk.full = dfixed_div(yclk, a);
 552	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 553	a.full = dfixed_const(10);
 554	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 555	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 556	bandwidth.full = dfixed_mul(dram_channels, yclk);
 557	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 558
 559	return dfixed_trunc(bandwidth);
 560}
 561
 562/**
 563 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 564 *
 565 * @wm: watermark calculation data
 566 *
 567 * Calculate the data return bandwidth used for display (SI).
 568 * Used for display watermark bandwidth calculations
 569 * Returns the data return bandwidth in MBytes/s
 570 */
 571static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
 572{
 573	/* Calculate the display Data return Bandwidth */
 574	fixed20_12 return_efficiency; /* 0.8 */
 575	fixed20_12 sclk, bandwidth;
 576	fixed20_12 a;
 577
 578	a.full = dfixed_const(1000);
 579	sclk.full = dfixed_const(wm->sclk);
 580	sclk.full = dfixed_div(sclk, a);
 581	a.full = dfixed_const(10);
 582	return_efficiency.full = dfixed_const(8);
 583	return_efficiency.full = dfixed_div(return_efficiency, a);
 584	a.full = dfixed_const(32);
 585	bandwidth.full = dfixed_mul(a, sclk);
 586	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 587
 588	return dfixed_trunc(bandwidth);
 589}
 590
 591/**
 592 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 593 *
 594 * @wm: watermark calculation data
 595 *
 596 * Calculate the dmif bandwidth used for display (SI).
 597 * Used for display watermark bandwidth calculations
 598 * Returns the dmif bandwidth in MBytes/s
 599 */
 600static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
 601{
 602	/* Calculate the DMIF Request Bandwidth */
 603	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 604	fixed20_12 disp_clk, bandwidth;
 605	fixed20_12 a, b;
 606
 607	a.full = dfixed_const(1000);
 608	disp_clk.full = dfixed_const(wm->disp_clk);
 609	disp_clk.full = dfixed_div(disp_clk, a);
 610	a.full = dfixed_const(32);
 611	b.full = dfixed_mul(a, disp_clk);
 612
 613	a.full = dfixed_const(10);
 614	disp_clk_request_efficiency.full = dfixed_const(8);
 615	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 616
 617	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 618
 619	return dfixed_trunc(bandwidth);
 620}
 621
 622/**
 623 * dce_v6_0_available_bandwidth - get the min available bandwidth
 624 *
 625 * @wm: watermark calculation data
 626 *
 627 * Calculate the min available bandwidth used for display (SI).
 628 * Used for display watermark bandwidth calculations
 629 * Returns the min available bandwidth in MBytes/s
 630 */
 631static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
 632{
 633	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
 634	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
 635	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
 636	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
 637
 638	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 639}
 640
 641/**
 642 * dce_v6_0_average_bandwidth - get the average available bandwidth
 643 *
 644 * @wm: watermark calculation data
 645 *
 646 * Calculate the average available bandwidth used for display (SI).
 647 * Used for display watermark bandwidth calculations
 648 * Returns the average available bandwidth in MBytes/s
 649 */
 650static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
 651{
 652	/* Calculate the display mode Average Bandwidth
 653	 * DisplayMode should contain the source and destination dimensions,
 654	 * timing, etc.
 655	 */
 656	fixed20_12 bpp;
 657	fixed20_12 line_time;
 658	fixed20_12 src_width;
 659	fixed20_12 bandwidth;
 660	fixed20_12 a;
 661
 662	a.full = dfixed_const(1000);
 663	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 664	line_time.full = dfixed_div(line_time, a);
 665	bpp.full = dfixed_const(wm->bytes_per_pixel);
 666	src_width.full = dfixed_const(wm->src_width);
 667	bandwidth.full = dfixed_mul(src_width, bpp);
 668	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 669	bandwidth.full = dfixed_div(bandwidth, line_time);
 670
 671	return dfixed_trunc(bandwidth);
 672}
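/*
 * Editor's note: worked example with hypothetical 1080p-like numbers.
 * For src_width = 1920, bytes_per_pixel = 4, vsc = 1.0 and
 * active_time + blank_time = 16000 ns:
 *
 *   line_time = 16000 / 1000   = 16   (us)
 *   1920 * 4 * 1.0 / 16        = 480  (MBytes/s average bandwidth)
 */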
 673
 674/**
 675 * dce_v6_0_latency_watermark - get the latency watermark
 676 *
 677 * @wm: watermark calculation data
 678 *
 679 * Calculate the latency watermark (SI).
 680 * Used for display watermark bandwidth calculations
 681 * Returns the latency watermark in ns
 682 */
 683static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 684{
 685	/* First calculate the latency in ns */
 686	u32 mc_latency = 2000; /* 2000 ns. */
 687	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
 688	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 689	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 690	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 691	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 692		(wm->num_heads * cursor_line_pair_return_time);
 693	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 694	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 695	u32 tmp, dmif_size = 12288;
 696	fixed20_12 a, b, c;
 697
 698	if (wm->num_heads == 0)
 699		return 0;
 700
 701	a.full = dfixed_const(2);
 702	b.full = dfixed_const(1);
 703	if ((wm->vsc.full > a.full) ||
 704	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 705	    (wm->vtaps >= 5) ||
 706	    ((wm->vsc.full >= a.full) && wm->interlaced))
 707		max_src_lines_per_dst_line = 4;
 708	else
 709		max_src_lines_per_dst_line = 2;
 710
 711	a.full = dfixed_const(available_bandwidth);
 712	b.full = dfixed_const(wm->num_heads);
 713	a.full = dfixed_div(a, b);
 714	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
 715	tmp = min(dfixed_trunc(a), tmp);
 716
 717	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 718
 719	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 720	b.full = dfixed_const(1000);
 721	c.full = dfixed_const(lb_fill_bw);
 722	b.full = dfixed_div(c, b);
 723	a.full = dfixed_div(a, b);
 724	line_fill_time = dfixed_trunc(a);
 725
 726	if (line_fill_time < wm->active_time)
 727		return latency;
 728	else
 729		return latency + (line_fill_time - wm->active_time);
 730
 731}
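/*
 * Editor's note: a rough worked example of the latency sum above, reusing
 * the hypothetical 4480 MBytes/s available-bandwidth figure and
 * num_heads = 1:
 *
 *   worst_chunk_return_time      = 512 * 8 * 1000 / 4480  ~= 914 ns
 *   cursor_line_pair_return_time = 128 * 4 * 1000 / 4480  ~= 114 ns
 *   other_heads_data_return_time = 2 * 914 + 1 * 114      ~= 1942 ns
 *   latency                      = 2000 + 1942 + dc_latency
 *
 * The line-fill check at the end then adds any extra time the line buffer
 * needs beyond the active display time.
 */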
 732
 733/**
 734 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 735 * average and available dram bandwidth
 736 *
 737 * @wm: watermark calculation data
 738 *
 739 * Check if the display average bandwidth fits in the display
 740 * dram bandwidth (SI).
 741 * Used for display watermark bandwidth calculations
 742 * Returns true if the display fits, false if not.
 743 */
 744static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 745{
 746	if (dce_v6_0_average_bandwidth(wm) <=
 747	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 748		return true;
 749	else
 750		return false;
 751}
 752
 753/**
 754 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 755 * average and available bandwidth
 756 *
 757 * @wm: watermark calculation data
 758 *
 759 * Check if the display average bandwidth fits in the display
 760 * available bandwidth (CIK).
 761 * available bandwidth (SI).
 762 * Returns true if the display fits, false if not.
 763 */
 764static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 765{
 766	if (dce_v6_0_average_bandwidth(wm) <=
 767	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
 768		return true;
 769	else
 770		return false;
 771}
 772
 773/**
 774 * dce_v6_0_check_latency_hiding - check latency hiding
 775 *
 776 * @wm: watermark calculation data
 777 *
 778 * Check latency hiding (SI).
 779 * Used for display watermark bandwidth calculations
 780 * Returns true if the display fits, false if not.
 781 */
 782static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
 783{
 784	u32 lb_partitions = wm->lb_size / wm->src_width;
 785	u32 line_time = wm->active_time + wm->blank_time;
 786	u32 latency_tolerant_lines;
 787	u32 latency_hiding;
 788	fixed20_12 a;
 789
 790	a.full = dfixed_const(1);
 791	if (wm->vsc.full > a.full)
 792		latency_tolerant_lines = 1;
 793	else {
 794		if (lb_partitions <= (wm->vtaps + 1))
 795			latency_tolerant_lines = 1;
 796		else
 797			latency_tolerant_lines = 2;
 798	}
 799
 800	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 801
 802	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
 803		return true;
 804	else
 805		return false;
 806}
 807
 808/**
 809 * dce_v6_0_program_watermarks - program display watermarks
 810 *
 811 * @adev: amdgpu_device pointer
 812 * @amdgpu_crtc: the selected display controller
 813 * @lb_size: line buffer size
 814 * @num_heads: number of display controllers in use
 815 *
 816 * Calculate and program the display watermarks for the
 817 * selected display controller (SI).
 818 */
 819static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 820					struct amdgpu_crtc *amdgpu_crtc,
 821					u32 lb_size, u32 num_heads)
 822{
 823	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 824	struct dce6_wm_params wm_low, wm_high;
 825	u32 dram_channels;
 826	u32 active_time;
 827	u32 line_time = 0;
 828	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 829	u32 priority_a_mark = 0, priority_b_mark = 0;
 830	u32 priority_a_cnt = PRIORITY_OFF;
 831	u32 priority_b_cnt = PRIORITY_OFF;
 832	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
 833	fixed20_12 a, b, c;
 834
 835	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 836		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
 837					    (u32)mode->clock);
 838		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
 839					  (u32)mode->clock);
 840		line_time = min(line_time, (u32)65535);
 841		priority_a_cnt = 0;
 842		priority_b_cnt = 0;
 843
 844		dram_channels = si_get_number_of_dram_channels(adev);
 845
 846		/* watermark for high clocks */
 847		if (adev->pm.dpm_enabled) {
 848			wm_high.yclk =
 849				amdgpu_dpm_get_mclk(adev, false) * 10;
 850			wm_high.sclk =
 851				amdgpu_dpm_get_sclk(adev, false) * 10;
 852		} else {
 853			wm_high.yclk = adev->pm.current_mclk * 10;
 854			wm_high.sclk = adev->pm.current_sclk * 10;
 855		}
 856
 857		wm_high.disp_clk = mode->clock;
 858		wm_high.src_width = mode->crtc_hdisplay;
 859		wm_high.active_time = active_time;
 860		wm_high.blank_time = line_time - wm_high.active_time;
 861		wm_high.interlaced = false;
 862		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 863			wm_high.interlaced = true;
 864		wm_high.vsc = amdgpu_crtc->vsc;
 865		wm_high.vtaps = 1;
 866		if (amdgpu_crtc->rmx_type != RMX_OFF)
 867			wm_high.vtaps = 2;
 868		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
 869		wm_high.lb_size = lb_size;
 870		wm_high.dram_channels = dram_channels;
 871		wm_high.num_heads = num_heads;
 872
 873		if (adev->pm.dpm_enabled) {
 874		/* watermark for low clocks */
 875			wm_low.yclk =
 876				amdgpu_dpm_get_mclk(adev, true) * 10;
 877			wm_low.sclk =
 878				amdgpu_dpm_get_sclk(adev, true) * 10;
 879		} else {
 880			wm_low.yclk = adev->pm.current_mclk * 10;
 881			wm_low.sclk = adev->pm.current_sclk * 10;
 882		}
 883
 884		wm_low.disp_clk = mode->clock;
 885		wm_low.src_width = mode->crtc_hdisplay;
 886		wm_low.active_time = active_time;
 887		wm_low.blank_time = line_time - wm_low.active_time;
 888		wm_low.interlaced = false;
 889		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 890			wm_low.interlaced = true;
 891		wm_low.vsc = amdgpu_crtc->vsc;
 892		wm_low.vtaps = 1;
 893		if (amdgpu_crtc->rmx_type != RMX_OFF)
 894			wm_low.vtaps = 2;
 895		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
 896		wm_low.lb_size = lb_size;
 897		wm_low.dram_channels = dram_channels;
 898		wm_low.num_heads = num_heads;
 899
 900		/* set for high clocks */
 901		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
 902		/* set for low clocks */
 903		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
 904
 905		/* possibly force display priority to high */
 906		/* should really do this at mode validation time... */
 907		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
 908		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
 909		    !dce_v6_0_check_latency_hiding(&wm_high) ||
 910		    (adev->mode_info.disp_priority == 2)) {
 911			DRM_DEBUG_KMS("force priority to high\n");
 912			priority_a_cnt |= PRIORITY_ALWAYS_ON;
 913			priority_b_cnt |= PRIORITY_ALWAYS_ON;
 914		}
 915		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
 916		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
 917		    !dce_v6_0_check_latency_hiding(&wm_low) ||
 918		    (adev->mode_info.disp_priority == 2)) {
 919			DRM_DEBUG_KMS("force priority to high\n");
 920			priority_a_cnt |= PRIORITY_ALWAYS_ON;
 921			priority_b_cnt |= PRIORITY_ALWAYS_ON;
 922		}
 923
 924		a.full = dfixed_const(1000);
 925		b.full = dfixed_const(mode->clock);
 926		b.full = dfixed_div(b, a);
 927		c.full = dfixed_const(latency_watermark_a);
 928		c.full = dfixed_mul(c, b);
 929		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
 930		c.full = dfixed_div(c, a);
 931		a.full = dfixed_const(16);
 932		c.full = dfixed_div(c, a);
 933		priority_a_mark = dfixed_trunc(c);
 934		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
 935
 936		a.full = dfixed_const(1000);
 937		b.full = dfixed_const(mode->clock);
 938		b.full = dfixed_div(b, a);
 939		c.full = dfixed_const(latency_watermark_b);
 940		c.full = dfixed_mul(c, b);
 941		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
 942		c.full = dfixed_div(c, a);
 943		a.full = dfixed_const(16);
 944		c.full = dfixed_div(c, a);
 945		priority_b_mark = dfixed_trunc(c);
 946		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
 947
 948		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 949	}
 950
 951	/* select wm A */
 952	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 953	tmp = arb_control3;
 954	tmp &= ~LATENCY_WATERMARK_MASK(3);
 955	tmp |= LATENCY_WATERMARK_MASK(1);
 956	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
 957	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
 958	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
 959		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 960	/* select wm B */
 961	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 962	tmp &= ~LATENCY_WATERMARK_MASK(3);
 963	tmp |= LATENCY_WATERMARK_MASK(2);
 964	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
 965	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
 966	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
 967		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 968	/* restore original selection */
 969	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
 970
 971	/* write the priority marks */
 972	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
 973	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
 974
 975	/* save values for DPM */
 976	amdgpu_crtc->line_time = line_time;
 977	amdgpu_crtc->wm_high = latency_watermark_a;
 978
 979	/* Save number of lines the linebuffer leads before the scanout */
 980	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 981}
 982
 983/* watermark setup */
 984static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
 985				   struct amdgpu_crtc *amdgpu_crtc,
 986				   struct drm_display_mode *mode,
 987				   struct drm_display_mode *other_mode)
 988{
 989	u32 tmp, buffer_alloc, i;
 990	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 991	/*
 992	 * Line Buffer Setup
 993	 * There are 3 line buffers, each one shared by 2 display controllers.
 994	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 995	 * the display controllers.  The partitioning is done via one of four
 996	 * preset allocations specified in bits 21:20:
 997	 *  0 - half lb
 998	 *  2 - whole lb, other crtc must be disabled
 999	 */
1000	/* this can get tricky if we have two large displays on a paired group
1001	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1002	 * non-linked crtcs for maximum line buffer allocation.
1003	 */
1004	if (amdgpu_crtc->base.enabled && mode) {
1005		if (other_mode) {
1006			tmp = 0; /* 1/2 */
1007			buffer_alloc = 1;
1008		} else {
1009			tmp = 2; /* whole */
1010			buffer_alloc = 2;
1011		}
1012	} else {
1013		tmp = 0;
1014		buffer_alloc = 0;
1015	}
1016
1017	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1018	       DC_LB_MEMORY_CONFIG(tmp));
1019
1020	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1021	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1022	for (i = 0; i < adev->usec_timeout; i++) {
1023		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1024		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1025			break;
1026		udelay(1);
1027	}
1028
1029	if (amdgpu_crtc->base.enabled && mode) {
1030		switch (tmp) {
1031		case 0:
1032		default:
1033			return 4096 * 2;
1034		case 2:
1035			return 8192 * 2;
1036		}
1037	}
1038
1039	/* controller not enabled, so no lb used */
1040	return 0;
1041}
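/*
 * Editor's note: worked example of the split above.  If a crtc and its
 * linked partner both drive displays, each gets half a line buffer
 * (tmp = 0, returning 4096 * 2); if the partner is idle, the active crtc
 * takes the whole buffer (tmp = 2, returning 8192 * 2).  The returned
 * size feeds lb_size in dce_v6_0_program_watermarks().
 */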
1042
1043
1044/**
1045 * dce_v6_0_bandwidth_update - program display watermarks
1046 *
1047 * @adev: amdgpu_device pointer
1048 *
1049 * Calculate and program the display watermarks and line
1050 * buffer allocation for each pair of active display
1051 * controllers (SI).
1052 */
1053static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1054{
1055	struct drm_display_mode *mode0 = NULL;
1056	struct drm_display_mode *mode1 = NULL;
1057	u32 num_heads = 0, lb_size;
1058	int i;
1059
1060	if (!adev->mode_info.mode_config_initialized)
1061		return;
1062
1063	amdgpu_display_update_priority(adev);
1064
1065	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1066		if (adev->mode_info.crtcs[i]->base.enabled)
1067			num_heads++;
1068	}
1069	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1070		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1071		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1072		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1073		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1074		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1075		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1076	}
1077}
1078
1079static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1080{
1081	int i;
1082	u32 tmp;
1083
1084	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1085		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1086				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1087		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1088					PORT_CONNECTIVITY))
1089			adev->mode_info.audio.pin[i].connected = false;
1090		else
1091			adev->mode_info.audio.pin[i].connected = true;
1092	}
1093
1094}
1095
1096static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1097{
1098	int i;
1099
1100	dce_v6_0_audio_get_connected_pins(adev);
1101
1102	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1103		if (adev->mode_info.audio.pin[i].connected)
1104			return &adev->mode_info.audio.pin[i];
1105	}
1106	DRM_ERROR("No connected audio pins found!\n");
1107	return NULL;
1108}
1109
1110static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1111{
1112	struct amdgpu_device *adev = encoder->dev->dev_private;
1113	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1114	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1115
1116	if (!dig || !dig->afmt || !dig->afmt->pin)
1117		return;
1118
1119	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1120	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1121		             dig->afmt->pin->id));
1122}
1123
1124static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1125						struct drm_display_mode *mode)
1126{
1127	struct amdgpu_device *adev = encoder->dev->dev_private;
1128	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1129	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1130	struct drm_connector *connector;
1131	struct amdgpu_connector *amdgpu_connector = NULL;
1132	int interlace = 0;
1133	u32 tmp;
1134
1135	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1136		if (connector->encoder == encoder) {
1137			amdgpu_connector = to_amdgpu_connector(connector);
1138			break;
1139		}
1140	}
1141
1142	if (!amdgpu_connector) {
1143		DRM_ERROR("Couldn't find encoder's connector\n");
1144		return;
1145	}
1146
1147	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1148		interlace = 1;
1149
1150	if (connector->latency_present[interlace]) {
1151		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1152				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1153		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1154				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1155	} else {
1156		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1157				VIDEO_LIPSYNC, 0);
1158		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1159				AUDIO_LIPSYNC, 0);
1160	}
1161	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1162			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1163}
1164
1165static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1166{
1167	struct amdgpu_device *adev = encoder->dev->dev_private;
1168	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1169	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1170	struct drm_connector *connector;
1171	struct amdgpu_connector *amdgpu_connector = NULL;
1172	u8 *sadb = NULL;
1173	int sad_count;
1174	u32 tmp;
1175
1176	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1177		if (connector->encoder == encoder) {
1178			amdgpu_connector = to_amdgpu_connector(connector);
1179			break;
1180		}
1181	}
1182
1183	if (!amdgpu_connector) {
1184		DRM_ERROR("Couldn't find encoder's connector\n");
1185		return;
1186	}
1187
1188	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1189	if (sad_count < 0) {
1190		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1191		sad_count = 0;
1192	}
1193
1194	/* program the speaker allocation */
1195	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1196			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1197	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1198			HDMI_CONNECTION, 0);
1199	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1200			DP_CONNECTION, 0);
1201
1202	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1203		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1204				DP_CONNECTION, 1);
1205	else
1206		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1207				HDMI_CONNECTION, 1);
1208
1209	if (sad_count)
1210		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1211				SPEAKER_ALLOCATION, sadb[0]);
1212	else
1213		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1214				SPEAKER_ALLOCATION, 5); /* stereo */
1215
1216	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1217			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1218
1219	kfree(sadb);
1220}
1221
1222static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1223{
1224	struct amdgpu_device *adev = encoder->dev->dev_private;
1225	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1226	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1227	struct drm_connector *connector;
1228	struct amdgpu_connector *amdgpu_connector = NULL;
1229	struct cea_sad *sads;
1230	int i, sad_count;
1231
1232	static const u16 eld_reg_to_type[][2] = {
1233		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1234		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1235		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1236		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1237		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1238		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1239		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1240		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1241		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1242		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1243		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1244		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1245	};
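	/*
	 * Editor's note: each CEA Short Audio Descriptor is a 3-byte
	 * triplet (coding type + channel count, sample-rate bits, and a
	 * format-specific byte 2); the table above maps each coding type
	 * to the AZALIA descriptor register that advertises it.  E.g. a
	 * PCM SAD with sad->channels = 8 lands in ..._AUDIO_DESCRIPTOR0
	 * with MAX_CHANNELS = 8 and its rate bits in
	 * SUPPORTED_FREQUENCIES.
	 */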
1246
1247	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1248		if (connector->encoder == encoder) {
1249			amdgpu_connector = to_amdgpu_connector(connector);
1250			break;
1251		}
1252	}
1253
1254	if (!amdgpu_connector) {
1255		DRM_ERROR("Couldn't find encoder's connector\n");
1256		return;
1257	}
1258
1259	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1260	if (sad_count <= 0) {
1261		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1262		return;
1263	}
1264
1265	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1266		u32 tmp = 0;
1267		u8 stereo_freqs = 0;
1268		int max_channels = -1;
1269		int j;
1270
1271		for (j = 0; j < sad_count; j++) {
1272			struct cea_sad *sad = &sads[j];
1273
1274			if (sad->format == eld_reg_to_type[i][1]) {
1275				if (sad->channels > max_channels) {
1276					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1277							MAX_CHANNELS, sad->channels);
1278					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1279							DESCRIPTOR_BYTE_2, sad->byte2);
1280					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1281							SUPPORTED_FREQUENCIES, sad->freq);
1282					max_channels = sad->channels;
1283				}
1284
1285				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1286					stereo_freqs |= sad->freq;
1287				else
1288					break;
1289			}
1290		}
1291
1292		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1293				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1294		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1295	}
1296
1297	kfree(sads);
1298
1299}
1300
1301static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1302				  struct amdgpu_audio_pin *pin,
1303				  bool enable)
1304{
1305	if (!pin)
1306		return;
1307
1308	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1309			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1310}
1311
1312static const u32 pin_offsets[7] =
1313{
1314	(0x1780 - 0x1780),
1315	(0x1786 - 0x1780),
1316	(0x178c - 0x1780),
1317	(0x1792 - 0x1780),
1318	(0x1798 - 0x1780),
1319	(0x179d - 0x1780),
1320	(0x17a4 - 0x1780),
1321};
1322
1323static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1324{
1325	int i;
1326
1327	if (!amdgpu_audio)
1328		return 0;
1329
1330	adev->mode_info.audio.enabled = true;
1331
1332	switch (adev->asic_type) {
1333	case CHIP_TAHITI:
1334	case CHIP_PITCAIRN:
1335	case CHIP_VERDE:
1336	default:
1337		adev->mode_info.audio.num_pins = 6;
1338		break;
1339	case CHIP_OLAND:
1340		adev->mode_info.audio.num_pins = 2;
1341		break;
1342	}
1343
1344	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1345		adev->mode_info.audio.pin[i].channels = -1;
1346		adev->mode_info.audio.pin[i].rate = -1;
1347		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1348		adev->mode_info.audio.pin[i].status_bits = 0;
1349		adev->mode_info.audio.pin[i].category_code = 0;
1350		adev->mode_info.audio.pin[i].connected = false;
1351		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1352		adev->mode_info.audio.pin[i].id = i;
1353		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1354	}
1355
1356	return 0;
1357}
1358
1359static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1360{
1361	int i;
1362
1363	if (!amdgpu_audio)
1364		return;
1365
1366	if (!adev->mode_info.audio.enabled)
1367		return;
1368
1369	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1370		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1371
1372	adev->mode_info.audio.enabled = false;
1373}
1374
1375static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1376{
1377	struct drm_device *dev = encoder->dev;
1378	struct amdgpu_device *adev = dev->dev_private;
1379	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1380	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1381	u32 tmp;
1382
1383	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1384	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1385	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1386	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1387	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1388}
1389
1390static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1391				   uint32_t clock, int bpc)
1392{
1393	struct drm_device *dev = encoder->dev;
1394	struct amdgpu_device *adev = dev->dev_private;
1395	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1396	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1397	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1398	u32 tmp;
1399
1400	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1401	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1402	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1403			bpc > 8 ? 0 : 1);
1404	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1405
1406	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1407	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1408	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1409	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1410	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1411	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1412
1413	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1414	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1415	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1416	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1417	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1418	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1419
1420	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1421	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1422	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1423	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1424	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1425	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1426}
1427
1428static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1429					       struct drm_display_mode *mode)
1430{
1431	struct drm_device *dev = encoder->dev;
1432	struct amdgpu_device *adev = dev->dev_private;
1433	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1434	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1435	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1436	struct hdmi_avi_infoframe frame;
1437	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1438	uint8_t *payload = buffer + 3;
1439	uint8_t *header = buffer;
1440	ssize_t err;
1441	u32 tmp;
1442
1443	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1444	if (err < 0) {
1445		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1446		return;
1447	}
1448
1449	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1450	if (err < 0) {
1451		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1452		return;
1453	}
1454
1455	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1456	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1457	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1458	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1459	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1460	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1461	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1462	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1463
1464	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1465	/* anything other than 0 */
1466	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1467			HDMI_AUDIO_INFO_LINE, 2);
1468	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1469}
1470
1471static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1472{
1473	struct drm_device *dev = encoder->dev;
1474	struct amdgpu_device *adev = dev->dev_private;
1475	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1476	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1477	u32 tmp;
1478
1479	/*
1480	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1481	 * Express [24MHz / target pixel clock] as an exact rational
1482	 * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE is
1483	 * the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1484	 */
1485	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1486	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1487			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1488	if (em == ATOM_ENCODER_MODE_HDMI) {
1489		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1490				DCCG_AUDIO_DTO_SEL, 0);
1491	} else if (ENCODER_MODE_IS_DP(em)) {
1492		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1493				DCCG_AUDIO_DTO_SEL, 1);
1494	}
1495	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1496	if (em == ATOM_ENCODER_MODE_HDMI) {
1497		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1498		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1499	} else if (ENCODER_MODE_IS_DP(em)) {
1500		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1501		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1502	}
1503}
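/*
 * Editor's note: worked DTO example for a hypothetical 148.5 MHz pixel
 * clock (clock = 148500, in kHz as drm mode clocks are):
 *
 *   PHASE / MODULE = 24000 / 148500 = 24 MHz / 148.5 MHz
 *
 * i.e. the audio DTO ticks 24000 times for every 148500 pixel clock
 * ticks, reconstructing the exact 24 MHz audio reference.
 */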
1504
1505static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1506{
1507	struct drm_device *dev = encoder->dev;
1508	struct amdgpu_device *adev = dev->dev_private;
1509	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1510	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1511	u32 tmp;
1512
1513	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1514	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1515	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1516
1517	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1518	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1519	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1520
1521	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1522	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1523	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1524
1525	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1526	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1527	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1528	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1529	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1530	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1531	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1532	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1533
1534	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1535	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1536	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1537
1538	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1539	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1540	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1541	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1542
1543	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1544	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1545	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1546	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1547}
1548
1549static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1550{
1551	struct drm_device *dev = encoder->dev;
1552	struct amdgpu_device *adev = dev->dev_private;
1553	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1554	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1555	u32 tmp;
1556
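	/* toggle the AVMUTE flag carried in the HDMI General Control (GC) packet */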
1557	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1558	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1559	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1560}
1561
1562static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1563{
1564	struct drm_device *dev = encoder->dev;
1565	struct amdgpu_device *adev = dev->dev_private;
1566	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1567	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1568	u32 tmp;
1569
1570	if (enable) {
1571		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1572		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1573		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1574		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1575		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1576		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1577
1578		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1579		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1580		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1581
1582		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1583		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1584		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1585	} else {
1586		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1587		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1588		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1589		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1590		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1591		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1592
1593		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1594		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1595		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1596	}
1597}
1598
1599static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1600{
1601	struct drm_device *dev = encoder->dev;
1602	struct amdgpu_device *adev = dev->dev_private;
1603	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1604	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1605	u32 tmp;
1606
1607	if (enable) {
1608		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1609		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1610		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1611
1612		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1613		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1614		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1615
1616		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1617		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1618		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1619		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1620		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1621		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1622	} else {
1623		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1624	}
1625}
1626
1627static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1628				  struct drm_display_mode *mode)
1629{
1630	struct drm_device *dev = encoder->dev;
1631	struct amdgpu_device *adev = dev->dev_private;
1632	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1633	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1634	struct drm_connector *connector;
1635	struct amdgpu_connector *amdgpu_connector = NULL;
1636	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1637	int bpc = 8;
1638
1639	if (!dig || !dig->afmt)
1640		return;
1641
1642	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1643		if (connector->encoder == encoder) {
1644			amdgpu_connector = to_amdgpu_connector(connector);
1645			break;
1646		}
1647	}
1648
1649	if (!amdgpu_connector) {
1650		DRM_ERROR("Couldn't find encoder's connector\n");
1651		return;
1652	}
1653
1654	if (!dig->afmt->enabled)
1655		return;
1656
1657	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1658	if (!dig->afmt->pin)
1659		return;
1660
1661	if (encoder->crtc) {
1662		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1663		bpc = amdgpu_crtc->bpc;
1664	}
1665
1666	/* disable audio before setting up hw */
1667	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1668
1669	dce_v6_0_audio_set_mute(encoder, true);
1670	dce_v6_0_audio_write_speaker_allocation(encoder);
1671	dce_v6_0_audio_write_sad_regs(encoder);
1672	dce_v6_0_audio_write_latency_fields(encoder, mode);
1673	if (em == ATOM_ENCODER_MODE_HDMI) {
1674		dce_v6_0_audio_set_dto(encoder, mode->clock);
1675		dce_v6_0_audio_set_vbi_packet(encoder);
1676		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1677	} else if (ENCODER_MODE_IS_DP(em)) {
1678		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1679	}
1680	dce_v6_0_audio_set_packet(encoder);
1681	dce_v6_0_audio_select_pin(encoder);
1682	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1683	dce_v6_0_audio_set_mute(encoder, false);
1684	if (em == ATOM_ENCODER_MODE_HDMI) {
1685		dce_v6_0_audio_hdmi_enable(encoder, true);
1686	} else if (ENCODER_MODE_IS_DP(em)) {
1687		dce_v6_0_audio_dp_enable(encoder, true);
1688	}
1689
1690	/* enable audio after setting up hw */
1691	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1692}
1693
1694static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1695{
1696	struct drm_device *dev = encoder->dev;
1697	struct amdgpu_device *adev = dev->dev_private;
1698	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1699	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1700
1701	if (!dig || !dig->afmt)
1702		return;
1703
1704	/* Silent, r600_hdmi_enable will raise WARN for us */
1705	if (enable && dig->afmt->enabled)
1706		return;
1707
1708	if (!enable && !dig->afmt->enabled)
1709		return;
1710
1711	if (!enable && dig->afmt->pin) {
1712		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1713		dig->afmt->pin = NULL;
1714	}
1715
1716	dig->afmt->enabled = enable;
1717
1718	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1719		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1720}
1721
1722static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1723{
1724	int i, j;
1725
1726	for (i = 0; i < adev->mode_info.num_dig; i++)
1727		adev->mode_info.afmt[i] = NULL;
1728
1729	/* DCE6 has audio blocks tied to DIG encoders */
1730	for (i = 0; i < adev->mode_info.num_dig; i++) {
1731		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1732		if (adev->mode_info.afmt[i]) {
1733			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1734			adev->mode_info.afmt[i]->id = i;
1735		} else {
1736			for (j = 0; j < i; j++) {
1737				kfree(adev->mode_info.afmt[j]);
1738				adev->mode_info.afmt[j] = NULL;
1739			}
1740			DRM_ERROR("Out of memory allocating afmt table\n");
1741			return -ENOMEM;
1742		}
1743	}
1744	return 0;
1745}
1746
1747static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1748{
1749	int i;
1750
1751	for (i = 0; i < adev->mode_info.num_dig; i++) {
1752		kfree(adev->mode_info.afmt[i]);
1753		adev->mode_info.afmt[i] = NULL;
1754	}
1755}
1756
1757static const u32 vga_control_regs[6] =
1758{
1759	mmD1VGA_CONTROL,
1760	mmD2VGA_CONTROL,
1761	mmD3VGA_CONTROL,
1762	mmD4VGA_CONTROL,
1763	mmD5VGA_CONTROL,
1764	mmD6VGA_CONTROL,
1765};
1766
1767static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1768{
1769	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1770	struct drm_device *dev = crtc->dev;
1771	struct amdgpu_device *adev = dev->dev_private;
1772	u32 vga_control;
1773
1774	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1775	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1776}
1777
1778static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1779{
1780	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1781	struct drm_device *dev = crtc->dev;
1782	struct amdgpu_device *adev = dev->dev_private;
1783
1784	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1785}
1786
1787static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1788				     struct drm_framebuffer *fb,
1789				     int x, int y, int atomic)
1790{
1791	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1792	struct drm_device *dev = crtc->dev;
1793	struct amdgpu_device *adev = dev->dev_private;
1794	struct drm_framebuffer *target_fb;
1795	struct drm_gem_object *obj;
1796	struct amdgpu_bo *abo;
1797	uint64_t fb_location, tiling_flags;
1798	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1799	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1800	u32 viewport_w, viewport_h;
1801	int r;
1802	bool bypass_lut = false;
1803	struct drm_format_name_buf format_name;
1804
1805	/* no fb bound */
1806	if (!atomic && !crtc->primary->fb) {
1807		DRM_DEBUG_KMS("No FB bound\n");
1808		return 0;
1809	}
1810
1811	if (atomic)
1812		target_fb = fb;
1813	else
1814		target_fb = crtc->primary->fb;
1815
1816	/* If atomic, assume fb object is pinned & idle & fenced and
1817	 * just update base pointers
1818	 */
1819	obj = target_fb->obj[0];
1820	abo = gem_to_amdgpu_bo(obj);
1821	r = amdgpu_bo_reserve(abo, false);
1822	if (unlikely(r != 0))
1823		return r;
1824
1825	if (!atomic) {
1826		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1827		if (unlikely(r != 0)) {
1828			amdgpu_bo_unreserve(abo);
1829			return -EINVAL;
1830		}
1831	}
1832	fb_location = amdgpu_bo_gpu_offset(abo);
1833
1834	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1835	amdgpu_bo_unreserve(abo);
1836
1837	switch (target_fb->format->format) {
1838	case DRM_FORMAT_C8:
1839		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1840			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1841		break;
1842	case DRM_FORMAT_XRGB4444:
1843	case DRM_FORMAT_ARGB4444:
1844		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1845			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1846#ifdef __BIG_ENDIAN
1847		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1848#endif
1849		break;
1850	case DRM_FORMAT_XRGB1555:
1851	case DRM_FORMAT_ARGB1555:
1852		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1853			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1854#ifdef __BIG_ENDIAN
1855		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1856#endif
1857		break;
1858	case DRM_FORMAT_BGRX5551:
1859	case DRM_FORMAT_BGRA5551:
1860		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1861			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1862#ifdef __BIG_ENDIAN
1863		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1864#endif
1865		break;
1866	case DRM_FORMAT_RGB565:
1867		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1868			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1869#ifdef __BIG_ENDIAN
1870		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1871#endif
1872		break;
1873	case DRM_FORMAT_XRGB8888:
1874	case DRM_FORMAT_ARGB8888:
1875		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1876			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1877#ifdef __BIG_ENDIAN
1878		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1879#endif
1880		break;
1881	case DRM_FORMAT_XRGB2101010:
1882	case DRM_FORMAT_ARGB2101010:
1883		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1884			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1885#ifdef __BIG_ENDIAN
1886		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1887#endif
1888		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1889		bypass_lut = true;
1890		break;
1891	case DRM_FORMAT_BGRX1010102:
1892	case DRM_FORMAT_BGRA1010102:
1893		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1894			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1895#ifdef __BIG_ENDIAN
1896		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1897#endif
1898		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1899		bypass_lut = true;
1900		break;
1901	case DRM_FORMAT_XBGR8888:
1902	case DRM_FORMAT_ABGR8888:
1903		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1904			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1905		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1906			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1907#ifdef __BIG_ENDIAN
1908		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1909#endif
1910		break;
1911	default:
1912		DRM_ERROR("Unsupported screen format %s\n",
1913		          drm_get_format_name(target_fb->format->format, &format_name));
1914		return -EINVAL;
1915	}
1916
1917	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1918		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1919
1920		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1921		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1922		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1923		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1924		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1925
1926		fb_format |= GRPH_NUM_BANKS(num_banks);
1927		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1928		fb_format |= GRPH_TILE_SPLIT(tile_split);
1929		fb_format |= GRPH_BANK_WIDTH(bankw);
1930		fb_format |= GRPH_BANK_HEIGHT(bankh);
1931		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1932	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1933		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1934	}
1935
1936	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1937	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1938
1939	dce_v6_0_vga_enable(crtc, false);
1940
1941	/* Make sure surface address is updated at vertical blank rather than
1942	 * horizontal blank
1943	 */
1944	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1945
1946	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1947	       upper_32_bits(fb_location));
1948	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1949	       upper_32_bits(fb_location));
1950	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1951	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1952	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1953	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1954	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1955	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1956
1957	/*
1958	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1959	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1960	 * retain the full precision throughout the pipeline.
1961	 */
1962	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1963		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1964		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1965
1966	if (bypass_lut)
1967		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1968
1969	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1970	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1971	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1972	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1973	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1974	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1975
1976	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1977	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1978
1979	dce_v6_0_grph_enable(crtc, true);
1980
1981	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1982		       target_fb->height);
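	/* align the viewport start: x to 4 pixels, y to 2 lines
	 * (presumably a hw requirement, judging from the masking below)
	 */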
1983	x &= ~3;
1984	y &= ~1;
1985	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1986	       (x << 16) | y);
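	/* round the viewport height up to an even number of lines */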
1987	viewport_w = crtc->mode.hdisplay;
1988	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1989
1990	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1991	       (viewport_w << 16) | viewport_h);
1992
1993	/* set pageflip to happen anywhere in vblank interval */
1994	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1995
1996	if (!atomic && fb && fb != crtc->primary->fb) {
1997		abo = gem_to_amdgpu_bo(fb->obj[0]);
1998		r = amdgpu_bo_reserve(abo, true);
1999		if (unlikely(r != 0))
2000			return r;
2001		amdgpu_bo_unpin(abo);
2002		amdgpu_bo_unreserve(abo);
2003	}
2004
2005	/* Bytes per pixel may have changed */
2006	dce_v6_0_bandwidth_update(adev);
2007
2008	return 0;
2009
2010}
2011
2012static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2013				    struct drm_display_mode *mode)
2014{
2015	struct drm_device *dev = crtc->dev;
2016	struct amdgpu_device *adev = dev->dev_private;
2017	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2018
2019	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2020		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2021		       INTERLEAVE_EN);
2022	else
2023		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2024}
2025
2026static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2027{
2028
2029	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2030	struct drm_device *dev = crtc->dev;
2031	struct amdgpu_device *adev = dev->dev_private;
2032	u16 *r, *g, *b;
2033	int i;
2034
2035	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2036
2037	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2038	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2039		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2040	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2041	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2042	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2043	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2044	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2045	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2046		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2047
2048	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2049
2050	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2051	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2052	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2053
2054	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2055	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2056	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2057
2058	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2059	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2060
2061	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2062	r = crtc->gamma_store;
2063	g = r + crtc->gamma_size;
2064	b = g + crtc->gamma_size;
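	/* pack each 16-bit gamma entry into the 10:10:10 LUT format:
	 * R in bits 29:20, G in 19:10, B in 9:0 (top 10 bits of each)
	 */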
2065	for (i = 0; i < 256; i++) {
2066		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2067		       ((*r++ & 0xffc0) << 14) |
2068		       ((*g++ & 0xffc0) << 4) |
2069		       (*b++ >> 6));
2070	}
2071
2072	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2073	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2074		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2075		ICON_DEGAMMA_MODE(0) |
2076		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2077	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2078	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2079		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2080	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2081	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2082		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2083	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2084	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2085		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2086	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2087	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2090}
2091
2092static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2093{
2094	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2095	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2096
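	/* each UNIPHY block has two links (A and B); map them to DIG
	 * encoder indices, with UNIPHY3 always using DIG6
	 */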
2097	switch (amdgpu_encoder->encoder_id) {
2098	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2099		return dig->linkb ? 1 : 0;
2100	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2101		return dig->linkb ? 3 : 2;
2102	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2103		return dig->linkb ? 5 : 4;
2104	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2105		return 6;
2106	default:
2107		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2108		return 0;
2109	}
2110}
2111
2112/**
2113 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2114 *
2115 * @crtc: drm crtc
2116 *
2117 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2118 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2119 * monitors a dedicated PPLL must be used.  If a particular board has
2120 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2121 * as there is no need to program the PLL itself.  If we are not able to
2122 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2123 * avoid messing up an existing monitor.
2126 */
2127static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2128{
2129	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2130	struct drm_device *dev = crtc->dev;
2131	struct amdgpu_device *adev = dev->dev_private;
2132	u32 pll_in_use;
2133	int pll;
2134
2135	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2136		if (adev->clock.dp_extclk)
2137			/* skip PPLL programming if using ext clock */
2138			return ATOM_PPLL_INVALID;
2139		else
2140			return ATOM_PPLL0;
2141	} else {
2142		/* use the same PPLL for all monitors with the same clock */
2143		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2144		if (pll != ATOM_PPLL_INVALID)
2145			return pll;
2146	}
2147
2148	/* PPLL1 and PPLL2 */
2149	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2150	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2151		return ATOM_PPLL2;
2152	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2153		return ATOM_PPLL1;
2154	DRM_ERROR("unable to allocate a PPLL\n");
2155	return ATOM_PPLL_INVALID;
2156}
2157
2158static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2159{
2160	struct amdgpu_device *adev = crtc->dev->dev_private;
2161	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2162	uint32_t cur_lock;
2163
2164	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2165	if (lock)
2166		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2167	else
2168		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2169	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2170}
2171
2172static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2173{
2174	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2175	struct amdgpu_device *adev = crtc->dev->dev_private;
2176
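	/* rewriting CUR_CONTROL without CURSOR_EN hides the cursor
	 * while keeping the mode and urgency settings
	 */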
2177	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2178		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2179		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2182}
2183
2184static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2185{
2186	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2187	struct amdgpu_device *adev = crtc->dev->dev_private;
2188
2189	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2190	       upper_32_bits(amdgpu_crtc->cursor_addr));
2191	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2192	       lower_32_bits(amdgpu_crtc->cursor_addr));
2193
2194	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2195		   CUR_CONTROL__CURSOR_EN_MASK |
2196		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2197		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2198
2199}
2200
2201static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2202				       int x, int y)
2203{
2204	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2205	struct amdgpu_device *adev = crtc->dev->dev_private;
2206	int xorigin = 0, yorigin = 0;
2207	int w = amdgpu_crtc->cursor_width;
2209
2210	amdgpu_crtc->cursor_x = x;
2211	amdgpu_crtc->cursor_y = y;
2212
2213	/* avivo cursors are offset into the total surface */
2214	x += crtc->x;
2215	y += crtc->y;
2216	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2217
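	/* the hw cursor position can't be negative: clamp to 0 and shift
	 * the hotspot instead when the cursor crosses the top/left edge
	 */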
2218	if (x < 0) {
2219		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2220		x = 0;
2221	}
2222	if (y < 0) {
2223		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2224		y = 0;
2225	}
2226
2227	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2228	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2229	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2230	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2231
2232	return 0;
2233}
2234
2235static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2236				     int x, int y)
2237{
2238	int ret;
2239
2240	dce_v6_0_lock_cursor(crtc, true);
2241	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2242	dce_v6_0_lock_cursor(crtc, false);
2243
2244	return ret;
2245}
2246
2247static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2248				     struct drm_file *file_priv,
2249				     uint32_t handle,
2250				     uint32_t width,
2251				     uint32_t height,
2252				     int32_t hot_x,
2253				     int32_t hot_y)
2254{
2255	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2256	struct drm_gem_object *obj;
2257	struct amdgpu_bo *aobj;
2258	int ret;
2259
2260	if (!handle) {
2261		/* turn off cursor */
2262		dce_v6_0_hide_cursor(crtc);
2263		obj = NULL;
2264		goto unpin;
2265	}
2266
2267	if ((width > amdgpu_crtc->max_cursor_width) ||
2268	    (height > amdgpu_crtc->max_cursor_height)) {
2269		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2270		return -EINVAL;
2271	}
2272
2273	obj = drm_gem_object_lookup(file_priv, handle);
2274	if (!obj) {
2275		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2276		return -ENOENT;
2277	}
2278
2279	aobj = gem_to_amdgpu_bo(obj);
2280	ret = amdgpu_bo_reserve(aobj, false);
2281	if (ret != 0) {
2282		drm_gem_object_put_unlocked(obj);
2283		return ret;
2284	}
2285
2286	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2287	amdgpu_bo_unreserve(aobj);
2288	if (ret) {
2289		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2290		drm_gem_object_put_unlocked(obj);
2291		return ret;
2292	}
2293	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2294
2295	dce_v6_0_lock_cursor(crtc, true);
2296
2297	if (width != amdgpu_crtc->cursor_width ||
2298	    height != amdgpu_crtc->cursor_height ||
2299	    hot_x != amdgpu_crtc->cursor_hot_x ||
2300	    hot_y != amdgpu_crtc->cursor_hot_y) {
2301		int x, y;
2302
2303		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2304		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2305
2306		dce_v6_0_cursor_move_locked(crtc, x, y);
2307
2308		amdgpu_crtc->cursor_width = width;
2309		amdgpu_crtc->cursor_height = height;
2310		amdgpu_crtc->cursor_hot_x = hot_x;
2311		amdgpu_crtc->cursor_hot_y = hot_y;
2312	}
2313
2314	dce_v6_0_show_cursor(crtc);
2315	dce_v6_0_lock_cursor(crtc, false);
2316
2317unpin:
2318	if (amdgpu_crtc->cursor_bo) {
2319		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2320		ret = amdgpu_bo_reserve(aobj, true);
2321		if (likely(ret == 0)) {
2322			amdgpu_bo_unpin(aobj);
2323			amdgpu_bo_unreserve(aobj);
2324		}
2325		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2326	}
2327
2328	amdgpu_crtc->cursor_bo = obj;
2329	return 0;
2330}
2331
2332static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2333{
2334	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2335
2336	if (amdgpu_crtc->cursor_bo) {
2337		dce_v6_0_lock_cursor(crtc, true);
2338
2339		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2340					    amdgpu_crtc->cursor_y);
2341
2342		dce_v6_0_show_cursor(crtc);
2343		dce_v6_0_lock_cursor(crtc, false);
2344	}
2345}
2346
2347static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2348				   u16 *blue, uint32_t size,
2349				   struct drm_modeset_acquire_ctx *ctx)
2350{
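	/* at this point crtc->gamma_store already holds the new table
	 * (the drm core copies it in before calling this hook), so just
	 * reload the hw LUT from it
	 */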
2351	dce_v6_0_crtc_load_lut(crtc);
2352
2353	return 0;
2354}
2355
2356static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2357{
2358	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2359
2360	drm_crtc_cleanup(crtc);
2361	kfree(amdgpu_crtc);
2362}
2363
2364static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2365	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2366	.cursor_move = dce_v6_0_crtc_cursor_move,
2367	.gamma_set = dce_v6_0_crtc_gamma_set,
2368	.set_config = amdgpu_display_crtc_set_config,
2369	.destroy = dce_v6_0_crtc_destroy,
2370	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2371};
2372
2373static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2374{
2375	struct drm_device *dev = crtc->dev;
2376	struct amdgpu_device *adev = dev->dev_private;
2377	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2378	unsigned type;
2379
2380	switch (mode) {
2381	case DRM_MODE_DPMS_ON:
2382		amdgpu_crtc->enabled = true;
2383		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2384		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2385		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2386		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2387						amdgpu_crtc->crtc_id);
2388		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2389		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2390		drm_crtc_vblank_on(crtc);
2391		dce_v6_0_crtc_load_lut(crtc);
2392		break;
2393	case DRM_MODE_DPMS_STANDBY:
2394	case DRM_MODE_DPMS_SUSPEND:
2395	case DRM_MODE_DPMS_OFF:
2396		drm_crtc_vblank_off(crtc);
2397		if (amdgpu_crtc->enabled)
2398			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2399		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2400		amdgpu_crtc->enabled = false;
2401		break;
2402	}
2403	/* adjust pm to dpms */
2404	amdgpu_pm_compute_clocks(adev);
2405}
2406
2407static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2408{
2409	/* disable crtc pair power gating before programming */
2410	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2411	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2412	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2413}
2414
2415static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2416{
2417	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2418	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2419}
2420
2421static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2422{
2423
2424	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2425	struct drm_device *dev = crtc->dev;
2426	struct amdgpu_device *adev = dev->dev_private;
2427	struct amdgpu_atom_ss ss;
2428	int i;
2429
2430	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2431	if (crtc->primary->fb) {
2432		int r;
2433		struct amdgpu_bo *abo;
2434
2435		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2436		r = amdgpu_bo_reserve(abo, true);
2437		if (unlikely(r))
2438			DRM_ERROR("failed to reserve abo before unpin\n");
2439		else {
2440			amdgpu_bo_unpin(abo);
2441			amdgpu_bo_unreserve(abo);
2442		}
2443	}
2444	/* disable the GRPH */
2445	dce_v6_0_grph_enable(crtc, false);
2446
2447	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2448
2449	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2450		if (adev->mode_info.crtcs[i] &&
2451		    adev->mode_info.crtcs[i]->enabled &&
2452		    i != amdgpu_crtc->crtc_id &&
2453		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2454			/* another crtc is using this pll; don't
2455			 * turn it off
2456			 */
2457			goto done;
2458		}
2459	}
2460
2461	switch (amdgpu_crtc->pll_id) {
2462	case ATOM_PPLL1:
2463	case ATOM_PPLL2:
2464		/* disable the ppll */
2465		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2466						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2467		break;
2468	default:
2469		break;
2470	}
2471done:
2472	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2473	amdgpu_crtc->adjusted_clock = 0;
2474	amdgpu_crtc->encoder = NULL;
2475	amdgpu_crtc->connector = NULL;
2476}
2477
2478static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2479				  struct drm_display_mode *mode,
2480				  struct drm_display_mode *adjusted_mode,
2481				  int x, int y, struct drm_framebuffer *old_fb)
2482{
2483	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2484
2485	if (!amdgpu_crtc->adjusted_clock)
2486		return -EINVAL;
2487
2488	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2489	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2490	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2491	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2492	amdgpu_atombios_crtc_scaler_setup(crtc);
2493	dce_v6_0_cursor_reset(crtc);
2494	/* update the hw mode for dpm */
2495	amdgpu_crtc->hw_mode = *adjusted_mode;
2496
2497	return 0;
2498}
2499
2500static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2501				     const struct drm_display_mode *mode,
2502				     struct drm_display_mode *adjusted_mode)
2503{
2504
2505	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2506	struct drm_device *dev = crtc->dev;
2507	struct drm_encoder *encoder;
2508
2509	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2510	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2511		if (encoder->crtc == crtc) {
2512			amdgpu_crtc->encoder = encoder;
2513			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2514			break;
2515		}
2516	}
2517	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2518		amdgpu_crtc->encoder = NULL;
2519		amdgpu_crtc->connector = NULL;
2520		return false;
2521	}
2522	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2523		return false;
2524	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2525		return false;
2526	/* pick pll */
2527	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2528	/* if we can't get a PPLL for a non-DP encoder, fail */
2529	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2530	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2531		return false;
2532
2533	return true;
2534}
2535
2536static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2537				  struct drm_framebuffer *old_fb)
2538{
2539	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2540}
2541
2542static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2543					 struct drm_framebuffer *fb,
2544					 int x, int y, enum mode_set_atomic state)
2545{
2546	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2547}
2548
2549static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2550	.dpms = dce_v6_0_crtc_dpms,
2551	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2552	.mode_set = dce_v6_0_crtc_mode_set,
2553	.mode_set_base = dce_v6_0_crtc_set_base,
2554	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2555	.prepare = dce_v6_0_crtc_prepare,
2556	.commit = dce_v6_0_crtc_commit,
2557	.disable = dce_v6_0_crtc_disable,
2558};
2559
2560static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2561{
2562	struct amdgpu_crtc *amdgpu_crtc;
2563
2564	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2565			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2566	if (amdgpu_crtc == NULL)
2567		return -ENOMEM;
2568
2569	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2570
2571	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2572	amdgpu_crtc->crtc_id = index;
2573	adev->mode_info.crtcs[index] = amdgpu_crtc;
2574
2575	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2576	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2577	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2578	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2579
2580	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2581
2582	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2583	amdgpu_crtc->adjusted_clock = 0;
2584	amdgpu_crtc->encoder = NULL;
2585	amdgpu_crtc->connector = NULL;
2586	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2587
2588	return 0;
2589}
2590
2591static int dce_v6_0_early_init(void *handle)
2592{
2593	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2594
2595	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2596	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2597
2598	dce_v6_0_set_display_funcs(adev);
2599
2600	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2601
2602	switch (adev->asic_type) {
2603	case CHIP_TAHITI:
2604	case CHIP_PITCAIRN:
2605	case CHIP_VERDE:
2606		adev->mode_info.num_hpd = 6;
2607		adev->mode_info.num_dig = 6;
2608		break;
2609	case CHIP_OLAND:
2610		adev->mode_info.num_hpd = 2;
2611		adev->mode_info.num_dig = 2;
2612		break;
2613	default:
2614		return -EINVAL;
2615	}
2616
2617	dce_v6_0_set_irq_funcs(adev);
2618
2619	return 0;
2620}
2621
2622static int dce_v6_0_sw_init(void *handle)
2623{
2624	int r, i;
2625	bool ret;
2626	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2627
2628	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2629		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2630		if (r)
2631			return r;
2632	}
2633
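	/* pageflip interrupts: even src_ids 8, 10, ..., 18 map to crtc 0-5
	 * (decoded as (src_id - 8) >> 1 in dce_v6_0_pageflip_irq)
	 */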
2634	for (i = 8; i < 20; i += 2) {
2635		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2636		if (r)
2637			return r;
2638	}
2639
2640	/* HPD hotplug */
2641	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2642	if (r)
2643		return r;
2644
2645	adev->mode_info.mode_config_initialized = true;
2646
2647	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2648	adev->ddev->mode_config.async_page_flip = true;
2649	adev->ddev->mode_config.max_width = 16384;
2650	adev->ddev->mode_config.max_height = 16384;
2651	adev->ddev->mode_config.preferred_depth = 24;
2652	adev->ddev->mode_config.prefer_shadow = 1;
2653	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2654
2655	r = amdgpu_display_modeset_create_props(adev);
2656	if (r)
2657		return r;
2658
2662	/* allocate crtcs */
2663	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2664		r = dce_v6_0_crtc_init(adev, i);
2665		if (r)
2666			return r;
2667	}
2668
2669	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2670	if (ret)
2671		amdgpu_display_print_display_setup(adev->ddev);
2672	else
2673		return -EINVAL;
2674
2675	/* setup afmt */
2676	r = dce_v6_0_afmt_init(adev);
2677	if (r)
2678		return r;
2679
2680	r = dce_v6_0_audio_init(adev);
2681	if (r)
2682		return r;
2683
2684	drm_kms_helper_poll_init(adev->ddev);
2685
2686	return r;
2687}
2688
2689static int dce_v6_0_sw_fini(void *handle)
2690{
2691	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2692
2693	kfree(adev->mode_info.bios_hardcoded_edid);
2694
2695	drm_kms_helper_poll_fini(adev->ddev);
2696
2697	dce_v6_0_audio_fini(adev);
2698	dce_v6_0_afmt_fini(adev);
2699
2700	drm_mode_config_cleanup(adev->ddev);
2701	adev->mode_info.mode_config_initialized = false;
2702
2703	return 0;
2704}
2705
2706static int dce_v6_0_hw_init(void *handle)
2707{
2708	int i;
2709	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2710
2711	/* disable vga render */
2712	dce_v6_0_set_vga_render_state(adev, false);
2713	/* init dig PHYs, disp eng pll */
2714	amdgpu_atombios_encoder_init_dig(adev);
2715	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2716
2717	/* initialize hpd */
2718	dce_v6_0_hpd_init(adev);
2719
2720	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2721		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2722	}
2723
2724	dce_v6_0_pageflip_interrupt_init(adev);
2725
2726	return 0;
2727}
2728
2729static int dce_v6_0_hw_fini(void *handle)
2730{
2731	int i;
2732	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2733
2734	dce_v6_0_hpd_fini(adev);
2735
2736	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2737		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2738	}
2739
2740	dce_v6_0_pageflip_interrupt_fini(adev);
2741
2742	return 0;
2743}
2744
2745static int dce_v6_0_suspend(void *handle)
2746{
2747	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2748
2749	adev->mode_info.bl_level =
2750		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2751
2752	return dce_v6_0_hw_fini(handle);
2753}
2754
2755static int dce_v6_0_resume(void *handle)
2756{
2757	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2758	int ret;
2759
2760	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2761							   adev->mode_info.bl_level);
2762
2763	ret = dce_v6_0_hw_init(handle);
2764
2765	/* turn on the BL */
2766	if (adev->mode_info.bl_encoder) {
2767		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2768								  adev->mode_info.bl_encoder);
2769		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2770						    bl_level);
2771	}
2772
2773	return ret;
2774}
2775
2776static bool dce_v6_0_is_idle(void *handle)
2777{
2778	return true;
2779}
2780
2781static int dce_v6_0_wait_for_idle(void *handle)
2782{
2783	return 0;
2784}
2785
2786static int dce_v6_0_soft_reset(void *handle)
2787{
2788	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2789	return 0;
2790}
2791
2792static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2793						     int crtc,
2794						     enum amdgpu_interrupt_state state)
2795{
2796	u32 reg_block, interrupt_mask;
2797
2798	if (crtc >= adev->mode_info.num_crtc) {
2799		DRM_DEBUG("invalid crtc %d\n", crtc);
2800		return;
2801	}
2802
2803	switch (crtc) {
2804	case 0:
2805		reg_block = SI_CRTC0_REGISTER_OFFSET;
2806		break;
2807	case 1:
2808		reg_block = SI_CRTC1_REGISTER_OFFSET;
2809		break;
2810	case 2:
2811		reg_block = SI_CRTC2_REGISTER_OFFSET;
2812		break;
2813	case 3:
2814		reg_block = SI_CRTC3_REGISTER_OFFSET;
2815		break;
2816	case 4:
2817		reg_block = SI_CRTC4_REGISTER_OFFSET;
2818		break;
2819	case 5:
2820		reg_block = SI_CRTC5_REGISTER_OFFSET;
2821		break;
2822	default:
2823		DRM_DEBUG("invalid crtc %d\n", crtc);
2824		return;
2825	}
2826
2827	switch (state) {
2828	case AMDGPU_IRQ_STATE_DISABLE:
2829		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2830		interrupt_mask &= ~VBLANK_INT_MASK;
2831		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2832		break;
2833	case AMDGPU_IRQ_STATE_ENABLE:
2834		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2835		interrupt_mask |= VBLANK_INT_MASK;
2836		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2837		break;
2838	default:
2839		break;
2840	}
2841}
2842
2843static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2844						    int crtc,
2845						    enum amdgpu_interrupt_state state)
2846{
2847
2848}
2849
2850static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2851					    struct amdgpu_irq_src *src,
2852					    unsigned type,
2853					    enum amdgpu_interrupt_state state)
2854{
2855	u32 dc_hpd_int_cntl;
2856
2857	if (type >= adev->mode_info.num_hpd) {
2858		DRM_DEBUG("invalid hpd %d\n", type);
2859		return 0;
2860	}
2861
2862	switch (state) {
2863	case AMDGPU_IRQ_STATE_DISABLE:
2864		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2865		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2866		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2867		break;
2868	case AMDGPU_IRQ_STATE_ENABLE:
2869		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2870		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2871		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2872		break;
2873	default:
2874		break;
2875	}
2876
2877	return 0;
2878}
2879
2880static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2881					     struct amdgpu_irq_src *src,
2882					     unsigned type,
2883					     enum amdgpu_interrupt_state state)
2884{
2885	switch (type) {
2886	case AMDGPU_CRTC_IRQ_VBLANK1:
2887		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2888		break;
2889	case AMDGPU_CRTC_IRQ_VBLANK2:
2890		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2891		break;
2892	case AMDGPU_CRTC_IRQ_VBLANK3:
2893		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2894		break;
2895	case AMDGPU_CRTC_IRQ_VBLANK4:
2896		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2897		break;
2898	case AMDGPU_CRTC_IRQ_VBLANK5:
2899		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2900		break;
2901	case AMDGPU_CRTC_IRQ_VBLANK6:
2902		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2903		break;
2904	case AMDGPU_CRTC_IRQ_VLINE1:
2905		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2906		break;
2907	case AMDGPU_CRTC_IRQ_VLINE2:
2908		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2909		break;
2910	case AMDGPU_CRTC_IRQ_VLINE3:
2911		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2912		break;
2913	case AMDGPU_CRTC_IRQ_VLINE4:
2914		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2915		break;
2916	case AMDGPU_CRTC_IRQ_VLINE5:
2917		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2918		break;
2919	case AMDGPU_CRTC_IRQ_VLINE6:
2920		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2921		break;
2922	default:
2923		break;
2924	}
2925	return 0;
2926}
2927
2928static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2929			     struct amdgpu_irq_src *source,
2930			     struct amdgpu_iv_entry *entry)
2931{
2932	unsigned crtc = entry->src_id - 1;
2933	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2934	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2935								    crtc);
2936
2937	switch (entry->src_data[0]) {
2938	case 0: /* vblank */
2939		if (disp_int & interrupt_status_offsets[crtc].vblank)
2940			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2941		else
2942			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2943
2944		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2945			drm_handle_vblank(adev->ddev, crtc);
2946		}
2947		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2948		break;
2949	case 1: /* vline */
2950		if (disp_int & interrupt_status_offsets[crtc].vline)
2951			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2952		else
2953			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2954
2955		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2956		break;
2957	default:
2958		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2959		break;
2960	}
2961
2962	return 0;
2963}
2964
2965static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2966						 struct amdgpu_irq_src *src,
2967						 unsigned type,
2968						 enum amdgpu_interrupt_state state)
2969{
2970	u32 reg;
2971
2972	if (type >= adev->mode_info.num_crtc) {
2973		DRM_ERROR("invalid pageflip crtc %d\n", type);
2974		return -EINVAL;
2975	}
2976
2977	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2978	if (state == AMDGPU_IRQ_STATE_DISABLE)
2979		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2980		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2981	else
2982		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2983		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2984
2985	return 0;
2986}
2987
2988static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2989				 struct amdgpu_irq_src *source,
2990				 struct amdgpu_iv_entry *entry)
2991{
2992	unsigned long flags;
2993	unsigned crtc_id;
2994	struct amdgpu_crtc *amdgpu_crtc;
2995	struct amdgpu_flip_work *works;
2996
2997	crtc_id = (entry->src_id - 8) >> 1;
2998
2999	if (crtc_id >= adev->mode_info.num_crtc) {
3000		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3001		return -EINVAL;
3002	}
3003	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3004
3005	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3006	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3007		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3008		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3009
3010	/* the IRQ can arrive before the crtc is fully initialized */
3011	if (amdgpu_crtc == NULL)
3012		return 0;
3013
3014	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3015	works = amdgpu_crtc->pflip_works;
3016	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3017		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3018						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3019						amdgpu_crtc->pflip_status,
3020						AMDGPU_FLIP_SUBMITTED);
3021		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3022		return 0;
3023	}
3024
3025	/* page flip completed. clean up */
3026	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3027	amdgpu_crtc->pflip_works = NULL;
3028
3029	/* wake up userspace */
3030	if (works->event)
3031		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3032
3033	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3034
3035	drm_crtc_vblank_put(&amdgpu_crtc->base);
3036	schedule_work(&works->unpin_work);
3037
3038	return 0;
3039}
3040
3041static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3042			    struct amdgpu_irq_src *source,
3043			    struct amdgpu_iv_entry *entry)
3044{
3045	uint32_t disp_int, mask, tmp;
3046	unsigned hpd;
3047
3048	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3049		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3050		return 0;
3051	}
3052
3053	hpd = entry->src_data[0];
3054	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3055	mask = interrupt_status_offsets[hpd].hpd;
3056
3057	if (disp_int & mask) {
3058		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3059		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3060		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3061		schedule_work(&adev->hotplug_work);
3062		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3063	}
3064
3065	return 0;
3066
3067}
3068
3069static int dce_v6_0_set_clockgating_state(void *handle,
3070					  enum amd_clockgating_state state)
3071{
3072	return 0;
3073}
3074
3075static int dce_v6_0_set_powergating_state(void *handle,
3076					  enum amd_powergating_state state)
3077{
3078	return 0;
3079}
3080
3081static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3082	.name = "dce_v6_0",
3083	.early_init = dce_v6_0_early_init,
3084	.late_init = NULL,
3085	.sw_init = dce_v6_0_sw_init,
3086	.sw_fini = dce_v6_0_sw_fini,
3087	.hw_init = dce_v6_0_hw_init,
3088	.hw_fini = dce_v6_0_hw_fini,
3089	.suspend = dce_v6_0_suspend,
3090	.resume = dce_v6_0_resume,
3091	.is_idle = dce_v6_0_is_idle,
3092	.wait_for_idle = dce_v6_0_wait_for_idle,
3093	.soft_reset = dce_v6_0_soft_reset,
3094	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3095	.set_powergating_state = dce_v6_0_set_powergating_state,
3096};
3097
3098static void
3099dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3100			  struct drm_display_mode *mode,
3101			  struct drm_display_mode *adjusted_mode)
3102{
3103
3104	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3105	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3106
3107	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3108
3109	/* need to call this here rather than in prepare() since we need some crtc info */
3110	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3111
3112	/* set scaler clears this on some chips */
3113	dce_v6_0_set_interleave(encoder->crtc, mode);
3114
3115	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3116		dce_v6_0_afmt_enable(encoder, true);
3117		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3118	}
3119}
3120
3121static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3122{
3123
3124	struct amdgpu_device *adev = encoder->dev->dev_private;
3125	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3126	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3127
3128	if ((amdgpu_encoder->active_device &
3129	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3130	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3131	     ENCODER_OBJECT_ID_NONE)) {
3132		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3133		if (dig) {
3134			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3135			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3136				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3137		}
3138	}
3139
3140	amdgpu_atombios_scratch_regs_lock(adev, true);
3141
3142	if (connector) {
3143		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3144
3145		/* select the clock/data port if it uses a router */
3146		if (amdgpu_connector->router.cd_valid)
3147			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3148
3149		/* turn eDP panel on for mode set */
3150		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3151			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3152							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3153	}
3154
3155	/* this is needed for the pll/ss setup to work correctly in some cases */
3156	amdgpu_atombios_encoder_set_crtc_source(encoder);
3157	/* set up the FMT blocks */
3158	dce_v6_0_program_fmt(encoder);
3159}
3160
3161static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3162{
3163
3164	struct drm_device *dev = encoder->dev;
3165	struct amdgpu_device *adev = dev->dev_private;
3166
3167	/* need to call this here as we need the crtc set up */
3168	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3169	amdgpu_atombios_scratch_regs_lock(adev, false);
3170}
3171
3172static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3173{
3174
3175	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3176	struct amdgpu_encoder_atom_dig *dig;
3177	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3178
3179	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3180
3181	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3182		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3183			dce_v6_0_afmt_enable(encoder, false);
3184		dig = amdgpu_encoder->enc_priv;
3185		dig->dig_encoder = -1;
3186	}
3187	amdgpu_encoder->active_device = 0;
3188}
3189
3190/* these are handled by the primary encoders */
3191static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3192{
3193
3194}
3195
3196static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3197{
3198
3199}
3200
3201static void
3202dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3203		      struct drm_display_mode *mode,
3204		      struct drm_display_mode *adjusted_mode)
3205{
3206
3207}
3208
3209static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3210{
3211
3212}
3213
3214static void
3215dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3216{
3217
3218}
3219
3220static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3221				    const struct drm_display_mode *mode,
3222				    struct drm_display_mode *adjusted_mode)
3223{
3224	return true;
3225}
3226
3227static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3228	.dpms = dce_v6_0_ext_dpms,
3229	.mode_fixup = dce_v6_0_ext_mode_fixup,
3230	.prepare = dce_v6_0_ext_prepare,
3231	.mode_set = dce_v6_0_ext_mode_set,
3232	.commit = dce_v6_0_ext_commit,
3233	.disable = dce_v6_0_ext_disable,
3234	/* no detect for TMDS/LVDS yet */
3235};
3236
3237static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3238	.dpms = amdgpu_atombios_encoder_dpms,
3239	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3240	.prepare = dce_v6_0_encoder_prepare,
3241	.mode_set = dce_v6_0_encoder_mode_set,
3242	.commit = dce_v6_0_encoder_commit,
3243	.disable = dce_v6_0_encoder_disable,
3244	.detect = amdgpu_atombios_encoder_dig_detect,
3245};
3246
3247static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3248	.dpms = amdgpu_atombios_encoder_dpms,
3249	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3250	.prepare = dce_v6_0_encoder_prepare,
3251	.mode_set = dce_v6_0_encoder_mode_set,
3252	.commit = dce_v6_0_encoder_commit,
3253	.detect = amdgpu_atombios_encoder_dac_detect,
3254};
3255
3256static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3257{
3258	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3259	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3260		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3261	kfree(amdgpu_encoder->enc_priv);
3262	drm_encoder_cleanup(encoder);
3263	kfree(amdgpu_encoder);
3264}
3265
3266static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3267	.destroy = dce_v6_0_encoder_destroy,
3268};
3269
3270static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3271				 uint32_t encoder_enum,
3272				 uint32_t supported_device,
3273				 u16 caps)
3274{
3275	struct drm_device *dev = adev->ddev;
3276	struct drm_encoder *encoder;
3277	struct amdgpu_encoder *amdgpu_encoder;
3278
3279	/* see if we already added it */
3280	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3281		amdgpu_encoder = to_amdgpu_encoder(encoder);
3282		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3283			amdgpu_encoder->devices |= supported_device;
3284			return;
3285		}
3286
3287	}
3288
3289	/* add a new one */
3290	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3291	if (!amdgpu_encoder)
3292		return;
3293
3294	encoder = &amdgpu_encoder->base;
3295	switch (adev->mode_info.num_crtc) {
3296	case 1:
3297		encoder->possible_crtcs = 0x1;
3298		break;
3299	case 2:
3300	default:
3301		encoder->possible_crtcs = 0x3;
3302		break;
3303	case 4:
3304		encoder->possible_crtcs = 0xf;
3305		break;
3306	case 6:
3307		encoder->possible_crtcs = 0x3f;
3308		break;
3309	}
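	/*
	 * possible_crtcs is a bitmask with one bit per CRTC the encoder can
	 * be routed to, e.g. with six CRTCs 0x3f sets bits 0-5 (CRTC0-CRTC5);
	 * the two-CRTC mask 0x3 doubles as the safe default.
	 */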
3310
3311	amdgpu_encoder->enc_priv = NULL;
3312	amdgpu_encoder->encoder_enum = encoder_enum;
3313	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3314	amdgpu_encoder->devices = supported_device;
3315	amdgpu_encoder->rmx_type = RMX_OFF;
3316	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3317	amdgpu_encoder->is_ext_encoder = false;
3318	amdgpu_encoder->caps = caps;
3319
3320	switch (amdgpu_encoder->encoder_id) {
3321	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3322	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3323		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3324				 DRM_MODE_ENCODER_DAC, NULL);
3325		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3326		break;
3327	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3328	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3329	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3330	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3331	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3332		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3333			amdgpu_encoder->rmx_type = RMX_FULL;
3334			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3335					 DRM_MODE_ENCODER_LVDS, NULL);
3336			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3337		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3338			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3339					 DRM_MODE_ENCODER_DAC, NULL);
3340			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3341		} else {
3342			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3343					 DRM_MODE_ENCODER_TMDS, NULL);
3344			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3345		}
3346		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3347		break;
3348	case ENCODER_OBJECT_ID_SI170B:
3349	case ENCODER_OBJECT_ID_CH7303:
3350	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3351	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3352	case ENCODER_OBJECT_ID_TITFP513:
3353	case ENCODER_OBJECT_ID_VT1623:
3354	case ENCODER_OBJECT_ID_HDMI_SI1930:
3355	case ENCODER_OBJECT_ID_TRAVIS:
3356	case ENCODER_OBJECT_ID_NUTMEG:
3357		/* these are handled by the primary encoders */
3358		amdgpu_encoder->is_ext_encoder = true;
3359		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3360			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3361					 DRM_MODE_ENCODER_LVDS, NULL);
3362		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3363			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3364					 DRM_MODE_ENCODER_DAC, NULL);
3365		else
3366			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3367					 DRM_MODE_ENCODER_TMDS, NULL);
3368		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3369		break;
3370	}
3371}
3372
3373static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3374	.bandwidth_update = &dce_v6_0_bandwidth_update,
3375	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3376	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3377	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3378	.hpd_sense = &dce_v6_0_hpd_sense,
3379	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3380	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3381	.page_flip = &dce_v6_0_page_flip,
3382	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3383	.add_encoder = &dce_v6_0_encoder_add,
3384	.add_connector = &amdgpu_connector_add,
3385};
3386
3387static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3388{
3389	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3390}
3391
3392static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3393	.set = dce_v6_0_set_crtc_interrupt_state,
3394	.process = dce_v6_0_crtc_irq,
3395};
3396
3397static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3398	.set = dce_v6_0_set_pageflip_interrupt_state,
3399	.process = dce_v6_0_pageflip_irq,
3400};
3401
3402static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3403	.set = dce_v6_0_set_hpd_interrupt_state,
3404	.process = dce_v6_0_hpd_irq,
3405};
3406
3407static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3408{
3409	if (adev->mode_info.num_crtc > 0)
3410		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3411	else
3412		adev->crtc_irq.num_types = 0;
3413	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3414
3415	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3416	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3417
3418	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3419	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3420}
3421
3422const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3423{
3424	.type = AMD_IP_BLOCK_TYPE_DCE,
3425	.major = 6,
3426	.minor = 0,
3427	.rev = 0,
3428	.funcs = &dce_v6_0_ip_funcs,
3429};
3430
3431const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3432{
3433	.type = AMD_IP_BLOCK_TYPE_DCE,
3434	.major = 6,
3435	.minor = 4,
3436	.rev = 0,
3437	.funcs = &dce_v6_0_ip_funcs,
3438};
v4.10.11
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "drmP.h"
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "atom.h"
  28#include "amdgpu_atombios.h"
  29#include "atombios_crtc.h"
  30#include "atombios_encoders.h"
  31#include "amdgpu_pll.h"
  32#include "amdgpu_connectors.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gca/gfx_6_0_d.h"
  39#include "gca/gfx_6_0_sh_mask.h"
  40#include "gmc/gmc_6_0_d.h"
  41#include "gmc/gmc_6_0_sh_mask.h"
  42#include "dce/dce_6_0_d.h"
  43#include "dce/dce_6_0_sh_mask.h"
  44#include "gca/gfx_7_2_enum.h"
  45#include "si_enums.h"
  46
  47static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  49
  50static const u32 crtc_offsets[6] =
  51{
  52	SI_CRTC0_REGISTER_OFFSET,
  53	SI_CRTC1_REGISTER_OFFSET,
  54	SI_CRTC2_REGISTER_OFFSET,
  55	SI_CRTC3_REGISTER_OFFSET,
  56	SI_CRTC4_REGISTER_OFFSET,
  57	SI_CRTC5_REGISTER_OFFSET
  58};
  59
  60static const u32 hpd_offsets[] =
  61{
  62	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
  63	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
  64	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
  65	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
  66	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
  67	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
  68};
  69
  70static const uint32_t dig_offsets[] = {
  71	SI_CRTC0_REGISTER_OFFSET,
  72	SI_CRTC1_REGISTER_OFFSET,
  73	SI_CRTC2_REGISTER_OFFSET,
  74	SI_CRTC3_REGISTER_OFFSET,
  75	SI_CRTC4_REGISTER_OFFSET,
  76	SI_CRTC5_REGISTER_OFFSET,
  77	(0x13830 - 0x7030) >> 2,
  78};
  79
  80static const struct {
  81	uint32_t	reg;
  82	uint32_t	vblank;
  83	uint32_t	vline;
  84	uint32_t	hpd;
  85
  86} interrupt_status_offsets[6] = { {
  87	.reg = mmDISP_INTERRUPT_STATUS,
  88	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
  89	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
  90	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
  91}, {
  92	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
  93	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
  94	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
  95	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
  96}, {
  97	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
  98	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
  99	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 100	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 101}, {
 102	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 103	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 104	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 105	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 106}, {
 107	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 108	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 109	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 110	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 111}, {
 112	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 113	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 114	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 115	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 116} };
 117
 118static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119				     u32 block_offset, u32 reg)
 120{
 121	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
 122	return 0;
 123}
 124
 125static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 126				      u32 block_offset, u32 reg, u32 v)
 127{
 128	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
 129}
 130
 131static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 132{
 133	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
 134		return true;
 135	else
 136		return false;
 137}
 138
 139static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 140{
 141	u32 pos1, pos2;
 142
 143	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 144	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 145
 146	if (pos1 != pos2)
 147		return true;
 148	else
 149		return false;
 150}
 151
 152/**
 153 * dce_v6_0_vblank_wait - vblank wait asic callback.
 154 *
 155 * @crtc: crtc to wait for vblank on
 156 *
 157 * Wait for vblank on the requested crtc (evergreen+).
 158 */
 159static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 160{
 161	unsigned i = 100;
 162
 163	if (crtc >= adev->mode_info.num_crtc)
 164		return;
 165
 166	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 167		return;
 168
 169	/* depending on when we hit vblank, we may be close to active; if so,
 170	 * wait for another frame.
 171	 */
 172	while (dce_v6_0_is_in_vblank(adev, crtc)) {
 173		if (i++ == 100) {
 174			i = 0;
 175			if (!dce_v6_0_is_counter_moving(adev, crtc))
 176				break;
 177		}
 178	}
 179
 180	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
 181		if (i++ == 100) {
 182			i = 0;
 183			if (!dce_v6_0_is_counter_moving(adev, crtc))
 184				break;
 185		}
 186	}
 187}
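/*
 * Note on the double loop above: the first loop waits out any vblank that
 * is already in progress, the second waits for the next one to begin, so
 * the caller always gets a full, fresh vblank interval.  Both loops
 * periodically re-read CRTC_STATUS_POSITION and bail out if the position
 * counter has stopped moving (e.g. the CRTC was disabled meanwhile).
 */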
 188
 189static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 190{
 191	if (crtc >= adev->mode_info.num_crtc)
 192		return 0;
 193	else
 194		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 195}
 196
 197static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 198{
 199	unsigned i;
 200
 201	/* Enable pflip interrupts */
 202	for (i = 0; i < adev->mode_info.num_crtc; i++)
 203		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 204}
 205
 206static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 207{
 208	unsigned i;
 209
 210	/* Disable pflip interrupts */
 211	for (i = 0; i < adev->mode_info.num_crtc; i++)
 212		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 213}
 214
 215/**
 216 * dce_v6_0_page_flip - pageflip callback.
 217 *
 218 * @adev: amdgpu_device pointer
 219 * @crtc_id: crtc to cleanup pageflip on
 220 * @crtc_base: new address of the crtc (GPU MC address)
 221 *
 222 * Does the actual pageflip (evergreen+).
 223 * During vblank we take the crtc lock and wait for the update_pending
 224 * bit to go high, when it does, we release the lock, and allow the
 225 * double buffered update to take place.
 226 * Returns the current update pending status.
 227 */
 228static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 229			       int crtc_id, u64 crtc_base, bool async)
 230{
 231	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 232
 233	/* flip at hsync for async, default is vsync */
 234	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 235	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 236	/* update the scanout addresses */
 237	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 238	       upper_32_bits(crtc_base));
 239	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 240	       (u32)crtc_base);
 241
 242	/* post the write */
 243	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 244}
 245
 246static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 247					u32 *vbl, u32 *position)
 248{
 249	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 250		return -EINVAL;
 251	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 252	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 253
 254	return 0;
 255
 256}
 257
 258/**
 259 * dce_v6_0_hpd_sense - hpd sense callback.
 260 *
 261 * @adev: amdgpu_device pointer
 262 * @hpd: hpd (hotplug detect) pin
 263 *
 264 * Checks if a digital monitor is connected (evergreen+).
 265 * Returns true if connected, false if not connected.
 266 */
 267static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 268			       enum amdgpu_hpd_id hpd)
 269{
 270	bool connected = false;
 271
 272	if (hpd >= adev->mode_info.num_hpd)
 273		return connected;
 274
 275	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 276		connected = true;
 277
 278	return connected;
 279}
 280
 281/**
 282 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 283 *
 284 * @adev: amdgpu_device pointer
 285 * @hpd: hpd (hotplug detect) pin
 286 *
 287 * Set the polarity of the hpd pin (evergreen+).
 288 */
 289static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 290				      enum amdgpu_hpd_id hpd)
 291{
 292	u32 tmp;
 293	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 294
 295	if (hpd >= adev->mode_info.num_hpd)
 296		return;
 297
 298	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 299	if (connected)
 300		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 301	else
 302		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 303	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 304}
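/*
 * The polarity bit is armed against the current sense (cleared while a
 * monitor is connected, set while not), presumably so that the next HPD
 * interrupt fires exactly when the connection state changes.
 */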
 305
 306/**
 307 * dce_v6_0_hpd_init - hpd setup callback.
 308 *
 309 * @adev: amdgpu_device pointer
 310 *
 311 * Setup the hpd pins used by the card (evergreen+).
 312 * Enable the pin, set the polarity, and enable the hpd interrupts.
 313 */
 314static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 315{
 316	struct drm_device *dev = adev->ddev;
 317	struct drm_connector *connector;
 318	u32 tmp;
 319
 320	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 321		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 322
 323		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 324			continue;
 325
 326		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 327		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 328		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 329
 330		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 331		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 332			/* don't try to enable hpd on eDP or LVDS: it avoids breaking the
 333			 * aux dp channel on iMacs and helps (but does not completely fix)
 334			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 335			 * and also avoids interrupt storms during dpms.
 336			 */
 337			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 338			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 339			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 340			continue;
 341		}
 342
 343		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 344		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 345	}
 346
 347}
 348
 349/**
 350 * dce_v6_0_hpd_fini - hpd tear down callback.
 351 *
 352 * @adev: amdgpu_device pointer
 353 *
 354 * Tear down the hpd pins used by the card (evergreen+).
 355 * Disable the hpd interrupts.
 356 */
 357static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 358{
 359	struct drm_device *dev = adev->ddev;
 360	struct drm_connector *connector;
 361	u32 tmp;
 362
 363	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 364		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 365
 366		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 367			continue;
 368
 369		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 370		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 371		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 372
 373		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 374	}
 375}
 376
 377static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 378{
 379	return mmDC_GPIO_HPD_A;
 380}
 381
 382static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
 383{
 384	if (crtc >= adev->mode_info.num_crtc)
 385		return 0;
 386	else
 387		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 388}
 389
 390static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
 391				    struct amdgpu_mode_mc_save *save)
 392{
 393	u32 crtc_enabled, tmp, frame_count;
 394	int i, j;
 395
 396	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
 397	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 398
 399	/* disable VGA render */
 400	WREG32(mmVGA_RENDER_CONTROL, 0);
 401
 402	/* blank the display controllers */
 403	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 404		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 405		if (crtc_enabled) {
 406			save->crtc_enabled[i] = true;
 407			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 408
 409			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
 410				dce_v6_0_vblank_wait(adev, i);
 411				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 412				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
 413				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 414				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 415			}
 416			/* wait for the next frame */
 417			frame_count = evergreen_get_vblank_counter(adev, i);
 418			for (j = 0; j < adev->usec_timeout; j++) {
 419				if (evergreen_get_vblank_counter(adev, i) != frame_count)
 420					break;
 421				udelay(1);
 422			}
 423
 424			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 425			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 426			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 427			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 428			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 429			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 430			save->crtc_enabled[i] = false;
 431			/* ***** */
 432		} else {
 433			save->crtc_enabled[i] = false;
 434		}
 435	}
 436}
 437
 438static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 439				      struct amdgpu_mode_mc_save *save)
 440{
 441	u32 tmp;
 442	int i, j;
 443
 444	/* update crtc base addresses */
 445	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 446		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 447		       upper_32_bits(adev->mc.vram_start));
 448		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 449		       upper_32_bits(adev->mc.vram_start));
 450		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 451		       (u32)adev->mc.vram_start);
 452		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
 453		       (u32)adev->mc.vram_start);
 454	}
 455
 456	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
 457	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
 458
 459	/* unlock regs and wait for update */
 460	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 461		if (save->crtc_enabled[i]) {
 462			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
 463			if ((tmp & 0x7) != 0) {
 464				tmp &= ~0x7;
 465				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 466			}
 467			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 468			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
 469				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
 470				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
 471			}
 472			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
 473			if (tmp & 1) {
 474				tmp &= ~1;
 475				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
 476			}
 477			for (j = 0; j < adev->usec_timeout; j++) {
 478				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
 479				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
 480					break;
 481				udelay(1);
 482			}
 483		}
 484	}
 485
 486	/* Unlock vga access */
 487	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 488	mdelay(1);
 489	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 490
 491}
 492
 493static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 494					  bool render)
 495{
 496	if (!render)
 497		WREG32(mmVGA_RENDER_CONTROL,
 498			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 499
 500}
 501
 502static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 503{
 504	int num_crtc = 0;
 505
 506	switch (adev->asic_type) {
 507	case CHIP_TAHITI:
 508	case CHIP_PITCAIRN:
 509	case CHIP_VERDE:
 510		num_crtc = 6;
 511		break;
 512	case CHIP_OLAND:
 513		num_crtc = 2;
 514		break;
 515	default:
 516		num_crtc = 0;
 517	}
 518	return num_crtc;
 519}
 520
 521void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 522{
 523	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
 524	if (amdgpu_atombios_has_dce_engine_info(adev)) {
 525		u32 tmp;
 526		int crtc_enabled, i;
 527
 528		dce_v6_0_set_vga_render_state(adev, false);
 529
 530		/* Disable CRTCs */
 531		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
 532			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
 533				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 534			if (crtc_enabled) {
 535				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 536				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 537				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 538				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 539				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 540			}
 541		}
 542	}
 543}
 544
 545static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 546{
 547
 548	struct drm_device *dev = encoder->dev;
 549	struct amdgpu_device *adev = dev->dev_private;
 550	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 551	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 552	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 553	int bpc = 0;
 554	u32 tmp = 0;
 555	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
 556
 557	if (connector) {
 558		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 559		bpc = amdgpu_connector_get_monitor_bpc(connector);
 560		dither = amdgpu_connector->dither;
 561	}
 562
 563	/* LVDS FMT is set up by atom */
 564	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
 565		return;
 566
 567	if (bpc == 0)
 568		return;
 569
 570
 571	switch (bpc) {
 572	case 6:
 573		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 574			/* XXX sort out optimal dither settings */
 575			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 576				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 577				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 578		else
 579			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 580		break;
 581	case 8:
 582		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 583			/* XXX sort out optimal dither settings */
 584			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
 585				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
 586				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
 587				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
 588				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 589		else
 590			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
 591				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 592		break;
 593	case 10:
 594	default:
 595		/* not needed */
 596		break;
 597	}
 598
 599	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 600}
 601
 602/**
 603 * si_get_number_of_dram_channels - get the number of dram channels
 604 *
 605 * @adev: amdgpu_device pointer
 606 *
 607 * Look up the number of video ram channels (SI).
 608 * Used for display watermark bandwidth calculations
 609 * Returns the number of dram channels
 610 */
 611static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 612{
 613	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 614
 615	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 616	case 0:
 617	default:
 618		return 1;
 619	case 1:
 620		return 2;
 621	case 2:
 622		return 4;
 623	case 3:
 624		return 8;
 625	case 4:
 626		return 3;
 627	case 5:
 628		return 6;
 629	case 6:
 630		return 10;
 631	case 7:
 632		return 12;
 633	case 8:
 634		return 16;
 635	}
 636}
 637
 638struct dce6_wm_params {
 639	u32 dram_channels; /* number of dram channels */
 640	u32 yclk;          /* bandwidth per dram data pin in kHz */
 641	u32 sclk;          /* engine clock in kHz */
 642	u32 disp_clk;      /* display clock in kHz */
 643	u32 src_width;     /* viewport width */
 644	u32 active_time;   /* active display time in ns */
 645	u32 blank_time;    /* blank time in ns */
 646	bool interlaced;    /* mode is interlaced */
 647	fixed20_12 vsc;    /* vertical scale ratio */
 648	u32 num_heads;     /* number of active crtcs */
 649	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
 650	u32 lb_size;       /* line buffer allocated to pipe */
 651	u32 vtaps;         /* vertical scaler taps */
 652};
 653
 654/**
 655 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 656 *
 657 * @wm: watermark calculation data
 658 *
 659 * Calculate the raw dram bandwidth (CIK).
 660 * Used for display watermark bandwidth calculations
 661 * Returns the dram bandwidth in MBytes/s
 662 */
 663static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
 664{
 665	/* Calculate raw DRAM Bandwidth */
 666	fixed20_12 dram_efficiency; /* 0.7 */
 667	fixed20_12 yclk, dram_channels, bandwidth;
 668	fixed20_12 a;
 669
 670	a.full = dfixed_const(1000);
 671	yclk.full = dfixed_const(wm->yclk);
 672	yclk.full = dfixed_div(yclk, a);
 673	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 674	a.full = dfixed_const(10);
 675	dram_efficiency.full = dfixed_const(7);
 676	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 677	bandwidth.full = dfixed_mul(dram_channels, yclk);
 678	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 679
 680	return dfixed_trunc(bandwidth);
 681}
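/*
 * Worked example (illustrative, hypothetical numbers): with
 * wm->dram_channels = 2 and wm->yclk = 1000000 (1 GHz per pin), the
 * fixed-point math above computes
 *   bandwidth = (2 * 4) * (1000000 / 1000) * 0.7 = 5600 MBytes/s,
 * i.e. channels * 4 bytes * yclk in MHz * 0.7 dram efficiency.
 */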
 682
 683/**
 684 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 685 *
 686 * @wm: watermark calculation data
 687 *
 688 * Calculate the dram bandwidth used for display (CIK).
 689 * Used for display watermark bandwidth calculations
 690 * Returns the dram bandwidth for display in MBytes/s
 691 */
 692static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 693{
 694	/* Calculate DRAM Bandwidth and the part allocated to display. */
 695	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 696	fixed20_12 yclk, dram_channels, bandwidth;
 697	fixed20_12 a;
 698
 699	a.full = dfixed_const(1000);
 700	yclk.full = dfixed_const(wm->yclk);
 701	yclk.full = dfixed_div(yclk, a);
 702	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 703	a.full = dfixed_const(10);
 704	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
 705	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 706	bandwidth.full = dfixed_mul(dram_channels, yclk);
 707	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 708
 709	return dfixed_trunc(bandwidth);
 710}
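/*
 * Same computation as dce_v6_0_dram_bandwidth() but with the 0.3 worst-case
 * display allocation in place of the 0.7 dram efficiency; the hypothetical
 * numbers above would give (2 * 4) * 1000 * 0.3 = 2400 MBytes/s for display.
 */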
 711
 712/**
 713 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 714 *
 715 * @wm: watermark calculation data
 716 *
 717 * Calculate the data return bandwidth used for display (CIK).
 718 * Used for display watermark bandwidth calculations
 719 * Returns the data return bandwidth in MBytes/s
 720 */
 721static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
 722{
 723	/* Calculate the display Data return Bandwidth */
 724	fixed20_12 return_efficiency; /* 0.8 */
 725	fixed20_12 sclk, bandwidth;
 726	fixed20_12 a;
 727
 728	a.full = dfixed_const(1000);
 729	sclk.full = dfixed_const(wm->sclk);
 730	sclk.full = dfixed_div(sclk, a);
 731	a.full = dfixed_const(10);
 732	return_efficiency.full = dfixed_const(8);
 733	return_efficiency.full = dfixed_div(return_efficiency, a);
 734	a.full = dfixed_const(32);
 735	bandwidth.full = dfixed_mul(a, sclk);
 736	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 737
 738	return dfixed_trunc(bandwidth);
 739}
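/*
 * Worked example (illustrative, hypothetical numbers): the return path is
 * modeled as 32 bytes per sclk cycle at 0.8 efficiency, so with
 * wm->sclk = 800000 (800 MHz):
 *   bandwidth = 32 * (800000 / 1000) * 0.8 = 20480 MBytes/s.
 */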
 740
 741/**
 742 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 743 *
 744 * @wm: watermark calculation data
 745 *
 746 * Calculate the dmif bandwidth used for display (CIK).
 747 * Used for display watermark bandwidth calculations
 748 * Returns the dmif bandwidth in MBytes/s
 749 */
 750static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
 751{
 752	/* Calculate the DMIF Request Bandwidth */
 753	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 754	fixed20_12 disp_clk, bandwidth;
 755	fixed20_12 a, b;
 756
 757	a.full = dfixed_const(1000);
 758	disp_clk.full = dfixed_const(wm->disp_clk);
 759	disp_clk.full = dfixed_div(disp_clk, a);
 760	a.full = dfixed_const(32);
 761	b.full = dfixed_mul(a, disp_clk);
 762
 763	a.full = dfixed_const(10);
 764	disp_clk_request_efficiency.full = dfixed_const(8);
 765	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 766
 767	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 768
 769	return dfixed_trunc(bandwidth);
 770}
 771
 772/**
 773 * dce_v6_0_available_bandwidth - get the min available bandwidth
 774 *
 775 * @wm: watermark calculation data
 776 *
 777 * Calculate the min available bandwidth used for display (CIK).
 778 * Used for display watermark bandwidth calculations
 779 * Returns the min available bandwidth in MBytes/s
 780 */
 781static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
 782{
 783	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
 784	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
 785	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
 786	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
 787
 788	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 789}
 790
 791/**
 792 * dce_v6_0_average_bandwidth - get the average available bandwidth
 793 *
 794 * @wm: watermark calculation data
 795 *
 796 * Calculate the average available bandwidth used for display (CIK).
 797 * Used for display watermark bandwidth calculations
 798 * Returns the average available bandwidth in MBytes/s
 799 */
 800static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
 801{
 802	/* Calculate the display mode Average Bandwidth
 803	 * DisplayMode should contain the source and destination dimensions,
 804	 * timing, etc.
 805	 */
 806	fixed20_12 bpp;
 807	fixed20_12 line_time;
 808	fixed20_12 src_width;
 809	fixed20_12 bandwidth;
 810	fixed20_12 a;
 811
 812	a.full = dfixed_const(1000);
 813	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 814	line_time.full = dfixed_div(line_time, a);
 815	bpp.full = dfixed_const(wm->bytes_per_pixel);
 816	src_width.full = dfixed_const(wm->src_width);
 817	bandwidth.full = dfixed_mul(src_width, bpp);
 818	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 819	bandwidth.full = dfixed_div(bandwidth, line_time);
 820
 821	return dfixed_trunc(bandwidth);
 822}
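/*
 * Worked example (illustrative 1080p-like numbers): with
 * wm->src_width = 1920, wm->bytes_per_pixel = 4, wm->vsc = 1 and
 * active_time + blank_time = 13200 ns, the average bandwidth is
 *   (1920 * 4 * 1) / 13.2 us ~= 582 MBytes/s.
 */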
 823
 824/**
 825 * dce_v6_0_latency_watermark - get the latency watermark
 826 *
 827 * @wm: watermark calculation data
 828 *
 829 * Calculate the latency watermark (CIK).
 830 * Used for display watermark bandwidth calculations
 831 * Returns the latency watermark in ns
 832 */
 833static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 834{
 835	/* First calculate the latency in ns */
 836	u32 mc_latency = 2000; /* 2000 ns. */
 837	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
 838	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 839	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 840	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 841	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 842		(wm->num_heads * cursor_line_pair_return_time);
 843	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 844	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 845	u32 tmp, dmif_size = 12288;
 846	fixed20_12 a, b, c;
 847
 848	if (wm->num_heads == 0)
 849		return 0;
 850
 851	a.full = dfixed_const(2);
 852	b.full = dfixed_const(1);
 853	if ((wm->vsc.full > a.full) ||
 854	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 855	    (wm->vtaps >= 5) ||
 856	    ((wm->vsc.full >= a.full) && wm->interlaced))
 857		max_src_lines_per_dst_line = 4;
 858	else
 859		max_src_lines_per_dst_line = 2;
 860
 861	a.full = dfixed_const(available_bandwidth);
 862	b.full = dfixed_const(wm->num_heads);
 863	a.full = dfixed_div(a, b);
 864
 865	b.full = dfixed_const(mc_latency + 512);
 866	c.full = dfixed_const(wm->disp_clk);
 867	b.full = dfixed_div(b, c);
 868
 869	c.full = dfixed_const(dmif_size);
 870	b.full = dfixed_div(c, b);
 871
 872	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 873
 874	b.full = dfixed_const(1000);
 875	c.full = dfixed_const(wm->disp_clk);
 876	b.full = dfixed_div(c, b);
 877	c.full = dfixed_const(wm->bytes_per_pixel);
 878	b.full = dfixed_mul(b, c);
 879
 880	lb_fill_bw = min(tmp, dfixed_trunc(b));
 881
 882	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 883	b.full = dfixed_const(1000);
 884	c.full = dfixed_const(lb_fill_bw);
 885	b.full = dfixed_div(c, b);
 886	a.full = dfixed_div(a, b);
 887	line_fill_time = dfixed_trunc(a);
 888
 889	if (line_fill_time < wm->active_time)
 890		return latency;
 891	else
 892		return latency + (line_fill_time - wm->active_time);
 893
 894}
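/*
 * Worked example (illustrative, hypothetical numbers): with
 * available_bandwidth = 5600 MBytes/s, num_heads = 2 and
 * disp_clk = 148500 (148.5 MHz):
 *   worst_chunk_return_time      = 512 * 8 * 1000 / 5600 ~= 731 ns
 *   cursor_line_pair_return_time = 128 * 4 * 1000 / 5600 ~=  91 ns
 *   dc_latency                   = 40000000 / 148500     ~= 269 ns
 *   latency ~= 2000 + (3 * 731 + 2 * 91) + 269 ~= 4644 ns,
 * extended further if the line buffer cannot refill within active_time.
 */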
 895
 896/**
 897 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 898 * average and available dram bandwidth
 899 *
 900 * @wm: watermark calculation data
 901 *
 902 * Check if the display average bandwidth fits in the display
 903 * dram bandwidth (CIK).
 904 * Used for display watermark bandwidth calculations
 905 * Returns true if the display fits, false if not.
 906 */
 907static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 908{
 909	if (dce_v6_0_average_bandwidth(wm) <=
 910	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 911		return true;
 912	else
 913		return false;
 914}
 915
 916/**
 917 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 918 * average and available bandwidth
 919 *
 920 * @wm: watermark calculation data
 921 *
 922 * Check if the display average bandwidth fits in the display
 923 * available bandwidth (CIK).
 924 * Used for display watermark bandwidth calculations
 925 * Returns true if the display fits, false if not.
 926 */
 927static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 928{
 929	if (dce_v6_0_average_bandwidth(wm) <=
 930	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
 931		return true;
 932	else
 933		return false;
 934}
 935
 936/**
 937 * dce_v6_0_check_latency_hiding - check latency hiding
 938 *
 939 * @wm: watermark calculation data
 940 *
 941 * Check latency hiding (CIK).
 942 * Used for display watermark bandwidth calculations
 943 * Returns true if the display fits, false if not.
 944 */
 945static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
 946{
 947	u32 lb_partitions = wm->lb_size / wm->src_width;
 948	u32 line_time = wm->active_time + wm->blank_time;
 949	u32 latency_tolerant_lines;
 950	u32 latency_hiding;
 951	fixed20_12 a;
 952
 953	a.full = dfixed_const(1);
 954	if (wm->vsc.full > a.full)
 955		latency_tolerant_lines = 1;
 956	else {
 957		if (lb_partitions <= (wm->vtaps + 1))
 958			latency_tolerant_lines = 1;
 959		else
 960			latency_tolerant_lines = 2;
 961	}
 962
 963	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 964
 965	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
 966		return true;
 967	else
 968		return false;
 969}
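/*
 * Continuing the hypothetical 1080p example: with line_time = 13200 ns,
 * blank_time = 1680 ns and 2 latency tolerant lines, the hiding budget is
 * 2 * 13200 + 1680 = 28080 ns, comfortably above the ~4644 ns watermark,
 * so the display fits.
 */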
 970
 971/**
 972 * dce_v6_0_program_watermarks - program display watermarks
 973 *
 974 * @adev: amdgpu_device pointer
 975 * @amdgpu_crtc: the selected display controller
 976 * @lb_size: line buffer size
 977 * @num_heads: number of display controllers in use
 978 *
 979 * Calculate and program the display watermarks for the
 980 * selected display controller (CIK).
 981 */
 982static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 983					struct amdgpu_crtc *amdgpu_crtc,
 984					u32 lb_size, u32 num_heads)
 985{
 986	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 987	struct dce6_wm_params wm_low, wm_high;
 988	u32 dram_channels;
 989	u32 pixel_period;
 990	u32 line_time = 0;
 991	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 992	u32 priority_a_mark = 0, priority_b_mark = 0;
 993	u32 priority_a_cnt = PRIORITY_OFF;
 994	u32 priority_b_cnt = PRIORITY_OFF;
 995	u32 tmp, arb_control3;
 996	fixed20_12 a, b, c;
 997
 998	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 999		pixel_period = 1000000 / (u32)mode->clock;
1000		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1001		priority_a_cnt = 0;
1002		priority_b_cnt = 0;
1003
1004		dram_channels = si_get_number_of_dram_channels(adev);
1005
1006		/* watermark for high clocks */
1007		if (adev->pm.dpm_enabled) {
1008			wm_high.yclk =
1009				amdgpu_dpm_get_mclk(adev, false) * 10;
1010			wm_high.sclk =
1011				amdgpu_dpm_get_sclk(adev, false) * 10;
1012		} else {
1013			wm_high.yclk = adev->pm.current_mclk * 10;
1014			wm_high.sclk = adev->pm.current_sclk * 10;
1015		}
1016
1017		wm_high.disp_clk = mode->clock;
1018		wm_high.src_width = mode->crtc_hdisplay;
1019		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1020		wm_high.blank_time = line_time - wm_high.active_time;
1021		wm_high.interlaced = false;
1022		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1023			wm_high.interlaced = true;
1024		wm_high.vsc = amdgpu_crtc->vsc;
1025		wm_high.vtaps = 1;
1026		if (amdgpu_crtc->rmx_type != RMX_OFF)
1027			wm_high.vtaps = 2;
1028		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1029		wm_high.lb_size = lb_size;
1030		wm_high.dram_channels = dram_channels;
1031		wm_high.num_heads = num_heads;
1032
1033		if (adev->pm.dpm_enabled) {
1034		/* watermark for low clocks */
1035			wm_low.yclk =
1036				amdgpu_dpm_get_mclk(adev, true) * 10;
1037			wm_low.sclk =
1038				amdgpu_dpm_get_sclk(adev, true) * 10;
1039		} else {
1040			wm_low.yclk = adev->pm.current_mclk * 10;
1041			wm_low.sclk = adev->pm.current_sclk * 10;
1042		}
1043
1044		wm_low.disp_clk = mode->clock;
1045		wm_low.src_width = mode->crtc_hdisplay;
1046		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1047		wm_low.blank_time = line_time - wm_low.active_time;
1048		wm_low.interlaced = false;
1049		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1050			wm_low.interlaced = true;
1051		wm_low.vsc = amdgpu_crtc->vsc;
1052		wm_low.vtaps = 1;
1053		if (amdgpu_crtc->rmx_type != RMX_OFF)
1054			wm_low.vtaps = 2;
1055		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1056		wm_low.lb_size = lb_size;
1057		wm_low.dram_channels = dram_channels;
1058		wm_low.num_heads = num_heads;
1059
1060		/* set for high clocks */
1061		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
1062		/* set for low clocks */
1063		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
1064
1065		/* possibly force display priority to high */
1066		/* should really do this at mode validation time... */
1067		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1068		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1069		    !dce_v6_0_check_latency_hiding(&wm_high) ||
1070		    (adev->mode_info.disp_priority == 2)) {
1071			DRM_DEBUG_KMS("force priority to high\n");
1072			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1073			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1074		}
1075		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1076		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1077		    !dce_v6_0_check_latency_hiding(&wm_low) ||
1078		    (adev->mode_info.disp_priority == 2)) {
1079			DRM_DEBUG_KMS("force priority to high\n");
1080			priority_a_cnt |= PRIORITY_ALWAYS_ON;
1081			priority_b_cnt |= PRIORITY_ALWAYS_ON;
1082		}
1083
1084		a.full = dfixed_const(1000);
1085		b.full = dfixed_const(mode->clock);
1086		b.full = dfixed_div(b, a);
1087		c.full = dfixed_const(latency_watermark_a);
1088		c.full = dfixed_mul(c, b);
1089		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1090		c.full = dfixed_div(c, a);
1091		a.full = dfixed_const(16);
1092		c.full = dfixed_div(c, a);
1093		priority_a_mark = dfixed_trunc(c);
1094		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1095
1096		a.full = dfixed_const(1000);
1097		b.full = dfixed_const(mode->clock);
1098		b.full = dfixed_div(b, a);
1099		c.full = dfixed_const(latency_watermark_b);
1100		c.full = dfixed_mul(c, b);
1101		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1102		c.full = dfixed_div(c, a);
1103		a.full = dfixed_const(16);
1104		c.full = dfixed_div(c, a);
1105		priority_b_mark = dfixed_trunc(c);
1106		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1107	}
1108
1109	/* select wm A */
1110	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1111	tmp = arb_control3;
1112	tmp &= ~LATENCY_WATERMARK_MASK(3);
1113	tmp |= LATENCY_WATERMARK_MASK(1);
1114	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1115	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1116	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
1117		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1118	/* select wm B */
1119	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1120	tmp &= ~LATENCY_WATERMARK_MASK(3);
1121	tmp |= LATENCY_WATERMARK_MASK(2);
1122	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1123	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1124	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1125		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1126	/* restore original selection */
1127	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1128
1129	/* write the priority marks */
1130	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1131	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1132
1133	/* save values for DPM */
1134	amdgpu_crtc->line_time = line_time;
1135	amdgpu_crtc->wm_high = latency_watermark_a;
1136}
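/*
 * Worked example for the priority mark math above (hypothetical numbers):
 * with latency_watermark_a = 4644 ns, mode->clock = 148500 and hsc = 1,
 *   mark = 4644 * (148500 / 1000) / 1000 / 16 ~= 43,
 * i.e. the watermark converted to pixels (latency * pixel clock) and then
 * expressed in units of 16 pixels for the PRIORITY_A_CNT register.
 */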
1137
1138/* watermark setup */
1139static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1140				   struct amdgpu_crtc *amdgpu_crtc,
1141				   struct drm_display_mode *mode,
1142				   struct drm_display_mode *other_mode)
1143{
1144	u32 tmp, buffer_alloc, i;
1145	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1146	/*
1147	 * Line Buffer Setup
1148	 * There are 3 line buffers, each one shared by 2 display controllers.
1149	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1150	 * the display controllers.  The partitioning is done via one of four
1151	 * preset allocations specified in bits 21:20:
1152	 *  0 - half lb
1153	 *  2 - whole lb, other crtc must be disabled
1154	 */
1155	/* this can get tricky if we have two large displays on a paired group
1156	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1157	 * non-linked crtcs for maximum line buffer allocation.
1158	 */
1159	if (amdgpu_crtc->base.enabled && mode) {
1160		if (other_mode) {
1161			tmp = 0; /* 1/2 */
1162			buffer_alloc = 1;
1163		} else {
1164			tmp = 2; /* whole */
1165			buffer_alloc = 2;
1166		}
1167	} else {
1168		tmp = 0;
1169		buffer_alloc = 0;
1170	}
1171
1172	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1173	       DC_LB_MEMORY_CONFIG(tmp));
1174
1175	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1176	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1177	for (i = 0; i < adev->usec_timeout; i++) {
1178		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1179		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1180			break;
1181		udelay(1);
1182	}
1183
1184	if (amdgpu_crtc->base.enabled && mode) {
1185		switch (tmp) {
1186		case 0:
1187		default:
1188			return 4096 * 2;
1189		case 2:
1190			return 8192 * 2;
1191		}
1192	}
1193
1194	/* controller not enabled, so no lb used */
1195	return 0;
1196}
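/*
 * CRTCs come in fixed pairs (0/1, 2/3, 4/5) that share one line buffer,
 * which is why dce_v6_0_bandwidth_update() below walks them two at a time:
 * when both CRTCs of a pair are active each gets half the buffer
 * (tmp = 0, 4096 * 2), while a lone active CRTC gets the whole buffer
 * (tmp = 2, 8192 * 2).
 */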
1197
1198
1199/**
1200 * dce_v6_0_bandwidth_update - program display watermarks
1201 *
1202 *
1203 * @adev: amdgpu_device pointer
1204 *
1205 * Calculate and program the display watermarks and line
1206 * buffer allocation (CIK).
1207 */
1208static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1209{
1210	struct drm_display_mode *mode0 = NULL;
1211	struct drm_display_mode *mode1 = NULL;
1212	u32 num_heads = 0, lb_size;
1213	int i;
1214
1215	if (!adev->mode_info.mode_config_initialized)
1216		return;
1217
1218	amdgpu_update_display_priority(adev);
1219
1220	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1221		if (adev->mode_info.crtcs[i]->base.enabled)
1222			num_heads++;
1223	}
1224	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1225		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1226		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1227		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1228		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1229		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1230		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1231	}
1232}
1233/*
1234static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1235{
1236	int i;
1237	u32 offset, tmp;
1238
1239	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1240		offset = adev->mode_info.audio.pin[i].offset;
1241		tmp = RREG32_AUDIO_ENDPT(offset,
1242				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1243		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
1244			adev->mode_info.audio.pin[i].connected = false;
1245		else
1246			adev->mode_info.audio.pin[i].connected = true;
1247	}
1248
1249}
1250
1251static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1252{
1253	int i;
1254
1255	dce_v6_0_audio_get_connected_pins(adev);
1256
1257	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1258		if (adev->mode_info.audio.pin[i].connected)
1259			return &adev->mode_info.audio.pin[i];
1260	}
1261	DRM_ERROR("No connected audio pins found!\n");
1262	return NULL;
1263}
1264
1265static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1266{
1267	struct amdgpu_device *adev = encoder->dev->dev_private;
1268	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1269	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1270	u32 offset;
1271
1272	if (!dig || !dig->afmt || !dig->afmt->pin)
1273		return;
1274
1275	offset = dig->afmt->offset;
1276
1277	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
1278	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
1279
1280}
1281
1282static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1283						struct drm_display_mode *mode)
1284{
1285	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
1286}
1287
1288static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1289{
1290	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
1291}
1292
1293static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1294{
1295	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
1296
1297}
1298*/
1299static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1300				  struct amdgpu_audio_pin *pin,
1301				  bool enable)
1302{
1303	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
1304}
1305
1306static const u32 pin_offsets[7] =
1307{
1308	(0x1780 - 0x1780),
1309	(0x1786 - 0x1780),
1310	(0x178c - 0x1780),
1311	(0x1792 - 0x1780),
1312	(0x1798 - 0x1780),
1313	(0x179d - 0x1780),
1314	(0x17a4 - 0x1780),
1315};
1316
1317static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1318{
1319	return 0;
1320}
1321
1322static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1323{
1324
1325}
1326
1327/*
1328static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1329{
1330	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
1331}
1332*/
1333/*
1334 * build a HDMI Video Info Frame
1335 */
1336/*
1337static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1338					       void *buffer, size_t size)
1339{
1340	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
1341}
1342
1343static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1344{
1345	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
1346}
1347*/
1348/*
1349 * update the info frames with the data from the current display mode
1350 */
1351static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1352				  struct drm_display_mode *mode)
1353{
1354	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
1355}
1356
1357static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1358{
1359	struct drm_device *dev = encoder->dev;
1360	struct amdgpu_device *adev = dev->dev_private;
1361	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1362	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1363
1364	if (!dig || !dig->afmt)
1365		return;
1366
1367	/* Silent, r600_hdmi_enable will raise WARN for us */
1368	if (enable && dig->afmt->enabled)
1369		return;
1370	if (!enable && !dig->afmt->enabled)
1371		return;
1372
1373	if (!enable && dig->afmt->pin) {
1374		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1375		dig->afmt->pin = NULL;
1376	}
1377
1378	dig->afmt->enabled = enable;
1379
1380	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1381		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1382}
1383
1384static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1385{
1386	int i, j;
1387
1388	for (i = 0; i < adev->mode_info.num_dig; i++)
1389		adev->mode_info.afmt[i] = NULL;
1390
1391	/* DCE6 has audio blocks tied to DIG encoders */
1392	for (i = 0; i < adev->mode_info.num_dig; i++) {
1393		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1394		if (adev->mode_info.afmt[i]) {
1395			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1396			adev->mode_info.afmt[i]->id = i;
1397		} else {
1398			for (j = 0; j < i; j++) {
1399				kfree(adev->mode_info.afmt[j]);
1400				adev->mode_info.afmt[j] = NULL;
1401			}
1402			DRM_ERROR("Out of memory allocating afmt table\n");
1403			return -ENOMEM;
1404		}
1405	}
1406	return 0;
1407}
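/*
 * On a partial allocation failure the loop above unwinds every afmt struct
 * allocated so far before returning -ENOMEM, so mode_info.afmt is either
 * fully populated or left all-NULL.
 */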
1408
1409static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1410{
1411	int i;
1412
1413	for (i = 0; i < adev->mode_info.num_dig; i++) {
1414		kfree(adev->mode_info.afmt[i]);
1415		adev->mode_info.afmt[i] = NULL;
1416	}
1417}
1418
1419static const u32 vga_control_regs[6] =
1420{
1421	mmD1VGA_CONTROL,
1422	mmD2VGA_CONTROL,
1423	mmD3VGA_CONTROL,
1424	mmD4VGA_CONTROL,
1425	mmD5VGA_CONTROL,
1426	mmD6VGA_CONTROL,
1427};
1428
1429static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1430{
1431	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1432	struct drm_device *dev = crtc->dev;
1433	struct amdgpu_device *adev = dev->dev_private;
1434	u32 vga_control;
1435
1436	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1437	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1438}
1439
1440static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1441{
1442	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1443	struct drm_device *dev = crtc->dev;
1444	struct amdgpu_device *adev = dev->dev_private;
1445
1446	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1447}
1448
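/*
 * Program the primary (GRPH) surface of a crtc: pin the framebuffer
 * BO in VRAM (skipped for atomic updates, which assume a pinned,
 * idle and fenced BO), translate the DRM pixel format into
 * GRPH_CONTROL depth/format bits, apply the BO's tiling flags, and
 * program the surface addresses, pitch and viewport.
 */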
1449static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1450				     struct drm_framebuffer *fb,
1451				     int x, int y, int atomic)
1452{
1453	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1454	struct drm_device *dev = crtc->dev;
1455	struct amdgpu_device *adev = dev->dev_private;
1456	struct amdgpu_framebuffer *amdgpu_fb;
1457	struct drm_framebuffer *target_fb;
1458	struct drm_gem_object *obj;
1459	struct amdgpu_bo *abo;
1460	uint64_t fb_location, tiling_flags;
1461	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1462	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1463	u32 viewport_w, viewport_h;
1464	int r;
1465	bool bypass_lut = false;
1466	struct drm_format_name_buf format_name;
1467
1468	/* no fb bound */
1469	if (!atomic && !crtc->primary->fb) {
1470		DRM_DEBUG_KMS("No FB bound\n");
1471		return 0;
1472	}
1473
1474	if (atomic) {
1475		amdgpu_fb = to_amdgpu_framebuffer(fb);
1476		target_fb = fb;
1477	} else {
1478		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1479		target_fb = crtc->primary->fb;
1480	}
1481
1482	/* If atomic, assume fb object is pinned & idle & fenced and
1483	 * just update base pointers
1484	 */
1485	obj = amdgpu_fb->obj;
1486	abo = gem_to_amdgpu_bo(obj);
1487	r = amdgpu_bo_reserve(abo, false);
1488	if (unlikely(r != 0))
1489		return r;
1490
1491	if (atomic) {
1492		fb_location = amdgpu_bo_gpu_offset(abo);
1493	} else {
1494		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1495		if (unlikely(r != 0)) {
1496			amdgpu_bo_unreserve(abo);
1497			return -EINVAL;
1498		}
1499	}
1500
1501	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1502	amdgpu_bo_unreserve(abo);
1503
1504	switch (target_fb->pixel_format) {
1505	case DRM_FORMAT_C8:
1506		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1507			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1508		break;
1509	case DRM_FORMAT_XRGB4444:
1510	case DRM_FORMAT_ARGB4444:
1511		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1512			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1513#ifdef __BIG_ENDIAN
1514		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1515#endif
1516		break;
1517	case DRM_FORMAT_XRGB1555:
1518	case DRM_FORMAT_ARGB1555:
1519		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1520			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1521#ifdef __BIG_ENDIAN
1522		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1523#endif
1524		break;
1525	case DRM_FORMAT_BGRX5551:
1526	case DRM_FORMAT_BGRA5551:
1527		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1528			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1529#ifdef __BIG_ENDIAN
1530		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1531#endif
1532		break;
1533	case DRM_FORMAT_RGB565:
1534		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1535			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1536#ifdef __BIG_ENDIAN
1537		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1538#endif
1539		break;
1540	case DRM_FORMAT_XRGB8888:
1541	case DRM_FORMAT_ARGB8888:
1542		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1543			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1544#ifdef __BIG_ENDIAN
1545		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1546#endif
1547		break;
1548	case DRM_FORMAT_XRGB2101010:
1549	case DRM_FORMAT_ARGB2101010:
1550		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1551			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1552#ifdef __BIG_ENDIAN
1553		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1554#endif
1555		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1556		bypass_lut = true;
1557		break;
1558	case DRM_FORMAT_BGRX1010102:
1559	case DRM_FORMAT_BGRA1010102:
1560		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1561			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1562#ifdef __BIG_ENDIAN
1563		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1564#endif
1565		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1566		bypass_lut = true;
1567		break;
1568	default:
1569		DRM_ERROR("Unsupported screen format %s\n",
1570		          drm_get_format_name(target_fb->pixel_format, &format_name));
1571		return -EINVAL;
1572	}
1573
1574	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1575		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1576
1577		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1578		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1579		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1580		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1581		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1582
1583		fb_format |= GRPH_NUM_BANKS(num_banks);
1584		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1585		fb_format |= GRPH_TILE_SPLIT(tile_split);
1586		fb_format |= GRPH_BANK_WIDTH(bankw);
1587		fb_format |= GRPH_BANK_HEIGHT(bankh);
1588		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1589	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1590		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1591	}
1592
1593	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1594	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1595
1596	dce_v6_0_vga_enable(crtc, false);
1597
1598	/* Make sure surface address is updated at vertical blank rather than
1599	 * horizontal blank
1600	 */
1601	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1602
1603	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1604	       upper_32_bits(fb_location));
1605	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1606	       upper_32_bits(fb_location));
1607	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1608	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1609	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1610	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1611	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1612	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1613
1614	/*
1615	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1616	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1617	 * retain the full precision throughout the pipeline.
1618	 */
1619	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1620		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1621		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1622
1623	if (bypass_lut)
1624		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1625
1626	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1627	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1628	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1629	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1630	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1631	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1632
1633	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1634	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1635
1636	dce_v6_0_grph_enable(crtc, true);
1637
1638	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1639		       target_fb->height);
1640	x &= ~3;
1641	y &= ~1;
1642	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1643	       (x << 16) | y);
1644	viewport_w = crtc->mode.hdisplay;
1645	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1646
1647	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1648	       (viewport_w << 16) | viewport_h);
1649
1650	/* set pageflip to happen anywhere in vblank interval */
1651	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1652
1653	if (!atomic && fb && fb != crtc->primary->fb) {
1654		amdgpu_fb = to_amdgpu_framebuffer(fb);
1655		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1656		r = amdgpu_bo_reserve(abo, false);
1657		if (unlikely(r != 0))
1658			return r;
1659		amdgpu_bo_unpin(abo);
1660		amdgpu_bo_unreserve(abo);
1661	}
1662
1663	/* Bytes per pixel may have changed */
1664	dce_v6_0_bandwidth_update(adev);
1665
1666	return 0;
1667
1668}
1669
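/* enable line interleave for interlaced modes, disable it otherwise */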
1670static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1671				    struct drm_display_mode *mode)
1672{
1673	struct drm_device *dev = crtc->dev;
1674	struct amdgpu_device *adev = dev->dev_private;
1675	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1676
1677	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1678		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
1679		       INTERLEAVE_EN);
1680	else
1681		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1682}
1683
1684static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1685{
1686
1687	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1688	struct drm_device *dev = crtc->dev;
1689	struct amdgpu_device *adev = dev->dev_private;
1690	int i;
1691
1692	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1693
1694	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1695	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1696		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
1697	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1698	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
1699	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1700	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
1701	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1702	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
1703		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
1704
1705	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1706
1707	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1708	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1709	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1710
1711	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1712	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1713	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1714
1715	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1716	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1717
1718	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
1719	for (i = 0; i < 256; i++) {
1720		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1721		       (amdgpu_crtc->lut_r[i] << 20) |
1722		       (amdgpu_crtc->lut_g[i] << 10) |
1723		       (amdgpu_crtc->lut_b[i] << 0));
1724	}
1725
1726	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1727	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
1728		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
1729		ICON_DEGAMMA_MODE(0) |
1730		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
1731	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1732	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
1733		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
1734	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1735	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
1736		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
1737	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1738	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
1739		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
1740	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
1741	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1742
1743
1744}
1745
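/*
 * Map a UNIPHY encoder object id and its link (A/B) to the DIG block
 * index used for AFMT/audio assignment.
 */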
1746static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1747{
1748	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1749	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1750
1751	switch (amdgpu_encoder->encoder_id) {
1752	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1753		return dig->linkb ? 1 : 0;
1754	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1755		return dig->linkb ? 3 : 2;
1756	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1757		return dig->linkb ? 5 : 4;
1758	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1759		return 6;
1760	default:
1761		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1762		return 0;
1763	}
1764}
1765
1766/**
1767 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1768 *
1769 * @crtc: drm crtc
1770 *
1771 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
1772 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
1773 * monitors a dedicated PPLL must be used.  If a particular board has
1774 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1775 * as there is no need to program the PLL itself.  If we are not able to
1776 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1777 * avoid messing up an existing monitor.
1778 */
1781static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1782{
1783	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1784	struct drm_device *dev = crtc->dev;
1785	struct amdgpu_device *adev = dev->dev_private;
1786	u32 pll_in_use;
1787	int pll;
1788
1789	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1790		if (adev->clock.dp_extclk)
1791			/* skip PPLL programming if using ext clock */
1792			return ATOM_PPLL_INVALID;
1793		else
1794			return ATOM_PPLL0;
1795	} else {
1796		/* use the same PPLL for all monitors with the same clock */
1797		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1798		if (pll != ATOM_PPLL_INVALID)
1799			return pll;
1800	}
1801
1802	/* otherwise pick a free PPLL: PPLL1 or PPLL2 */
1803	pll_in_use = amdgpu_pll_get_use_mask(crtc);
1804	if (!(pll_in_use & (1 << ATOM_PPLL2)))
1805		return ATOM_PPLL2;
1806	if (!(pll_in_use & (1 << ATOM_PPLL1)))
1807		return ATOM_PPLL1;
1808	DRM_ERROR("unable to allocate a PPLL\n");
1809	return ATOM_PPLL_INVALID;
1810}
1811
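/*
 * Lock/unlock cursor register updates so that position, hot spot and
 * surface address programming takes effect atomically.
 */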
1812static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1813{
1814	struct amdgpu_device *adev = crtc->dev->dev_private;
1815	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1816	uint32_t cur_lock;
1817
1818	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
1819	if (lock)
1820		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1821	else
1822		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1823	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1824}
1825
1826static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1827{
1828	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1829	struct amdgpu_device *adev = crtc->dev->dev_private;
1830
1831	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1832		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1833		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1834
1835
1836}
1837
1838static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1839{
1840	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1841	struct amdgpu_device *adev = crtc->dev->dev_private;
1842
1843	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1844	       upper_32_bits(amdgpu_crtc->cursor_addr));
1845	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1846	       lower_32_bits(amdgpu_crtc->cursor_addr));
1847
1848	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1849		   CUR_CONTROL__CURSOR_EN_MASK |
1850		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1851		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1852
1853}
1854
1855static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1856				       int x, int y)
1857{
1858	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1859	struct amdgpu_device *adev = crtc->dev->dev_private;
1860	int xorigin = 0, yorigin = 0;
1861
1862	int w = amdgpu_crtc->cursor_width;
1863
1864	amdgpu_crtc->cursor_x = x;
1865	amdgpu_crtc->cursor_y = y;
1866
1867	/* avivo cursor are offset into the total surface */
1868	x += crtc->x;
1869	y += crtc->y;
1870	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1871
1872	if (x < 0) {
1873		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1874		x = 0;
1875	}
1876	if (y < 0) {
1877		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1878		y = 0;
1879	}
1880
1881	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1882	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1883	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1884	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1885
1886	return 0;
1887}
1888
1889static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1890				     int x, int y)
1891{
1892	int ret;
1893
1894	dce_v6_0_lock_cursor(crtc, true);
1895	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1896	dce_v6_0_lock_cursor(crtc, false);
1897
1898	return ret;
1899}
1900
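/*
 * Install a new HW cursor from a GEM handle: pin the cursor BO in
 * VRAM, reposition if the hot spot changed, show the cursor, and
 * finally unpin and drop the previous cursor BO.  A zero handle just
 * hides the cursor.
 */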
1901static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1902				     struct drm_file *file_priv,
1903				     uint32_t handle,
1904				     uint32_t width,
1905				     uint32_t height,
1906				     int32_t hot_x,
1907				     int32_t hot_y)
1908{
1909	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1910	struct drm_gem_object *obj;
1911	struct amdgpu_bo *aobj;
1912	int ret;
1913
1914	if (!handle) {
1915		/* turn off cursor */
1916		dce_v6_0_hide_cursor(crtc);
1917		obj = NULL;
1918		goto unpin;
1919	}
1920
1921	if ((width > amdgpu_crtc->max_cursor_width) ||
1922	    (height > amdgpu_crtc->max_cursor_height)) {
1923		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1924		return -EINVAL;
1925	}
1926
1927	obj = drm_gem_object_lookup(file_priv, handle);
1928	if (!obj) {
1929		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1930		return -ENOENT;
1931	}
1932
1933	aobj = gem_to_amdgpu_bo(obj);
1934	ret = amdgpu_bo_reserve(aobj, false);
1935	if (ret != 0) {
1936		drm_gem_object_unreference_unlocked(obj);
1937		return ret;
1938	}
1939
1940	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
1941	amdgpu_bo_unreserve(aobj);
1942	if (ret) {
1943		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
1944		drm_gem_object_unreference_unlocked(obj);
1945		return ret;
1946	}
1947
1948	dce_v6_0_lock_cursor(crtc, true);
1949
1950	if (width != amdgpu_crtc->cursor_width ||
1951	    height != amdgpu_crtc->cursor_height ||
1952	    hot_x != amdgpu_crtc->cursor_hot_x ||
1953	    hot_y != amdgpu_crtc->cursor_hot_y) {
1954		int x, y;
1955
1956		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
1957		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
1958
1959		dce_v6_0_cursor_move_locked(crtc, x, y);
1960
1961		amdgpu_crtc->cursor_width = width;
1962		amdgpu_crtc->cursor_height = height;
1963		amdgpu_crtc->cursor_hot_x = hot_x;
1964		amdgpu_crtc->cursor_hot_y = hot_y;
1965	}
1966
1967	dce_v6_0_show_cursor(crtc);
1968	dce_v6_0_lock_cursor(crtc, false);
1969
1970unpin:
1971	if (amdgpu_crtc->cursor_bo) {
1972		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1973		ret = amdgpu_bo_reserve(aobj, false);
1974		if (likely(ret == 0)) {
1975			amdgpu_bo_unpin(aobj);
1976			amdgpu_bo_unreserve(aobj);
1977		}
1978		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
1979	}
1980
1981	amdgpu_crtc->cursor_bo = obj;
1982	return 0;
1983}
1984
1985static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1986{
1987	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1988
1989	if (amdgpu_crtc->cursor_bo) {
1990		dce_v6_0_lock_cursor(crtc, true);
1991
1992		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1993					    amdgpu_crtc->cursor_y);
1994
1995		dce_v6_0_show_cursor(crtc);
1996		dce_v6_0_lock_cursor(crtc, false);
1997	}
1998}
1999
2000static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2001				   u16 *blue, uint32_t size)
2002{
2003	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2004	int i;
2005
2006	/* userspace palettes are always correct as is */
2007	for (i = 0; i < size; i++) {
2008		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2009		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2010		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2011	}
2012	dce_v6_0_crtc_load_lut(crtc);
2013
2014	return 0;
2015}
2016
2017static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2018{
2019	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2020
2021	drm_crtc_cleanup(crtc);
2022	kfree(amdgpu_crtc);
2023}
2024
2025static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2026	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2027	.cursor_move = dce_v6_0_crtc_cursor_move,
2028	.gamma_set = dce_v6_0_crtc_gamma_set,
2029	.set_config = amdgpu_crtc_set_config,
2030	.destroy = dce_v6_0_crtc_destroy,
2031	.page_flip_target = amdgpu_crtc_page_flip_target,
2032};
2033
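/*
 * DPMS on enables the crtc via atombios, re-arms the vblank and
 * pageflip interrupts and reloads the LUT; standby/suspend/off blank
 * and disable the crtc.  PM clocks are recomputed to match the new
 * state.
 */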
2034static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2035{
2036	struct drm_device *dev = crtc->dev;
2037	struct amdgpu_device *adev = dev->dev_private;
2038	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039	unsigned type;
2040
2041	switch (mode) {
2042	case DRM_MODE_DPMS_ON:
2043		amdgpu_crtc->enabled = true;
2044		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2045		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2046		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2047		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2048		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2049		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2050		drm_crtc_vblank_on(crtc);
2051		dce_v6_0_crtc_load_lut(crtc);
2052		break;
2053	case DRM_MODE_DPMS_STANDBY:
2054	case DRM_MODE_DPMS_SUSPEND:
2055	case DRM_MODE_DPMS_OFF:
2056		drm_crtc_vblank_off(crtc);
2057		if (amdgpu_crtc->enabled)
2058			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2059		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2060		amdgpu_crtc->enabled = false;
2061		break;
2062	}
2063	/* adjust pm to dpms */
2064	amdgpu_pm_compute_clocks(adev);
2065}
2066
2067static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2068{
2069	/* disable crtc pair power gating before programming */
2070	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2071	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2072	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2073}
2074
2075static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2076{
2077	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2078	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2079}
2080
2081static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2082{
2083
2084	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2085	struct drm_device *dev = crtc->dev;
2086	struct amdgpu_device *adev = dev->dev_private;
2087	struct amdgpu_atom_ss ss;
2088	int i;
2089
2090	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2091	if (crtc->primary->fb) {
2092		int r;
2093		struct amdgpu_framebuffer *amdgpu_fb;
2094		struct amdgpu_bo *abo;
2095
2096		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2097		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2098		r = amdgpu_bo_reserve(abo, false);
2099		if (unlikely(r))
2100			DRM_ERROR("failed to reserve abo before unpin\n");
2101		else {
2102			amdgpu_bo_unpin(abo);
2103			amdgpu_bo_unreserve(abo);
2104		}
2105	}
2106	/* disable the GRPH */
2107	dce_v6_0_grph_enable(crtc, false);
2108
2109	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2110
2111	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2112		if (adev->mode_info.crtcs[i] &&
2113		    adev->mode_info.crtcs[i]->enabled &&
2114		    i != amdgpu_crtc->crtc_id &&
2115		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2116			/* another crtc is using this pll; don't turn
2117			 * off the pll
2118			 */
2119			goto done;
2120		}
2121	}
2122
2123	switch (amdgpu_crtc->pll_id) {
2124	case ATOM_PPLL1:
2125	case ATOM_PPLL2:
2126		/* disable the ppll */
2127		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2128						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2129		break;
2130	default:
2131		break;
2132	}
2133done:
2134	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2135	amdgpu_crtc->adjusted_clock = 0;
2136	amdgpu_crtc->encoder = NULL;
2137	amdgpu_crtc->connector = NULL;
2138}
2139
2140static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2141				  struct drm_display_mode *mode,
2142				  struct drm_display_mode *adjusted_mode,
2143				  int x, int y, struct drm_framebuffer *old_fb)
2144{
2145	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2146
2147	if (!amdgpu_crtc->adjusted_clock)
2148		return -EINVAL;
2149
2150	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2151	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2152	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2153	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2154	amdgpu_atombios_crtc_scaler_setup(crtc);
2155	dce_v6_0_cursor_reset(crtc);
2156	/* update the hw mode for dpm */
2157	amdgpu_crtc->hw_mode = *adjusted_mode;
2158
2159	return 0;
2160}
2161
2162static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2163				     const struct drm_display_mode *mode,
2164				     struct drm_display_mode *adjusted_mode)
2165{
2166
2167	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2168	struct drm_device *dev = crtc->dev;
2169	struct drm_encoder *encoder;
2170
2171	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2172	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2173		if (encoder->crtc == crtc) {
2174			amdgpu_crtc->encoder = encoder;
2175			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2176			break;
2177		}
2178	}
2179	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2180		amdgpu_crtc->encoder = NULL;
2181		amdgpu_crtc->connector = NULL;
2182		return false;
2183	}
2184	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2185		return false;
2186	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2187		return false;
2188	/* pick pll */
2189	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2190	/* if we can't get a PPLL for a non-DP encoder, fail */
2191	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2192	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2193		return false;
2194
2195	return true;
2196}
2197
2198static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2199				  struct drm_framebuffer *old_fb)
2200{
2201	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2202}
2203
2204static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2205					 struct drm_framebuffer *fb,
2206					 int x, int y, enum mode_set_atomic state)
2207{
2208	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2209}
2210
2211static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2212	.dpms = dce_v6_0_crtc_dpms,
2213	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2214	.mode_set = dce_v6_0_crtc_mode_set,
2215	.mode_set_base = dce_v6_0_crtc_set_base,
2216	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2217	.prepare = dce_v6_0_crtc_prepare,
2218	.commit = dce_v6_0_crtc_commit,
2219	.load_lut = dce_v6_0_crtc_load_lut,
2220	.disable = dce_v6_0_crtc_disable,
2221};
2222
2223static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2224{
2225	struct amdgpu_crtc *amdgpu_crtc;
2226	int i;
2227
2228	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2229			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2230	if (amdgpu_crtc == NULL)
2231		return -ENOMEM;
2232
2233	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2234
2235	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2236	amdgpu_crtc->crtc_id = index;
2237	adev->mode_info.crtcs[index] = amdgpu_crtc;
2238
2239	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2240	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2241	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2242	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2243
2244	for (i = 0; i < 256; i++) {
2245		amdgpu_crtc->lut_r[i] = i << 2;
2246		amdgpu_crtc->lut_g[i] = i << 2;
2247		amdgpu_crtc->lut_b[i] = i << 2;
2248	}
2249
2250	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2251
2252	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2253	amdgpu_crtc->adjusted_clock = 0;
2254	amdgpu_crtc->encoder = NULL;
2255	amdgpu_crtc->connector = NULL;
2256	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2257
2258	return 0;
2259}
2260
2261static int dce_v6_0_early_init(void *handle)
2262{
2263	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2264
2265	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2266	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2267
2268	dce_v6_0_set_display_funcs(adev);
2269	dce_v6_0_set_irq_funcs(adev);
2270
2271	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2272
2273	switch (adev->asic_type) {
2274	case CHIP_TAHITI:
2275	case CHIP_PITCAIRN:
2276	case CHIP_VERDE:
2277		adev->mode_info.num_hpd = 6;
2278		adev->mode_info.num_dig = 6;
2279		break;
2280	case CHIP_OLAND:
2281		adev->mode_info.num_hpd = 2;
2282		adev->mode_info.num_dig = 2;
2283		break;
2284	default:
2285		return -EINVAL;
2286	}
2287
2288	return 0;
2289}
2290
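/*
 * Register the display interrupt sources (crtc interrupts use src ids
 * 1..num_crtc, pageflips the even src ids 8..18, hotplug src id 42),
 * initialize the mode_config limits and properties, then create the
 * crtcs, connectors, AFMT blocks and audio pins.
 */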
2291static int dce_v6_0_sw_init(void *handle)
2292{
2293	int r, i;
2294	bool ret;
2295	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296
2297	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2298		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2299		if (r)
2300			return r;
2301	}
2302
2303	for (i = 8; i < 20; i += 2) {
2304		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2305		if (r)
2306			return r;
2307	}
2308
2309	/* HPD hotplug */
2310	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2311	if (r)
2312		return r;
2313
2314	adev->mode_info.mode_config_initialized = true;
2315
2316	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2317	adev->ddev->mode_config.async_page_flip = true;
2318	adev->ddev->mode_config.max_width = 16384;
2319	adev->ddev->mode_config.max_height = 16384;
2320	adev->ddev->mode_config.preferred_depth = 24;
2321	adev->ddev->mode_config.prefer_shadow = 1;
2322	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2323
2324	r = amdgpu_modeset_create_props(adev);
2325	if (r)
2326		return r;
2327
2328	adev->ddev->mode_config.max_width = 16384;
2329	adev->ddev->mode_config.max_height = 16384;
2330
2331	/* allocate crtcs */
2332	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2333		r = dce_v6_0_crtc_init(adev, i);
2334		if (r)
2335			return r;
2336	}
2337
2338	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2339	if (ret)
2340		amdgpu_print_display_setup(adev->ddev);
2341	else
2342		return -EINVAL;
2343
2344	/* setup afmt */
2345	r = dce_v6_0_afmt_init(adev);
2346	if (r)
2347		return r;
2348
2349	r = dce_v6_0_audio_init(adev);
2350	if (r)
2351		return r;
2352
2353	drm_kms_helper_poll_init(adev->ddev);
2354
2355	return r;
2356}
2357
2358static int dce_v6_0_sw_fini(void *handle)
2359{
2360	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2361
2362	kfree(adev->mode_info.bios_hardcoded_edid);
2363
2364	drm_kms_helper_poll_fini(adev->ddev);
2365
2366	dce_v6_0_audio_fini(adev);
2367	dce_v6_0_afmt_fini(adev);
2368
2369	drm_mode_config_cleanup(adev->ddev);
2370	adev->mode_info.mode_config_initialized = false;
2371
2372	return 0;
2373}
2374
2375static int dce_v6_0_hw_init(void *handle)
2376{
2377	int i;
2378	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2379
2380	/* init dig PHYs, disp eng pll */
2381	amdgpu_atombios_encoder_init_dig(adev);
2382	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2383
2384	/* initialize hpd */
2385	dce_v6_0_hpd_init(adev);
2386
2387	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2388		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2389	}
2390
2391	dce_v6_0_pageflip_interrupt_init(adev);
2392
2393	return 0;
2394}
2395
2396static int dce_v6_0_hw_fini(void *handle)
2397{
2398	int i;
2399	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2400
2401	dce_v6_0_hpd_fini(adev);
2402
2403	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2404		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2405	}
2406
2407	dce_v6_0_pageflip_interrupt_fini(adev);
2408
2409	return 0;
2410}
2411
2412static int dce_v6_0_suspend(void *handle)
2413{
2414	return dce_v6_0_hw_fini(handle);
2415}
2416
2417static int dce_v6_0_resume(void *handle)
2418{
2419	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2420	int ret;
2421
2422	ret = dce_v6_0_hw_init(handle);
2423
2424	/* turn on the BL */
2425	if (adev->mode_info.bl_encoder) {
2426		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2427								  adev->mode_info.bl_encoder);
2428		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2429						    bl_level);
2430	}
2431
2432	return ret;
2433}
2434
2435static bool dce_v6_0_is_idle(void *handle)
2436{
2437	return true;
2438}
2439
2440static int dce_v6_0_wait_for_idle(void *handle)
2441{
2442	return 0;
2443}
2444
2445static int dce_v6_0_soft_reset(void *handle)
2446{
2447	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2448	return 0;
2449}
2450
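/* toggle the VBLANK interrupt mask in the per-crtc INT_MASK register */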
2451static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2452						     int crtc,
2453						     enum amdgpu_interrupt_state state)
2454{
2455	u32 reg_block, interrupt_mask;
2456
2457	if (crtc >= adev->mode_info.num_crtc) {
2458		DRM_DEBUG("invalid crtc %d\n", crtc);
2459		return;
2460	}
2461
2462	switch (crtc) {
2463	case 0:
2464		reg_block = SI_CRTC0_REGISTER_OFFSET;
2465		break;
2466	case 1:
2467		reg_block = SI_CRTC1_REGISTER_OFFSET;
2468		break;
2469	case 2:
2470		reg_block = SI_CRTC2_REGISTER_OFFSET;
2471		break;
2472	case 3:
2473		reg_block = SI_CRTC3_REGISTER_OFFSET;
2474		break;
2475	case 4:
2476		reg_block = SI_CRTC4_REGISTER_OFFSET;
2477		break;
2478	case 5:
2479		reg_block = SI_CRTC5_REGISTER_OFFSET;
2480		break;
2481	default:
2482		DRM_DEBUG("invalid crtc %d\n", crtc);
2483		return;
2484	}
2485
2486	switch (state) {
2487	case AMDGPU_IRQ_STATE_DISABLE:
2488		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2489		interrupt_mask &= ~VBLANK_INT_MASK;
2490		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2491		break;
2492	case AMDGPU_IRQ_STATE_ENABLE:
2493		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2494		interrupt_mask |= VBLANK_INT_MASK;
2495		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2496		break;
2497	default:
2498		break;
2499	}
2500}
2501
2502static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2503						    int crtc,
2504						    enum amdgpu_interrupt_state state)
2505{
2506
2507}
2508
2509static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2510					    struct amdgpu_irq_src *src,
2511					    unsigned type,
2512					    enum amdgpu_interrupt_state state)
2513{
2514	u32 dc_hpd_int_cntl;
2515
2516	if (type >= adev->mode_info.num_hpd) {
2517		DRM_DEBUG("invalid hpd %d\n", type);
2518		return 0;
2519	}
2520
2521	switch (state) {
2522	case AMDGPU_IRQ_STATE_DISABLE:
2523		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2524		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2525		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2526		break;
2527	case AMDGPU_IRQ_STATE_ENABLE:
2528		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2529		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2530		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2531		break;
2532	default:
2533		break;
2534	}
2535
2536	return 0;
2537}
2538
2539static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2540					     struct amdgpu_irq_src *src,
2541					     unsigned type,
2542					     enum amdgpu_interrupt_state state)
2543{
2544	switch (type) {
2545	case AMDGPU_CRTC_IRQ_VBLANK1:
2546		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2547		break;
2548	case AMDGPU_CRTC_IRQ_VBLANK2:
2549		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2550		break;
2551	case AMDGPU_CRTC_IRQ_VBLANK3:
2552		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2553		break;
2554	case AMDGPU_CRTC_IRQ_VBLANK4:
2555		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2556		break;
2557	case AMDGPU_CRTC_IRQ_VBLANK5:
2558		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2559		break;
2560	case AMDGPU_CRTC_IRQ_VBLANK6:
2561		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2562		break;
2563	case AMDGPU_CRTC_IRQ_VLINE1:
2564		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2565		break;
2566	case AMDGPU_CRTC_IRQ_VLINE2:
2567		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2568		break;
2569	case AMDGPU_CRTC_IRQ_VLINE3:
2570		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2571		break;
2572	case AMDGPU_CRTC_IRQ_VLINE4:
2573		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2574		break;
2575	case AMDGPU_CRTC_IRQ_VLINE5:
2576		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2577		break;
2578	case AMDGPU_CRTC_IRQ_VLINE6:
2579		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2580		break;
2581	default:
2582		break;
2583	}
2584	return 0;
2585}
2586
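/*
 * Crtc interrupt handler: ack the vblank/vline status bit of the
 * signalling crtc and forward vblank events to drm while the
 * interrupt source is enabled.
 */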
2587static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2588			     struct amdgpu_irq_src *source,
2589			     struct amdgpu_iv_entry *entry)
2590{
2591	unsigned crtc = entry->src_id - 1;
2592	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2593	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2594
2595	switch (entry->src_data) {
2596	case 0: /* vblank */
2597		if (disp_int & interrupt_status_offsets[crtc].vblank)
2598			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2599		else
2600			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2601
2602		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2603			drm_handle_vblank(adev->ddev, crtc);
2604		}
2605		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2606		break;
2607	case 1: /* vline */
2608		if (disp_int & interrupt_status_offsets[crtc].vline)
2609			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2610		else
2611			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2612
2613		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2614		break;
2615	default:
2616		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2617		break;
2618	}
2619
2620	return 0;
2621}
2622
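/* mask/unmask the per-crtc GRPH pageflip interrupt */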
2623static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2624						 struct amdgpu_irq_src *src,
2625						 unsigned type,
2626						 enum amdgpu_interrupt_state state)
2627{
2628	u32 reg;
2629
2630	if (type >= adev->mode_info.num_crtc) {
2631		DRM_ERROR("invalid pageflip crtc %d\n", type);
2632		return -EINVAL;
2633	}
2634
2635	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2636	if (state == AMDGPU_IRQ_STATE_DISABLE)
2637		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2638		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2639	else
2640		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2641		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2642
2643	return 0;
2644}
2645
2646static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2647				 struct amdgpu_irq_src *source,
2648				 struct amdgpu_iv_entry *entry)
2649{
2650	unsigned long flags;
2651	unsigned crtc_id;
2652	struct amdgpu_crtc *amdgpu_crtc;
2653	struct amdgpu_flip_work *works;
2654
2655	crtc_id = (entry->src_id - 8) >> 1;
2656
2657	if (crtc_id >= adev->mode_info.num_crtc) {
2658		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2659		return -EINVAL;
2660	}
2661	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2662
2663	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
2664	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2665		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
2666		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2667
2668	/* an IRQ can fire before the crtc is fully initialized */
2669	if (amdgpu_crtc == NULL)
2670		return 0;
2671
2672	spin_lock_irqsave(&adev->ddev->event_lock, flags);
2673	works = amdgpu_crtc->pflip_works;
2674	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
2675		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2676						"AMDGPU_FLIP_SUBMITTED(%d)\n",
2677						amdgpu_crtc->pflip_status,
2678						AMDGPU_FLIP_SUBMITTED);
2679		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2680		return 0;
2681	}
2682
2683	/* page flip completed. clean up */
2684	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2685	amdgpu_crtc->pflip_works = NULL;
2686
2687	/* wake up userspace */
2688	if (works->event)
2689		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2690
2691	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2692
2693	drm_crtc_vblank_put(&amdgpu_crtc->base);
2694	schedule_work(&works->unpin_work);
2695
2696	return 0;
2697}
2698
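/*
 * Hotplug interrupt handler: ack the HPD interrupt of the signalling
 * pin and schedule the hotplug work to reprobe the connectors.
 */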
2699static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2700			    struct amdgpu_irq_src *source,
2701			    struct amdgpu_iv_entry *entry)
2702{
2703	uint32_t disp_int, mask, tmp;
2704	unsigned hpd;
2705
2706	if (entry->src_data >= adev->mode_info.num_hpd) {
2707		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2708		return 0;
2709	}
2710
2711	hpd = entry->src_data;
2712	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2713	mask = interrupt_status_offsets[hpd].hpd;
2714
2715	if (disp_int & mask) {
2716		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
2717		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2718		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
2719		schedule_work(&adev->hotplug_work);
2720		DRM_INFO("IH: HPD%d\n", hpd + 1);
2721	}
2722
2723	return 0;
2724
2725}
2726
2727static int dce_v6_0_set_clockgating_state(void *handle,
2728					  enum amd_clockgating_state state)
2729{
2730	return 0;
2731}
2732
2733static int dce_v6_0_set_powergating_state(void *handle,
2734					  enum amd_powergating_state state)
2735{
2736	return 0;
2737}
2738
2739static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2740	.name = "dce_v6_0",
2741	.early_init = dce_v6_0_early_init,
2742	.late_init = NULL,
2743	.sw_init = dce_v6_0_sw_init,
2744	.sw_fini = dce_v6_0_sw_fini,
2745	.hw_init = dce_v6_0_hw_init,
2746	.hw_fini = dce_v6_0_hw_fini,
2747	.suspend = dce_v6_0_suspend,
2748	.resume = dce_v6_0_resume,
2749	.is_idle = dce_v6_0_is_idle,
2750	.wait_for_idle = dce_v6_0_wait_for_idle,
2751	.soft_reset = dce_v6_0_soft_reset,
2752	.set_clockgating_state = dce_v6_0_set_clockgating_state,
2753	.set_powergating_state = dce_v6_0_set_powergating_state,
2754};
2755
2756static void
2757dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2758			  struct drm_display_mode *mode,
2759			  struct drm_display_mode *adjusted_mode)
2760{
2761
2762	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2763
2764	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2765
2766	/* need to call this here rather than in prepare() since we need some crtc info */
2767	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2768
2769	/* set scaler clears this on some chips */
2770	dce_v6_0_set_interleave(encoder->crtc, mode);
2771
2772	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2773		dce_v6_0_afmt_enable(encoder, true);
2774		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2775	}
2776}
2777
2778static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2779{
2780
2781	struct amdgpu_device *adev = encoder->dev->dev_private;
2782	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2783	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2784
2785	if ((amdgpu_encoder->active_device &
2786	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2787	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2788	     ENCODER_OBJECT_ID_NONE)) {
2789		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2790		if (dig) {
2791			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2792			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2793				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2794		}
2795	}
2796
2797	amdgpu_atombios_scratch_regs_lock(adev, true);
2798
2799	if (connector) {
2800		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2801
2802		/* select the clock/data port if it uses a router */
2803		if (amdgpu_connector->router.cd_valid)
2804			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2805
2806		/* turn eDP panel on for mode set */
2807		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2808			amdgpu_atombios_encoder_set_edp_panel_power(connector,
2809							     ATOM_TRANSMITTER_ACTION_POWER_ON);
2810	}
2811
2812	/* this is needed for the pll/ss setup to work correctly in some cases */
2813	amdgpu_atombios_encoder_set_crtc_source(encoder);
2814	/* set up the FMT blocks */
2815	dce_v6_0_program_fmt(encoder);
2816}
2817
2818static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2819{
2820
2821	struct drm_device *dev = encoder->dev;
2822	struct amdgpu_device *adev = dev->dev_private;
2823
2824	/* need to call this here as we need the crtc set up */
2825	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2826	amdgpu_atombios_scratch_regs_lock(adev, false);
2827}
2828
2829static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2830{
2831
2832	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2833	struct amdgpu_encoder_atom_dig *dig;
2834
2835	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2836
2837	if (amdgpu_atombios_encoder_is_digital(encoder)) {
2838		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2839			dce_v6_0_afmt_enable(encoder, false);
2840		dig = amdgpu_encoder->enc_priv;
2841		dig->dig_encoder = -1;
2842	}
2843	amdgpu_encoder->active_device = 0;
2844}
2845
2846/* these are handled by the primary encoders */
2847static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2848{
2849
2850}
2851
2852static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2853{
2854
2855}
2856
2857static void
2858dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2859		      struct drm_display_mode *mode,
2860		      struct drm_display_mode *adjusted_mode)
2861{
2862
2863}
2864
2865static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2866{
2867
2868}
2869
2870static void
2871dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2872{
2873
2874}
2875
2876static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2877				    const struct drm_display_mode *mode,
2878				    struct drm_display_mode *adjusted_mode)
2879{
2880	return true;
2881}
2882
2883static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2884	.dpms = dce_v6_0_ext_dpms,
2885	.mode_fixup = dce_v6_0_ext_mode_fixup,
2886	.prepare = dce_v6_0_ext_prepare,
2887	.mode_set = dce_v6_0_ext_mode_set,
2888	.commit = dce_v6_0_ext_commit,
2889	.disable = dce_v6_0_ext_disable,
2890	/* no detect for TMDS/LVDS yet */
2891};
2892
2893static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2894	.dpms = amdgpu_atombios_encoder_dpms,
2895	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2896	.prepare = dce_v6_0_encoder_prepare,
2897	.mode_set = dce_v6_0_encoder_mode_set,
2898	.commit = dce_v6_0_encoder_commit,
2899	.disable = dce_v6_0_encoder_disable,
2900	.detect = amdgpu_atombios_encoder_dig_detect,
2901};
2902
2903static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2904	.dpms = amdgpu_atombios_encoder_dpms,
2905	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2906	.prepare = dce_v6_0_encoder_prepare,
2907	.mode_set = dce_v6_0_encoder_mode_set,
2908	.commit = dce_v6_0_encoder_commit,
2909	.detect = amdgpu_atombios_encoder_dac_detect,
2910};
2911
2912static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
2913{
2914	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2915	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2916		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
2917	kfree(amdgpu_encoder->enc_priv);
2918	drm_encoder_cleanup(encoder);
2919	kfree(amdgpu_encoder);
2920}
2921
2922static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
2923	.destroy = dce_v6_0_encoder_destroy,
2924};
2925
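/*
 * Add an encoder from the BIOS object table.  An already-known
 * encoder_enum only has its supported_device mask extended; otherwise
 * a new drm encoder is created with a possible_crtcs mask derived
 * from the crtc count and helper funcs selected by encoder id.
 */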
2926static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
2927				 uint32_t encoder_enum,
2928				 uint32_t supported_device,
2929				 u16 caps)
2930{
2931	struct drm_device *dev = adev->ddev;
2932	struct drm_encoder *encoder;
2933	struct amdgpu_encoder *amdgpu_encoder;
2934
2935	/* see if we already added it */
2936	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2937		amdgpu_encoder = to_amdgpu_encoder(encoder);
2938		if (amdgpu_encoder->encoder_enum == encoder_enum) {
2939			amdgpu_encoder->devices |= supported_device;
2940			return;
2941		}
2942
2943	}
2944
2945	/* add a new one */
2946	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
2947	if (!amdgpu_encoder)
2948		return;
2949
2950	encoder = &amdgpu_encoder->base;
2951	switch (adev->mode_info.num_crtc) {
2952	case 1:
2953		encoder->possible_crtcs = 0x1;
2954		break;
2955	case 2:
2956	default:
2957		encoder->possible_crtcs = 0x3;
2958		break;
2959	case 4:
2960		encoder->possible_crtcs = 0xf;
2961		break;
2962	case 6:
2963		encoder->possible_crtcs = 0x3f;
2964		break;
2965	}
2966
2967	amdgpu_encoder->enc_priv = NULL;
2968	amdgpu_encoder->encoder_enum = encoder_enum;
2969	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
2970	amdgpu_encoder->devices = supported_device;
2971	amdgpu_encoder->rmx_type = RMX_OFF;
2972	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
2973	amdgpu_encoder->is_ext_encoder = false;
2974	amdgpu_encoder->caps = caps;
2975
2976	switch (amdgpu_encoder->encoder_id) {
2977	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2978	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2979		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2980				 DRM_MODE_ENCODER_DAC, NULL);
2981		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
2982		break;
2983	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
2984	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2985	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2986	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2987	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2988		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2989			amdgpu_encoder->rmx_type = RMX_FULL;
2990			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2991					 DRM_MODE_ENCODER_LVDS, NULL);
2992			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
2993		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2994			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2995					 DRM_MODE_ENCODER_DAC, NULL);
2996			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
2997		} else {
2998			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2999					 DRM_MODE_ENCODER_TMDS, NULL);
3000			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3001		}
3002		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3003		break;
3004	case ENCODER_OBJECT_ID_SI170B:
3005	case ENCODER_OBJECT_ID_CH7303:
3006	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3007	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3008	case ENCODER_OBJECT_ID_TITFP513:
3009	case ENCODER_OBJECT_ID_VT1623:
3010	case ENCODER_OBJECT_ID_HDMI_SI1930:
3011	case ENCODER_OBJECT_ID_TRAVIS:
3012	case ENCODER_OBJECT_ID_NUTMEG:
3013		/* these are handled by the primary encoders */
3014		amdgpu_encoder->is_ext_encoder = true;
3015		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3016			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3017					 DRM_MODE_ENCODER_LVDS, NULL);
3018		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3019			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3020					 DRM_MODE_ENCODER_DAC, NULL);
3021		else
3022			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3023					 DRM_MODE_ENCODER_TMDS, NULL);
3024		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3025		break;
3026	}
3027}
3028
3029static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3030	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
3031	.bandwidth_update = &dce_v6_0_bandwidth_update,
3032	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3033	.vblank_wait = &dce_v6_0_vblank_wait,
3034	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3035	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3036	.hpd_sense = &dce_v6_0_hpd_sense,
3037	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3038	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3039	.page_flip = &dce_v6_0_page_flip,
3040	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3041	.add_encoder = &dce_v6_0_encoder_add,
3042	.add_connector = &amdgpu_connector_add,
3043	.stop_mc_access = &dce_v6_0_stop_mc_access,
3044	.resume_mc_access = &dce_v6_0_resume_mc_access,
3045};
3046
3047static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3048{
3049	if (adev->mode_info.funcs == NULL)
3050		adev->mode_info.funcs = &dce_v6_0_display_funcs;
3051}
3052
3053static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3054	.set = dce_v6_0_set_crtc_interrupt_state,
3055	.process = dce_v6_0_crtc_irq,
3056};
3057
3058static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3059	.set = dce_v6_0_set_pageflip_interrupt_state,
3060	.process = dce_v6_0_pageflip_irq,
3061};
3062
3063static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3064	.set = dce_v6_0_set_hpd_interrupt_state,
3065	.process = dce_v6_0_hpd_irq,
3066};
3067
3068static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3069{
3070	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3071	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3072
3073	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3074	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3075
3076	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3077	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3078}
3079
3080const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3081{
3082	.type = AMD_IP_BLOCK_TYPE_DCE,
3083	.major = 6,
3084	.minor = 0,
3085	.rev = 0,
3086	.funcs = &dce_v6_0_ip_funcs,
3087};
3088
3089const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3090{
3091	.type = AMD_IP_BLOCK_TYPE_DCE,
3092	.major = 6,
3093	.minor = 4,
3094	.rev = 0,
3095	.funcs = &dce_v6_0_ip_funcs,
3096};