/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

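/**
 * dce_v8_0_audio_endpt_rreg - read an audio endpoint register
 *
 * @adev: amdgpu_device pointer
 * @block_offset: offset of the audio endpoint block
 * @reg: indirect register to read
 *
 * Reads an azalia audio endpoint register through the endpoint
 * index/data pair, serialized by audio_endpt_idx_lock.
 * Returns the register value.
 */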
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

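/**
 * dce_v8_0_audio_endpt_wreg - write an audio endpoint register
 *
 * @adev: amdgpu_device pointer
 * @block_offset: offset of the audio endpoint block
 * @reg: indirect register to write
 * @v: value to write
 *
 * Writes an azalia audio endpoint register through the endpoint
 * index/data pair, serialized by audio_endpt_idx_lock.
 */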
static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

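/**
 * dce_v8_0_is_in_vblank - check if a crtc is in its vblank period
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to check
 *
 * Returns true if the given crtc is currently in its vertical
 * blanking interval, based on the CRTC_STATUS register.
 */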
static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

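/**
 * dce_v8_0_is_counter_moving - check if the position counter is moving
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to check
 *
 * Reads the crtc status position twice and returns true if the two
 * values differ, i.e. the timing generator is still running.
 */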
static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

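/**
 * dce_v8_0_vblank_get_counter - get the frame count
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to get the frame count from
 *
 * Returns the current frame count from the hardware frame counter
 * of the requested crtc, or 0 if the crtc index is out of range.
 */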
static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

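/**
 * dce_v8_0_crtc_get_scanoutpos - get the current scanout position
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to query
 * @vbl: out parameter for the vblank start/end positions
 * @position: out parameter for the current scanout position
 *
 * Reads the raw vblank and scanout position registers for the
 * requested crtc. Returns 0 on success, -EINVAL on a bad crtc index.
 */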
static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; it breaks the
			 * aux dp channel on imac. Skipping it helps (but does not
			 * completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

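/**
 * dce_v8_0_is_display_hung - check if the display engine is hung
 *
 * @adev: amdgpu_device pointer
 *
 * Samples the HV counters of all enabled crtcs several times.
 * Returns true if none of them advance (display engine hung),
 * false as soon as every enabled crtc shows movement.
 */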
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

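/**
 * dce_v8_0_stop_mc_access - stop display access to the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: state to save the crtc/VGA settings into
 *
 * Saves the VGA controls, disables VGA rendering and blanks the
 * enabled display controllers so the memory controller can be
 * reprogrammed safely (CIK).
 */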
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 1
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				/* it is correct only for RGB; black is 0 */
				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			}
			mdelay(20);
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

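/**
 * dce_v8_0_resume_mc_access - restore display access to the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: state saved by dce_v8_0_stop_mc_access
 *
 * Points the crtcs and the VGA aperture at the new VRAM location,
 * unblanks the display controllers and restores the saved VGA
 * controls (CIK).
 */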
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
		}
		mdelay(20);
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

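/**
 * dce_v8_0_set_vga_render_state - enable/disable VGA render
 *
 * @adev: amdgpu_device pointer
 * @render: enable VGA rendering if true, disable it if false
 *
 * Enables or disables VGA memory access through the aperture and
 * the VGA render path (CIK).
 */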
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* enable/disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

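/**
 * dce_v8_0_get_num_crtc - get the number of crtcs
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of display controllers for the given CIK asic
 * (6 for Bonaire/Hawaii, 4 for Kaveri, 2 for Kabini/Mullins).
 */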
static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		num_crtc = 6;
		break;
	case CHIP_KAVERI:
		num_crtc = 4;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

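/**
 * dce_v8_0_disable_dce - disable the display engine
 *
 * @adev: amdgpu_device pointer
 *
 * Disables VGA render and any enabled crtcs if the asic exposes
 * DCE engine info in the vbios (CIK).
 */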
void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* disable VGA render and enabled crtcs, if the asic has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* disable crtcs */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

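/**
 * dce_v8_0_program_fmt - program the FMT bit depth/dithering block
 *
 * @encoder: encoder to program the FMT block for
 *
 * Programs truncation or spatial dithering on the crtc feeding the
 * encoder based on the monitor bpc and the connector dither setting.
 * LVDS/eDP (set up by the atom tables) and analog encoders are skipped.
 */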
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

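	/* bandwidth (MB/s) = yclk (kHz -> MHz) * dram channels * 4 bytes * 0.7 DRAM efficiency */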
	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

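	/* bandwidth (MB/s) = yclk (kHz -> MHz) * dram channels * 4 bytes * 0.3 worst-case display allocation */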
	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

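	/* bandwidth (MB/s) = sclk (kHz -> MHz) * 32 bytes per return * 0.8 return efficiency */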
	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

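	/* bandwidth (MB/s) = disp_clk (kHz -> MHz) * 32 bytes per request * 0.8 request efficiency */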
	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

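	/* bandwidth (MB/s) = src_width * bytes_per_pixel * vsc / line_time (ns -> us) */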
	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

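/*
 * write the audio/video latency fields from the connector's ELD
 * into the azalia pin's lipsync response register, picking the
 * interlaced or progressive set based on the current mode
 */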
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);

	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

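/*
 * write the Short Audio Descriptors from the connector's EDID into
 * the azalia pin's audio descriptor registers, one register per
 * supported HDMI audio coding type
 */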
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

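/*
 * initialize the audio pin state, one entry per azalia endpoint;
 * all pins start disabled until an encoder claims them
 */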
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1719
1720 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1721 (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1722 (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be suffient for all audio modes and small enough for all hblanks */
1723
1724 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1725 AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1726
1727 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1728
1729 if (bpc > 8)
1730 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1731 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */
1732 else
1733 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1734 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1735 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */
1736
1737 dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1738
1739 WREG32(mmAFMT_60958_0 + offset,
1740 (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1741
1742 WREG32(mmAFMT_60958_1 + offset,
1743 (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1744
1745 WREG32(mmAFMT_60958_2 + offset,
1746 (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1747 (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1748 (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1749 (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1750 (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1751 (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1752
1753 dce_v8_0_audio_write_speaker_allocation(encoder);
1754
1755
1756 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1757 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1758
1759 dce_v8_0_afmt_audio_select_pin(encoder);
1760 dce_v8_0_audio_write_sad_regs(encoder);
1761 dce_v8_0_audio_write_latency_fields(encoder, mode);
1762
1763 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1764 if (err < 0) {
1765 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1766 return;
1767 }
1768
1769 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1770 if (err < 0) {
1771 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1772 return;
1773 }
1774
1775 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1776
1777 WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1778 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1779 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
1780
1781 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1782 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1783 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1784
1785 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1786 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1787
1788 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1789 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1790 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1791 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1792
1793 /* enable audio after setting up hw */
1794 dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1795}
1796
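/* Enable or disable the AFMT block on an HDMI-capable DIG encoder.
 * Disabling also drops the audio pin binding so the pin can be reused
 * by another encoder.
 */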
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

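/* Allocate one amdgpu_afmt per DIG encoder. On allocation failure, free
 * everything allocated so far so the afmt array is never left half
 * initialized.
 */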
static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE8 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			int j;
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

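/* Program the scanout (GRPH) registers for the given framebuffer. In the
 * atomic (pageflip) path the buffer object is assumed to already be
 * pinned, idle and fenced; otherwise it is pinned into VRAM here and the
 * previous fb is unpinned once the new base is programmed.
 */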
static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
	u32 pipe_config;
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(abo);
	} else {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format, &format_name));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v8_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
		 ~LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v8_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v8_0_bandwidth_update(adev);

	return 0;
}

static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
	else
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

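/* Upload the 256-entry, 10-bit-per-component gamma LUT and put the rest
 * of the display color pipeline (input/output CSC, prescale, degamma,
 * gamut remap, regamma) into bypass.
 */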
static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
}

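/* Map a UNIPHY encoder to its DIG block: each UNIPHY instance drives two
 * links (A/B), giving DIG0..DIG5 for UNIPHY0..UNIPHY2 plus DIG6 for
 * UNIPHY3.
 */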
static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		if (dig->linkb)
			return 1;
		else
			return 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		if (dig->linkb)
			return 3;
		else
			return 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (dig->linkb)
			return 5;
		else
			return 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}

/**
 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
 * monitors a dedicated PPLL must be used. If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself. If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 * Asic specific PLL information
 *
 * DCE 8.x
 * KB/KV
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
 *
 */
static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else {
			/* use the same PPLL for all DP monitors */
			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}
	/* otherwise, pick one of the plls */
	if ((adev->asic_type == CHIP_KABINI) ||
	    (adev->asic_type == CHIP_MULLINS)) {
		/* KB/ML has PPLL1 and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else {
		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	}
	return ATOM_PPLL_INVALID;
}

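/* Lock/unlock cursor register updates so that position, hot spot and
 * size are latched by the hardware atomically.
 */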
static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursors are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

	return 0;
}

static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v8_0_lock_cursor(crtc, true);
	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
	dce_v8_0_lock_cursor(crtc, false);

	return ret;
}

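/* Bind a new cursor BO (or hide the cursor when handle == 0), pin it in
 * VRAM, then unpin and release whatever cursor BO was bound before.
 */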
static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v8_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v8_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v8_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v8_0_show_cursor(crtc);
	dce_v8_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}

static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v8_0_lock_cursor(crtc, true);

		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);

		dce_v8_0_show_cursor(crtc);

		dce_v8_0_lock_cursor(crtc, false);
	}
}

static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int i;

	/* userspace palettes are always correct as is */
	for (i = 0; i < size; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v8_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
	.cursor_move = dce_v8_0_crtc_cursor_move,
	.gamma_set = dce_v8_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v8_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};

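/* Standard DPMS hook: on enable, power up and unblank the crtc, re-arm
 * the vblank and pageflip interrupts and reload the LUT; on disable,
 * blank and power down the crtc. PM clocks are recomputed either way.
 */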
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		dce_v8_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v8_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v8_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled) {
			dce_v8_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v8_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v8_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll, don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((adev->asic_type == CHIP_KAVERI) ||
		    (adev->asic_type == CHIP_BONAIRE) ||
		    (adev->asic_type == CHIP_HAWAII))
			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
							 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v8_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
	.dpms = dce_v8_0_crtc_dpms,
	.mode_fixup = dce_v8_0_crtc_mode_fixup,
	.mode_set = dce_v8_0_crtc_mode_set,
	.mode_set_base = dce_v8_0_crtc_set_base,
	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
	.prepare = dce_v8_0_crtc_prepare,
	.commit = dce_v8_0_crtc_commit,
	.load_lut = dce_v8_0_crtc_load_lut,
	.disable = dce_v8_0_crtc_disable,
};

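/* Allocate and register one amdgpu_crtc, set up its gamma table and
 * cursor limits, and hook up the DCE8 crtc and crtc-helper functions.
 */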
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);

	return 0;
}

static int dce_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;

	dce_v8_0_set_display_funcs(adev);
	dce_v8_0_set_irq_funcs(adev);

	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6; /* ? */
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

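/* sw_init registers the interrupt sources used by this block: one
 * vblank/vline source per crtc starting at src_id 1, the pageflip
 * sources on the even src_ids 8..18, and hotplug detect on src_id 42.
 */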
static int dce_v8_0_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;

	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v8_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v8_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v8_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

static int dce_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v8_0_audio_fini(adev);

	dce_v8_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v8_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v8_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v8_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v8_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v8_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v8_0_pageflip_interrupt_fini(adev);

	return 0;
}

static int dce_v8_0_suspend(void *handle)
{
	return dce_v8_0_hw_fini(handle);
}

static int dce_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v8_0_hw_init(handle);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v8_0_is_idle(void *handle)
{
	return true;
}

static int dce_v8_0_wait_for_idle(void *handle)
{
	return 0;
}

static int dce_v8_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v8_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

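/* Handle a crtc interrupt: ack the vblank/vline status bit and, for
 * vblank, forward the event to the DRM core.
 */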
static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

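/* Handle a pageflip-completed interrupt: ack the status bit, send the
 * queued vblank event for the flip and schedule the unpin work.
 */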
static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
3491 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3492 amdgpu_encoder = to_amdgpu_encoder(encoder);
3493 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3494 amdgpu_encoder->devices |= supported_device;
3495 return;
3496 }
3497
3498 }
3499
3500 /* add a new one */
3501 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3502 if (!amdgpu_encoder)
3503 return;
3504
3505 encoder = &amdgpu_encoder->base;
3506 switch (adev->mode_info.num_crtc) {
3507 case 1:
3508 encoder->possible_crtcs = 0x1;
3509 break;
3510 case 2:
3511 default:
3512 encoder->possible_crtcs = 0x3;
3513 break;
3514 case 4:
3515 encoder->possible_crtcs = 0xf;
3516 break;
3517 case 6:
3518 encoder->possible_crtcs = 0x3f;
3519 break;
3520 }
3521
3522 amdgpu_encoder->enc_priv = NULL;
3523
3524 amdgpu_encoder->encoder_enum = encoder_enum;
3525 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3526 amdgpu_encoder->devices = supported_device;
3527 amdgpu_encoder->rmx_type = RMX_OFF;
3528 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3529 amdgpu_encoder->is_ext_encoder = false;
3530 amdgpu_encoder->caps = caps;
3531
3532 switch (amdgpu_encoder->encoder_id) {
3533 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3534 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3535 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3536 DRM_MODE_ENCODER_DAC, NULL);
3537 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3538 break;
3539 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3540 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3541 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3542 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3543 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3544 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3545 amdgpu_encoder->rmx_type = RMX_FULL;
3546 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3547 DRM_MODE_ENCODER_LVDS, NULL);
3548 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3549 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3550 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3551 DRM_MODE_ENCODER_DAC, NULL);
3552 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3553 } else {
3554 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3555 DRM_MODE_ENCODER_TMDS, NULL);
3556 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3557 }
3558 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3559 break;
3560 case ENCODER_OBJECT_ID_SI170B:
3561 case ENCODER_OBJECT_ID_CH7303:
3562 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3563 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3564 case ENCODER_OBJECT_ID_TITFP513:
3565 case ENCODER_OBJECT_ID_VT1623:
3566 case ENCODER_OBJECT_ID_HDMI_SI1930:
3567 case ENCODER_OBJECT_ID_TRAVIS:
3568 case ENCODER_OBJECT_ID_NUTMEG:
3569 /* these are handled by the primary encoders */
3570 amdgpu_encoder->is_ext_encoder = true;
3571 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3572 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3573 DRM_MODE_ENCODER_LVDS, NULL);
3574 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3575 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3576 DRM_MODE_ENCODER_DAC, NULL);
3577 else
3578 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3579 DRM_MODE_ENCODER_TMDS, NULL);
3580 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3581 break;
3582 }
3583}
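/*
 * Usage sketch: dce_v8_0_encoder_add() is not called directly; it is
 * reached through adev->mode_info.funcs->add_encoder() while the ATOM
 * BIOS object table is parsed. The argument values below are
 * hypothetical:
 *
 *	adev->mode_info.funcs->add_encoder(adev, encoder_enum,
 *					   ATOM_DEVICE_DFP1_SUPPORT, 0);
 */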
3584
3585static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3586 .set_vga_render_state = &dce_v8_0_set_vga_render_state,
3587 .bandwidth_update = &dce_v8_0_bandwidth_update,
3588 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3589 .vblank_wait = &dce_v8_0_vblank_wait,
3590 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3591 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3592 .hpd_sense = &dce_v8_0_hpd_sense,
3593 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3594 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3595 .page_flip = &dce_v8_0_page_flip,
3596 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3597 .add_encoder = &dce_v8_0_encoder_add,
3598 .add_connector = &amdgpu_connector_add,
3599 .stop_mc_access = &dce_v8_0_stop_mc_access,
3600 .resume_mc_access = &dce_v8_0_resume_mc_access,
3601};
3602
3603static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3604{
3605 if (adev->mode_info.funcs == NULL)
3606 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3607}
3608
3609static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3610 .set = dce_v8_0_set_crtc_interrupt_state,
3611 .process = dce_v8_0_crtc_irq,
3612};
3613
3614static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3615 .set = dce_v8_0_set_pageflip_interrupt_state,
3616 .process = dce_v8_0_pageflip_irq,
3617};
3618
3619static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3620 .set = dce_v8_0_set_hpd_interrupt_state,
3621 .process = dce_v8_0_hpd_irq,
3622};
3623
3624static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3625{
3626 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3627 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3628
3629 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3630 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3631
3632 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3633 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3634}
3635
3636const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3637{
3638 .type = AMD_IP_BLOCK_TYPE_DCE,
3639 .major = 8,
3640 .minor = 0,
3641 .rev = 0,
3642 .funcs = &dce_v8_0_ip_funcs,
3643};
3644
3645const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3646{
3647 .type = AMD_IP_BLOCK_TYPE_DCE,
3648 .major = 8,
3649 .minor = 1,
3650 .rev = 0,
3651 .funcs = &dce_v8_0_ip_funcs,
3652};
3653
3654const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3655{
3656 .type = AMD_IP_BLOCK_TYPE_DCE,
3657 .major = 8,
3658 .minor = 2,
3659 .rev = 0,
3660 .funcs = &dce_v8_0_ip_funcs,
3661};
3662
3663const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3664{
3665 .type = AMD_IP_BLOCK_TYPE_DCE,
3666 .major = 8,
3667 .minor = 3,
3668 .rev = 0,
3669 .funcs = &dce_v8_0_ip_funcs,
3670};
3671
3672const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3673{
3674 .type = AMD_IP_BLOCK_TYPE_DCE,
3675 .major = 8,
3676 .minor = 5,
3677 .rev = 0,
3678 .funcs = &dce_v8_0_ip_funcs,
3679};
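/*
 * Registration sketch (an assumption about the SoC setup code in cik.c,
 * not part of this file): each CIK ASIC adds the DCE 8.x block version
 * matching its display hardware through the core's IP-block helper,
 * e.g.:
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
 */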
23#include <drm/drmP.h>
24#include "amdgpu.h"
25#include "amdgpu_pm.h"
26#include "amdgpu_i2c.h"
27#include "cikd.h"
28#include "atom.h"
29#include "amdgpu_atombios.h"
30#include "atombios_crtc.h"
31#include "atombios_encoders.h"
32#include "amdgpu_pll.h"
33#include "amdgpu_connectors.h"
34#include "dce_v8_0.h"
35
36#include "dce/dce_8_0_d.h"
37#include "dce/dce_8_0_sh_mask.h"
38
39#include "gca/gfx_7_2_enum.h"
40
41#include "gmc/gmc_7_1_d.h"
42#include "gmc/gmc_7_1_sh_mask.h"
43
44#include "oss/oss_2_0_d.h"
45#include "oss/oss_2_0_sh_mask.h"
46
47static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
48static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
49
50static const u32 crtc_offsets[6] =
51{
52 CRTC0_REGISTER_OFFSET,
53 CRTC1_REGISTER_OFFSET,
54 CRTC2_REGISTER_OFFSET,
55 CRTC3_REGISTER_OFFSET,
56 CRTC4_REGISTER_OFFSET,
57 CRTC5_REGISTER_OFFSET
58};
59
60static const u32 hpd_offsets[] =
61{
62 HPD0_REGISTER_OFFSET,
63 HPD1_REGISTER_OFFSET,
64 HPD2_REGISTER_OFFSET,
65 HPD3_REGISTER_OFFSET,
66 HPD4_REGISTER_OFFSET,
67 HPD5_REGISTER_OFFSET
68};
69
70static const uint32_t dig_offsets[] = {
71 CRTC0_REGISTER_OFFSET,
72 CRTC1_REGISTER_OFFSET,
73 CRTC2_REGISTER_OFFSET,
74 CRTC3_REGISTER_OFFSET,
75 CRTC4_REGISTER_OFFSET,
76 CRTC5_REGISTER_OFFSET,
77 (0x13830 - 0x7030) >> 2,
78};
79
80static const struct {
81 uint32_t reg;
82 uint32_t vblank;
83 uint32_t vline;
84 uint32_t hpd;
85
86} interrupt_status_offsets[6] = { {
87 .reg = mmDISP_INTERRUPT_STATUS,
88 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
89 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
90 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
91}, {
92 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
93 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
94 .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
95 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
96}, {
97 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
98 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
99 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
100 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
101}, {
102 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
103 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
104 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
105 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
106}, {
107 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
108 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
109 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
110 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
111}, {
112 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
113 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
114 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
115 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
116} };
117
118static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
119 u32 block_offset, u32 reg)
120{
121 unsigned long flags;
122 u32 r;
123
124 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
125 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
126 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
127 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
128
129 return r;
130}
131
132static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
133 u32 block_offset, u32 reg, u32 v)
134{
135 unsigned long flags;
136
137 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
138 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
139 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
140 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
141}
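/*
 * These two helpers implement the usual index/data register pair: the
 * AZALIA register number goes into ...ENDPOINT_INDEX and the payload is
 * then read from or written to ...ENDPOINT_DATA, under a spinlock so the
 * two accesses cannot interleave. The rest of this file reaches them via
 * the RREG32_AUDIO_ENDPT()/WREG32_AUDIO_ENDPT() macros, e.g.:
 *
 *	v = RREG32_AUDIO_ENDPT(pin->offset,
 *			       ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
 */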
142
143static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
144{
145 if (crtc >= adev->mode_info.num_crtc)
146 return 0;
147 else
148 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
149}
150
151static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
152{
153 unsigned i;
154
155 /* Enable pflip interrupts */
156 for (i = 0; i < adev->mode_info.num_crtc; i++)
157 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
158}
159
160static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
161{
162 unsigned i;
163
164 /* Disable pflip interrupts */
165 for (i = 0; i < adev->mode_info.num_crtc; i++)
166 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
167}
168
169/**
170 * dce_v8_0_page_flip - pageflip callback.
171 *
172 * @adev: amdgpu_device pointer
173 * @crtc_id: crtc to trigger the pageflip on
174 * @crtc_base: new address of the crtc (GPU MC address)
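 * @async: true to flip immediately at hsync instead of waiting for vsync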
175 *
176 * Triggers the actual pageflip by updating the primary
177 * surface base address.
178 */
179static void dce_v8_0_page_flip(struct amdgpu_device *adev,
180 int crtc_id, u64 crtc_base, bool async)
181{
182 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
183
184 /* flip at hsync for async, default is vsync */
185 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
186 GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
187 /* update the primary scanout addresses */
188 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
189 upper_32_bits(crtc_base));
190 /* writing to the low address triggers the update */
191 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
192 lower_32_bits(crtc_base));
193 /* post the write */
194 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
195}
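/*
 * Ordering note: the hardware latches the new surface address when the
 * low 32 bits are written, so the 64-bit MC address must be split with
 * the high half first:
 *
 *	WREG32(high_reg, upper_32_bits(crtc_base));
 *	WREG32(low_reg, lower_32_bits(crtc_base));   <- arms the flip
 *
 * and the trailing read only posts the write.
 */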
196
197static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
198 u32 *vbl, u32 *position)
199{
200 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
201 return -EINVAL;
202
203 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
204 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
205
206 return 0;
207}
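/*
 * Decoding sketch for callers; the 0x1fff field masks are an assumption
 * carried over from the generic amdgpu scanout-position helper, not
 * something defined in this file:
 *
 *	u32 vbl_start = vbl & 0x1fff;		first line of vblank
 *	u32 vbl_end = (vbl >> 16) & 0x1fff;	last line of vblank
 *	u32 vpos = position & 0x1fff;		current vertical position
 */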
208
209/**
210 * dce_v8_0_hpd_sense - hpd sense callback.
211 *
212 * @adev: amdgpu_device pointer
213 * @hpd: hpd (hotplug detect) pin
214 *
215 * Checks if a digital monitor is connected (evergreen+).
216 * Returns true if connected, false if not connected.
217 */
218static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
219 enum amdgpu_hpd_id hpd)
220{
221 bool connected = false;
222
223 if (hpd >= adev->mode_info.num_hpd)
224 return connected;
225
226 if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
227 DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
228 connected = true;
229
230 return connected;
231}
232
233/**
234 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
235 *
236 * @adev: amdgpu_device pointer
237 * @hpd: hpd (hotplug detect) pin
238 *
239 * Set the polarity of the hpd pin (evergreen+).
240 */
241static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
242 enum amdgpu_hpd_id hpd)
243{
244 u32 tmp;
245 bool connected = dce_v8_0_hpd_sense(adev, hpd);
246
247 if (hpd >= adev->mode_info.num_hpd)
248 return;
249
250 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
251 if (connected)
252 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
253 else
254 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
255 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
256}
257
258/**
259 * dce_v8_0_hpd_init - hpd setup callback.
260 *
261 * @adev: amdgpu_device pointer
262 *
263 * Set up the hpd pins used by the card (evergreen+).
264 * Enable the pin, set the polarity, and enable the hpd interrupts.
265 */
266static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
267{
268 struct drm_device *dev = adev->ddev;
269 struct drm_connector *connector;
270 u32 tmp;
271
272 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
273 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
274
275 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
276 continue;
277
278 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
279 tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
280 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
281
282 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
283 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
284 /* don't try to enable hpd on eDP or LVDS; doing so would break the
285 * aux dp channel on imac. Leaving it off helps (but does not completely
286 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143 and also
287 * avoids interrupt storms during dpms.
288 */
289 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
290 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
291 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
292 continue;
293 }
294
295 dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
296 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
297 }
298}
299
300/**
301 * dce_v8_0_hpd_fini - hpd tear down callback.
302 *
303 * @adev: amdgpu_device pointer
304 *
305 * Tear down the hpd pins used by the card (evergreen+).
306 * Disable the hpd interrupts.
307 */
308static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
309{
310 struct drm_device *dev = adev->ddev;
311 struct drm_connector *connector;
312 u32 tmp;
313
314 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
315 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
316
317 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
318 continue;
319
320 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
321 tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
322 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
323
324 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
325 }
326}
327
328static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
329{
330 return mmDC_GPIO_HPD_A;
331}
332
333static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
334{
335 u32 crtc_hung = 0;
336 u32 crtc_status[6];
337 u32 i, j, tmp;
338
339 for (i = 0; i < adev->mode_info.num_crtc; i++) {
340 if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
341 crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
342 crtc_hung |= (1 << i);
343 }
344 }
345
346 for (j = 0; j < 10; j++) {
347 for (i = 0; i < adev->mode_info.num_crtc; i++) {
348 if (crtc_hung & (1 << i)) {
349 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
350 if (tmp != crtc_status[i])
351 crtc_hung &= ~(1 << i);
352 }
353 }
354 if (crtc_hung == 0)
355 return false;
356 udelay(100);
357 }
358
359 return true;
360}
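/*
 * Heuristic summary: CRTC_STATUS_HV_COUNT advances continuously while a
 * CRTC scans out, so each enabled CRTC is sampled up to ten times, 100 us
 * apart, and the display engine is only reported hung if some counter
 * never changed across all samples.
 */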
361
362static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
363 bool render)
364{
365 u32 tmp;
366
367 /* Lock out access through the VGA aperture */
368 tmp = RREG32(mmVGA_HDP_CONTROL);
369 if (render)
370 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
371 else
372 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
373 WREG32(mmVGA_HDP_CONTROL, tmp);
374
375 /* disable VGA render */
376 tmp = RREG32(mmVGA_RENDER_CONTROL);
377 if (render)
378 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
379 else
380 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
381 WREG32(mmVGA_RENDER_CONTROL, tmp);
382}
383
384static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
385{
386 int num_crtc = 0;
387
388 switch (adev->asic_type) {
389 case CHIP_BONAIRE:
390 case CHIP_HAWAII:
391 num_crtc = 6;
392 break;
393 case CHIP_KAVERI:
394 num_crtc = 4;
395 break;
396 case CHIP_KABINI:
397 case CHIP_MULLINS:
398 num_crtc = 2;
399 break;
400 default:
401 num_crtc = 0;
402 }
403 return num_crtc;
404}
405
406void dce_v8_0_disable_dce(struct amdgpu_device *adev)
407{
408 /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
409 if (amdgpu_atombios_has_dce_engine_info(adev)) {
410 u32 tmp;
411 int crtc_enabled, i;
412
413 dce_v8_0_set_vga_render_state(adev, false);
414
415 /* Disable crtc */
416 for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
417 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
418 CRTC_CONTROL, CRTC_MASTER_EN);
419 if (crtc_enabled) {
420 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
421 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
422 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
423 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
424 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
425 }
426 }
427 }
428}
429
430static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
431{
432 struct drm_device *dev = encoder->dev;
433 struct amdgpu_device *adev = dev->dev_private;
434 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
435 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
436 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
437 int bpc = 0;
438 u32 tmp = 0;
439 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
440
441 if (connector) {
442 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
443 bpc = amdgpu_connector_get_monitor_bpc(connector);
444 dither = amdgpu_connector->dither;
445 }
446
447 /* LVDS/eDP FMT is set up by atom */
448 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
449 return;
450
451 /* not needed for analog */
452 if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
453 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
454 return;
455
456 if (bpc == 0)
457 return;
458
459 switch (bpc) {
460 case 6:
461 if (dither == AMDGPU_FMT_DITHER_ENABLE)
462 /* XXX sort out optimal dither settings */
463 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
464 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
465 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
466 (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
467 else
468 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
469 (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
470 break;
471 case 8:
472 if (dither == AMDGPU_FMT_DITHER_ENABLE)
473 /* XXX sort out optimal dither settings */
474 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
475 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
476 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
477 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
478 (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
479 else
480 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
481 (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
482 break;
483 case 10:
484 if (dither == AMDGPU_FMT_DITHER_ENABLE)
485 /* XXX sort out optimal dither settings */
486 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
487 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
488 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
489 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
490 (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
491 else
492 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
493 (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
494 break;
495 default:
496 /* not needed */
497 break;
498 }
499
500 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
501}
502
503
504/* display watermark setup */
505/**
506 * dce_v8_0_line_buffer_adjust - Set up the line buffer
507 *
508 * @adev: amdgpu_device pointer
509 * @amdgpu_crtc: the selected display controller
510 * @mode: the current display mode on the selected display
511 * controller
512 *
513 * Set up the line buffer allocation for
514 * the selected display controller (CIK).
515 * Returns the line buffer size in pixels.
516 */
517static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
518 struct amdgpu_crtc *amdgpu_crtc,
519 struct drm_display_mode *mode)
520{
521 u32 tmp, buffer_alloc, i;
522 u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
523 /*
524 * Line Buffer Setup
525 * There are 6 line buffers, one for each display controller.
526 * There are 3 partitions per LB. Select the number of partitions
527 * to enable based on the display width. For display widths larger
528 * than 4096, you need to use 2 display controllers and combine
529 * them using the stereo blender.
530 */
531 if (amdgpu_crtc->base.enabled && mode) {
532 if (mode->crtc_hdisplay < 1920) {
533 tmp = 1;
534 buffer_alloc = 2;
535 } else if (mode->crtc_hdisplay < 2560) {
536 tmp = 2;
537 buffer_alloc = 2;
538 } else if (mode->crtc_hdisplay < 4096) {
539 tmp = 0;
540 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
541 } else {
542 DRM_DEBUG_KMS("Mode too big for LB!\n");
543 tmp = 0;
544 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
545 }
546 } else {
547 tmp = 1;
548 buffer_alloc = 0;
549 }
550
551 WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
552 (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
553 (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
554
555 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
556 (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
557 for (i = 0; i < adev->usec_timeout; i++) {
558 if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
559 PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
560 break;
561 udelay(1);
562 }
563
564 if (amdgpu_crtc->base.enabled && mode) {
565 switch (tmp) {
566 case 0:
567 default:
568 return 4096 * 2;
569 case 1:
570 return 1920 * 2;
571 case 2:
572 return 2560 * 2;
573 }
574 }
575
576 /* controller not enabled, so no lb used */
577 return 0;
578}
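/*
 * Worked example: a 1280x720 mode takes the crtc_hdisplay < 1920 branch
 * (tmp = 1, buffer_alloc = 2) and the function returns 1920 * 2 = 3840
 * pixels of line buffer, while 1920x1080 falls into the < 2560 branch
 * and gets 2560 * 2 = 5120 pixels.
 */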
579
580/**
581 * cik_get_number_of_dram_channels - get the number of dram channels
582 *
583 * @adev: amdgpu_device pointer
584 *
585 * Look up the number of video ram channels (CIK).
586 * Used for display watermark bandwidth calculations
587 * Returns the number of dram channels
588 */
589static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
590{
591 u32 tmp = RREG32(mmMC_SHARED_CHMAP);
592
593 switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
594 case 0:
595 default:
596 return 1;
597 case 1:
598 return 2;
599 case 2:
600 return 4;
601 case 3:
602 return 8;
603 case 4:
604 return 3;
605 case 5:
606 return 6;
607 case 6:
608 return 10;
609 case 7:
610 return 12;
611 case 8:
612 return 16;
613 }
614}
615
616struct dce8_wm_params {
617 u32 dram_channels; /* number of dram channels */
618 u32 yclk; /* bandwidth per dram data pin in kHz */
619 u32 sclk; /* engine clock in kHz */
620 u32 disp_clk; /* display clock in kHz */
621 u32 src_width; /* viewport width */
622 u32 active_time; /* active display time in ns */
623 u32 blank_time; /* blank time in ns */
624 bool interlaced; /* mode is interlaced */
625 fixed20_12 vsc; /* vertical scale ratio */
626 u32 num_heads; /* number of active crtcs */
627 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
628 u32 lb_size; /* line buffer allocated to pipe */
629 u32 vtaps; /* vertical scaler taps */
630};
631
632/**
633 * dce_v8_0_dram_bandwidth - get the dram bandwidth
634 *
635 * @wm: watermark calculation data
636 *
637 * Calculate the raw dram bandwidth (CIK).
638 * Used for display watermark bandwidth calculations
639 * Returns the dram bandwidth in MBytes/s
640 */
641static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
642{
643 /* Calculate raw DRAM Bandwidth */
644 fixed20_12 dram_efficiency; /* 0.7 */
645 fixed20_12 yclk, dram_channels, bandwidth;
646 fixed20_12 a;
647
648 a.full = dfixed_const(1000);
649 yclk.full = dfixed_const(wm->yclk);
650 yclk.full = dfixed_div(yclk, a);
651 dram_channels.full = dfixed_const(wm->dram_channels * 4);
652 a.full = dfixed_const(10);
653 dram_efficiency.full = dfixed_const(7);
654 dram_efficiency.full = dfixed_div(dram_efficiency, a);
655 bandwidth.full = dfixed_mul(dram_channels, yclk);
656 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
657
658 return dfixed_trunc(bandwidth);
659}
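/*
 * Worked example: with wm->yclk = 1000000 (a 1 GHz effective DRAM clock
 * expressed in kHz) and 4 channels, the fixed-point math above evaluates
 * to (1000000 / 1000) * (4 * 4) * 0.7 = 1000 * 16 * 0.7 = 11200 MB/s.
 */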
660
661/**
662 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
663 *
664 * @wm: watermark calculation data
665 *
666 * Calculate the dram bandwidth used for display (CIK).
667 * Used for display watermark bandwidth calculations
668 * Returns the dram bandwidth for display in MBytes/s
669 */
670static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
671{
672 /* Calculate DRAM Bandwidth and the part allocated to display. */
673 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
674 fixed20_12 yclk, dram_channels, bandwidth;
675 fixed20_12 a;
676
677 a.full = dfixed_const(1000);
678 yclk.full = dfixed_const(wm->yclk);
679 yclk.full = dfixed_div(yclk, a);
680 dram_channels.full = dfixed_const(wm->dram_channels * 4);
681 a.full = dfixed_const(10);
682 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
683 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
684 bandwidth.full = dfixed_mul(dram_channels, yclk);
685 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
686
687 return dfixed_trunc(bandwidth);
688}
689
690/**
691 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
692 *
693 * @wm: watermark calculation data
694 *
695 * Calculate the data return bandwidth used for display (CIK).
696 * Used for display watermark bandwidth calculations
697 * Returns the data return bandwidth in MBytes/s
698 */
699static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
700{
701 /* Calculate the display Data return Bandwidth */
702 fixed20_12 return_efficiency; /* 0.8 */
703 fixed20_12 sclk, bandwidth;
704 fixed20_12 a;
705
706 a.full = dfixed_const(1000);
707 sclk.full = dfixed_const(wm->sclk);
708 sclk.full = dfixed_div(sclk, a);
709 a.full = dfixed_const(10);
710 return_efficiency.full = dfixed_const(8);
711 return_efficiency.full = dfixed_div(return_efficiency, a);
712 a.full = dfixed_const(32);
713 bandwidth.full = dfixed_mul(a, sclk);
714 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
715
716 return dfixed_trunc(bandwidth);
717}
718
719/**
720 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
721 *
722 * @wm: watermark calculation data
723 *
724 * Calculate the dmif bandwidth used for display (CIK).
725 * Used for display watermark bandwidth calculations
726 * Returns the dmif bandwidth in MBytes/s
727 */
728static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
729{
730 /* Calculate the DMIF Request Bandwidth */
731 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
732 fixed20_12 disp_clk, bandwidth;
733 fixed20_12 a, b;
734
735 a.full = dfixed_const(1000);
736 disp_clk.full = dfixed_const(wm->disp_clk);
737 disp_clk.full = dfixed_div(disp_clk, a);
738 a.full = dfixed_const(32);
739 b.full = dfixed_mul(a, disp_clk);
740
741 a.full = dfixed_const(10);
742 disp_clk_request_efficiency.full = dfixed_const(8);
743 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
744
745 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
746
747 return dfixed_trunc(bandwidth);
748}
749
750/**
751 * dce_v8_0_available_bandwidth - get the min available bandwidth
752 *
753 * @wm: watermark calculation data
754 *
755 * Calculate the min available bandwidth used for display (CIK).
756 * Used for display watermark bandwidth calculations
757 * Returns the min available bandwidth in MBytes/s
758 */
759static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
760{
761 /* Calculate the available bandwidth. The display can use this much temporarily but not on average. */
762 u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
763 u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
764 u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
765
766 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
767}
768
769/**
770 * dce_v8_0_average_bandwidth - get the average available bandwidth
771 *
772 * @wm: watermark calculation data
773 *
774 * Calculate the average available bandwidth used for display (CIK).
775 * Used for display watermark bandwidth calculations
776 * Returns the average available bandwidth in MBytes/s
777 */
778static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
779{
780 /* Calculate the display mode Average Bandwidth
781 * DisplayMode should contain the source and destination dimensions,
782 * timing, etc.
783 */
784 fixed20_12 bpp;
785 fixed20_12 line_time;
786 fixed20_12 src_width;
787 fixed20_12 bandwidth;
788 fixed20_12 a;
789
790 a.full = dfixed_const(1000);
791 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
792 line_time.full = dfixed_div(line_time, a);
793 bpp.full = dfixed_const(wm->bytes_per_pixel);
794 src_width.full = dfixed_const(wm->src_width);
795 bandwidth.full = dfixed_mul(src_width, bpp);
796 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
797 bandwidth.full = dfixed_div(bandwidth, line_time);
798
799 return dfixed_trunc(bandwidth);
800}
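/*
 * In plain form: average bandwidth = src_width * bytes_per_pixel * vsc /
 * line_time, where line_time = (active_time + blank_time) / 1000 turns
 * nanoseconds into microseconds, so the result comes out in MB/s like
 * the other helpers here.
 */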
801
802/**
803 * dce_v8_0_latency_watermark - get the latency watermark
804 *
805 * @wm: watermark calculation data
806 *
807 * Calculate the latency watermark (CIK).
808 * Used for display watermark bandwidth calculations
809 * Returns the latency watermark in ns
810 */
811static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
812{
813 /* First calculate the latency in ns */
814 u32 mc_latency = 2000; /* 2000 ns. */
815 u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
816 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
817 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
818 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
819 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
820 (wm->num_heads * cursor_line_pair_return_time);
821 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
822 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
823 u32 tmp, dmif_size = 12288;
824 fixed20_12 a, b, c;
825
826 if (wm->num_heads == 0)
827 return 0;
828
829 a.full = dfixed_const(2);
830 b.full = dfixed_const(1);
831 if ((wm->vsc.full > a.full) ||
832 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
833 (wm->vtaps >= 5) ||
834 ((wm->vsc.full >= a.full) && wm->interlaced))
835 max_src_lines_per_dst_line = 4;
836 else
837 max_src_lines_per_dst_line = 2;
838
839 a.full = dfixed_const(available_bandwidth);
840 b.full = dfixed_const(wm->num_heads);
841 a.full = dfixed_div(a, b);
842 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
843 tmp = min(dfixed_trunc(a), tmp);
844
845 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
846
847 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
848 b.full = dfixed_const(1000);
849 c.full = dfixed_const(lb_fill_bw);
850 b.full = dfixed_div(c, b);
851 a.full = dfixed_div(a, b);
852 line_fill_time = dfixed_trunc(a);
853
854 if (line_fill_time < wm->active_time)
855 return latency;
856 else
857 return latency + (line_fill_time - wm->active_time);
858
859}
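/*
 * Plain-form summary of the above:
 *
 *	latency = mc_latency + dc_latency + other_heads_data_return_time
 *	wm = latency					if line_fill_time
 *							   < active_time
 *	   = latency + (line_fill_time - active_time)	otherwise
 *
 * where line_fill_time is how long the worst-case fetch
 * (max_src_lines_per_dst_line * src_width * bytes_per_pixel) takes at
 * the achievable line-buffer fill rate lb_fill_bw.
 */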
860
861/**
862 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
863 * average and available dram bandwidth
864 *
865 * @wm: watermark calculation data
866 *
867 * Check if the display average bandwidth fits in the display
868 * dram bandwidth (CIK).
869 * Used for display watermark bandwidth calculations
870 * Returns true if the display fits, false if not.
871 */
872static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
873{
874 if (dce_v8_0_average_bandwidth(wm) <=
875 (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
876 return true;
877 else
878 return false;
879}
880
881/**
882 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
883 * average and available bandwidth
884 *
885 * @wm: watermark calculation data
886 *
887 * Check if the display average bandwidth fits in the display
888 * available bandwidth (CIK).
889 * Used for display watermark bandwidth calculations
890 * Returns true if the display fits, false if not.
891 */
892static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
893{
894 if (dce_v8_0_average_bandwidth(wm) <=
895 (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
896 return true;
897 else
898 return false;
899}
900
901/**
902 * dce_v8_0_check_latency_hiding - check latency hiding
903 *
904 * @wm: watermark calculation data
905 *
906 * Check latency hiding (CIK).
907 * Used for display watermark bandwidth calculations
908 * Returns true if the display fits, false if not.
909 */
910static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
911{
912 u32 lb_partitions = wm->lb_size / wm->src_width;
913 u32 line_time = wm->active_time + wm->blank_time;
914 u32 latency_tolerant_lines;
915 u32 latency_hiding;
916 fixed20_12 a;
917
918 a.full = dfixed_const(1);
919 if (wm->vsc.full > a.full)
920 latency_tolerant_lines = 1;
921 else {
922 if (lb_partitions <= (wm->vtaps + 1))
923 latency_tolerant_lines = 1;
924 else
925 latency_tolerant_lines = 2;
926 }
927
928 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
929
930 if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
931 return true;
932 else
933 return false;
934}
935
936/**
937 * dce_v8_0_program_watermarks - program display watermarks
938 *
939 * @adev: amdgpu_device pointer
940 * @amdgpu_crtc: the selected display controller
941 * @lb_size: line buffer size
942 * @num_heads: number of display controllers in use
943 *
944 * Calculate and program the display watermarks for the
945 * selected display controller (CIK).
946 */
947static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
948 struct amdgpu_crtc *amdgpu_crtc,
949 u32 lb_size, u32 num_heads)
950{
951 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
952 struct dce8_wm_params wm_low, wm_high;
953 u32 active_time;
954 u32 line_time = 0;
955 u32 latency_watermark_a = 0, latency_watermark_b = 0;
956 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
957
958 if (amdgpu_crtc->base.enabled && num_heads && mode) {
959 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
960 (u32)mode->clock);
961 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
962 (u32)mode->clock);
963 line_time = min(line_time, (u32)65535);
964
965 /* watermark for high clocks */
966 if (adev->pm.dpm_enabled) {
967 wm_high.yclk =
968 amdgpu_dpm_get_mclk(adev, false) * 10;
969 wm_high.sclk =
970 amdgpu_dpm_get_sclk(adev, false) * 10;
971 } else {
972 wm_high.yclk = adev->pm.current_mclk * 10;
973 wm_high.sclk = adev->pm.current_sclk * 10;
974 }
975
976 wm_high.disp_clk = mode->clock;
977 wm_high.src_width = mode->crtc_hdisplay;
978 wm_high.active_time = active_time;
979 wm_high.blank_time = line_time - wm_high.active_time;
980 wm_high.interlaced = false;
981 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
982 wm_high.interlaced = true;
983 wm_high.vsc = amdgpu_crtc->vsc;
984 wm_high.vtaps = 1;
985 if (amdgpu_crtc->rmx_type != RMX_OFF)
986 wm_high.vtaps = 2;
987 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
988 wm_high.lb_size = lb_size;
989 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
990 wm_high.num_heads = num_heads;
991
992 /* set for high clocks */
993 latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
994
995 /* possibly force display priority to high */
996 /* should really do this at mode validation time... */
997 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
998 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
999 !dce_v8_0_check_latency_hiding(&wm_high) ||
1000 (adev->mode_info.disp_priority == 2)) {
1001 DRM_DEBUG_KMS("force priority to high\n");
1002 }
1003
1004 /* watermark for low clocks */
1005 if (adev->pm.dpm_enabled) {
1006 wm_low.yclk =
1007 amdgpu_dpm_get_mclk(adev, true) * 10;
1008 wm_low.sclk =
1009 amdgpu_dpm_get_sclk(adev, true) * 10;
1010 } else {
1011 wm_low.yclk = adev->pm.current_mclk * 10;
1012 wm_low.sclk = adev->pm.current_sclk * 10;
1013 }
1014
1015 wm_low.disp_clk = mode->clock;
1016 wm_low.src_width = mode->crtc_hdisplay;
1017 wm_low.active_time = active_time;
1018 wm_low.blank_time = line_time - wm_low.active_time;
1019 wm_low.interlaced = false;
1020 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1021 wm_low.interlaced = true;
1022 wm_low.vsc = amdgpu_crtc->vsc;
1023 wm_low.vtaps = 1;
1024 if (amdgpu_crtc->rmx_type != RMX_OFF)
1025 wm_low.vtaps = 2;
1026 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1027 wm_low.lb_size = lb_size;
1028 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1029 wm_low.num_heads = num_heads;
1030
1031 /* set for low clocks */
1032 latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1033
1034 /* possibly force display priority to high */
1035 /* should really do this at mode validation time... */
1036 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1037 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1038 !dce_v8_0_check_latency_hiding(&wm_low) ||
1039 (adev->mode_info.disp_priority == 2)) {
1040 DRM_DEBUG_KMS("force priority to high\n");
1041 }
1042 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1043 }
1044
1045 /* select wm A */
1046 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1047 tmp = wm_mask;
1048 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1049 tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1050 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1051 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1052 ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1053 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1054 /* select wm B */
1055 tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1056 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1057 tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1058 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1059 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1060 ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1061 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1062 /* restore original selection */
1063 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1064
1065 /* save values for DPM */
1066 amdgpu_crtc->line_time = line_time;
1067 amdgpu_crtc->wm_high = latency_watermark_a;
1068 amdgpu_crtc->wm_low = latency_watermark_b;
1069 /* Save the number of lines the line buffer leads ahead of the scanout */
1070 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1071}
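/*
 * The URGENCY_WATERMARK_MASK field acts as a write index: programming it
 * to 1 or 2 selects whether the following DPG_PIPE_URGENCY_CONTROL write
 * lands in watermark set A (high clocks) or B (low clocks), and the
 * original mask value is restored afterwards so DPM can switch between
 * the two sets at runtime.
 */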
1072
1073/**
1074 * dce_v8_0_bandwidth_update - program display watermarks
1075 *
1076 * @adev: amdgpu_device pointer
1077 *
1078 * Calculate and program the display watermarks and line
1079 * buffer allocation (CIK).
1080 */
1081static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1082{
1083 struct drm_display_mode *mode = NULL;
1084 u32 num_heads = 0, lb_size;
1085 int i;
1086
1087 amdgpu_display_update_priority(adev);
1088
1089 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1090 if (adev->mode_info.crtcs[i]->base.enabled)
1091 num_heads++;
1092 }
1093 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1094 mode = &adev->mode_info.crtcs[i]->base.mode;
1095 lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1096 dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1097 lb_size, num_heads);
1098 }
1099}
1100
1101static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1102{
1103 int i;
1104 u32 offset, tmp;
1105
1106 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1107 offset = adev->mode_info.audio.pin[i].offset;
1108 tmp = RREG32_AUDIO_ENDPT(offset,
1109 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1110 if (((tmp &
1111 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1112 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1113 adev->mode_info.audio.pin[i].connected = false;
1114 else
1115 adev->mode_info.audio.pin[i].connected = true;
1116 }
1117}
1118
1119static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1120{
1121 int i;
1122
1123 dce_v8_0_audio_get_connected_pins(adev);
1124
1125 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1126 if (adev->mode_info.audio.pin[i].connected)
1127 return &adev->mode_info.audio.pin[i];
1128 }
1129 DRM_ERROR("No connected audio pins found!\n");
1130 return NULL;
1131}
1132
1133static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1134{
1135 struct amdgpu_device *adev = encoder->dev->dev_private;
1136 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1137 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1138 u32 offset;
1139
1140 if (!dig || !dig->afmt || !dig->afmt->pin)
1141 return;
1142
1143 offset = dig->afmt->offset;
1144
1145 WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1146 (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1147}
1148
1149static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1150 struct drm_display_mode *mode)
1151{
1152 struct amdgpu_device *adev = encoder->dev->dev_private;
1153 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1154 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1155 struct drm_connector *connector;
1156 struct amdgpu_connector *amdgpu_connector = NULL;
1157 u32 tmp = 0, offset;
1158
1159 if (!dig || !dig->afmt || !dig->afmt->pin)
1160 return;
1161
1162 offset = dig->afmt->pin->offset;
1163
1164 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1165 if (connector->encoder == encoder) {
1166 amdgpu_connector = to_amdgpu_connector(connector);
1167 break;
1168 }
1169 }
1170
1171 if (!amdgpu_connector) {
1172 DRM_ERROR("Couldn't find encoder's connector\n");
1173 return;
1174 }
1175
1176 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1177 if (connector->latency_present[1])
1178 tmp =
1179 (connector->video_latency[1] <<
1180 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1181 (connector->audio_latency[1] <<
1182 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1183 else
1184 tmp =
1185 (0 <<
1186 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1187 (0 <<
1188 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1189 } else {
1190 if (connector->latency_present[0])
1191 tmp =
1192 (connector->video_latency[0] <<
1193 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1194 (connector->audio_latency[0] <<
1195 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1196 else
1197 tmp =
1198 (0 <<
1199 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1200 (0 <<
1201 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1202
1203 }
1204 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1205}
1206
1207static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1208{
1209 struct amdgpu_device *adev = encoder->dev->dev_private;
1210 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1211 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1212 struct drm_connector *connector;
1213 struct amdgpu_connector *amdgpu_connector = NULL;
1214 u32 offset, tmp;
1215 u8 *sadb = NULL;
1216 int sad_count;
1217
1218 if (!dig || !dig->afmt || !dig->afmt->pin)
1219 return;
1220
1221 offset = dig->afmt->pin->offset;
1222
1223 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1224 if (connector->encoder == encoder) {
1225 amdgpu_connector = to_amdgpu_connector(connector);
1226 break;
1227 }
1228 }
1229
1230 if (!amdgpu_connector) {
1231 DRM_ERROR("Couldn't find encoder's connector\n");
1232 return;
1233 }
1234
1235 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1236 if (sad_count < 0) {
1237 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1238 sad_count = 0;
1239 }
1240
1241 /* program the speaker allocation */
1242 tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1243 tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1244 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1245 /* set HDMI mode */
1246 tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1247 if (sad_count)
1248 tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1249 else
1250 tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1251 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1252
1253 kfree(sadb);
1254}
1255
1256static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1257{
1258 struct amdgpu_device *adev = encoder->dev->dev_private;
1259 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1260 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1261 u32 offset;
1262 struct drm_connector *connector;
1263 struct amdgpu_connector *amdgpu_connector = NULL;
1264 struct cea_sad *sads;
1265 int i, sad_count;
1266
1267 static const u16 eld_reg_to_type[][2] = {
1268 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1269 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1270 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1271 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1272 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1273 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1274 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1275 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1276 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1277 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1278 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1279 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1280 };
1281
1282 if (!dig || !dig->afmt || !dig->afmt->pin)
1283 return;
1284
1285 offset = dig->afmt->pin->offset;
1286
1287 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1288 if (connector->encoder == encoder) {
1289 amdgpu_connector = to_amdgpu_connector(connector);
1290 break;
1291 }
1292 }
1293
1294 if (!amdgpu_connector) {
1295 DRM_ERROR("Couldn't find encoder's connector\n");
1296 return;
1297 }
1298
1299 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1300 if (sad_count <= 0) {
1301 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1302 return;
1303 }
1304 BUG_ON(!sads);
1305
1306 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1307 u32 value = 0;
1308 u8 stereo_freqs = 0;
1309 int max_channels = -1;
1310 int j;
1311
1312 for (j = 0; j < sad_count; j++) {
1313 struct cea_sad *sad = &sads[j];
1314
1315 if (sad->format == eld_reg_to_type[i][1]) {
1316 if (sad->channels > max_channels) {
1317 value = (sad->channels <<
1318 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1319 (sad->byte2 <<
1320 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1321 (sad->freq <<
1322 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1323 max_channels = sad->channels;
1324 }
1325
1326 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1327 stereo_freqs |= sad->freq;
1328 else
1329 break;
1330 }
1331 }
1332
1333 value |= (stereo_freqs <<
1334 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1335
1336 WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1337 }
1338
1339 kfree(sads);
1340}
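/*
 * Illustrative mapping, assuming the CEA-861 encoding used by struct
 * cea_sad (where "channels" stores max channels minus one): an 8-channel
 * PCM descriptor supporting 32/44.1/48 kHz arrives as
 *
 *	sad->format = HDMI_AUDIO_CODING_TYPE_PCM;
 *	sad->channels = 7;
 *	sad->freq = 0x07;
 *
 * and those fields are shifted straight into DESCRIPTOR0 above.
 */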
1341
1342static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1343 struct amdgpu_audio_pin *pin,
1344 bool enable)
1345{
1346 if (!pin)
1347 return;
1348
1349 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1350 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1351}
1352
1353static const u32 pin_offsets[7] =
1354{
1355 (0x1780 - 0x1780),
1356 (0x1786 - 0x1780),
1357 (0x178c - 0x1780),
1358 (0x1792 - 0x1780),
1359 (0x1798 - 0x1780),
1360 (0x179d - 0x1780),
1361 (0x17a4 - 0x1780),
1362};
1363
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
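/*
 * Per the HDMI spec, the sink regenerates the audio clock from the ACR
 * packet as 128 * fs = f_TMDS * N / CTS, so each supported sample-rate
 * family (32 kHz, 44.1 kHz, 48 kHz) gets its own N and CTS pair below.
 */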
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is
	 * the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
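	/* e.g. a 148.5 MHz pixel clock (clock == 148500 kHz) yields
	 * PHASE = 24000 and MODULE = 148500, i.e. 24 MHz / 148.5 MHz.
	 */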
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

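/*
 * dce_v8_0_afmt_enable - track enabling/disabling of the AFMT block
 *
 * On disable, also releases the audio pin claimed in
 * dce_v8_0_afmt_setmode(); the register setup itself happens at modeset.
 */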
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

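/*
 * dce_v8_0_afmt_init - allocate one AFMT structure per DIG encoder
 *
 * On allocation failure, frees everything allocated so far and
 * returns -ENOMEM.
 */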
static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE8 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			int j;
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

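/*
 * dce_v8_0_crtc_do_set_base - program the primary surface for a crtc
 *
 * Pins the framebuffer BO in VRAM (unless called from the atomic path,
 * where it is assumed to be pinned already), translates the DRM pixel
 * format and tiling flags into GRPH_CONTROL bits, programs the surface
 * address/pitch/viewport and finally unpins the old framebuffer.
 */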
static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
	u32 pipe_config;
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(abo);
	} else {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->format->format, &format_name));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v8_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
		 ~LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v8_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v8_0_bandwidth_update(adev);

	return 0;
}

static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
	else
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

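/*
 * dce_v8_0_crtc_load_lut - upload the gamma LUT for a crtc
 *
 * Bypasses the input CSC/prescale/degamma stages and writes the 256-entry
 * gamma table; each DC_LUT_30_COLOR write packs the top 10 bits of the
 * 16-bit R/G/B gamma values into bits 29:20, 19:10 and 9:0 respectively.
 */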
static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
}

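/*
 * dce_v8_0_pick_dig_encoder - map a UNIPHY encoder to a DIG block
 *
 * Each UNIPHY PHY hosts two links: link A of UNIPHY0/1/2 maps to
 * DIG0/2/4 and link B to DIG1/3/5, with UNIPHY3 using DIG6.
 */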
static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		if (dig->linkb)
			return 1;
		else
			return 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		if (dig->linkb)
			return 3;
		else
			return 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (dig->linkb)
			return 5;
		else
			return 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}

/**
 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
 * monitors a dedicated PPLL must be used. If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself. If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 * Asic specific PLL information
 *
 * DCE 8.x
 * KB/KV
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
 *
 */
static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else {
			/* use the same PPLL for all DP monitors */
			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}
	/* otherwise, pick one of the plls */
	if ((adev->asic_type == CHIP_KABINI) ||
	    (adev->asic_type == CHIP_MULLINS)) {
		/* KB/ML has PPLL1 and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else {
		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	}
	return ATOM_PPLL_INVALID;
}

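/*
 * Cursor handling: the cursor is locked while its registers are updated
 * so the hardware latches position/address changes atomically; hide/show
 * toggle the cursor plane itself.
 */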
static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursors are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

	return 0;
}

static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v8_0_lock_cursor(crtc, true);
	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
	dce_v8_0_lock_cursor(crtc, false);

	return ret;
}

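/*
 * dce_v8_0_crtc_cursor_set2 - bind a new cursor BO to the crtc
 *
 * Looks up the GEM object for @handle, pins it in VRAM, reprograms
 * position/hotspot/size if they changed, then unpins and releases the
 * previous cursor BO. A zero handle simply hides the cursor.
 */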
static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v8_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

	dce_v8_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v8_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v8_0_show_cursor(crtc);
	dce_v8_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, true);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}

static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v8_0_lock_cursor(crtc, true);

		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);

		dce_v8_0_show_cursor(crtc);

		dce_v8_0_lock_cursor(crtc, false);
	}
}

static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size,
				   struct drm_modeset_acquire_ctx *ctx)
{
	dce_v8_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
	.cursor_move = dce_v8_0_crtc_cursor_move,
	.gamma_set = dce_v8_0_crtc_gamma_set,
	.set_config = amdgpu_display_crtc_set_config,
	.destroy = dce_v8_0_crtc_destroy,
	.page_flip_target = amdgpu_display_crtc_page_flip_target,
};

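/*
 * dce_v8_0_crtc_dpms - power a crtc up or down via atombios
 *
 * On DPMS_ON, unblanks the crtc, re-enables the VBLANK/PFLIP interrupt
 * sources and reloads the LUT; the off states blank and disable the
 * crtc. PM clocks are recomputed afterwards in either case.
 */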
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		dce_v8_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v8_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_display_crtc_idx_to_irq_type(adev,
							   amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v8_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled) {
			dce_v8_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v8_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v8_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is using this pll; don't turn it off */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((adev->asic_type == CHIP_KAVERI) ||
		    (adev->asic_type == CHIP_BONAIRE) ||
		    (adev->asic_type == CHIP_HAWAII))
			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
							 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v8_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
	.dpms = dce_v8_0_crtc_dpms,
	.mode_fixup = dce_v8_0_crtc_mode_fixup,
	.mode_set = dce_v8_0_crtc_mode_set,
	.mode_set_base = dce_v8_0_crtc_set_base,
	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
	.prepare = dce_v8_0_crtc_prepare,
	.commit = dce_v8_0_crtc_commit,
	.disable = dce_v8_0_crtc_disable,
};

static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);

	return 0;
}

static int dce_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;

	dce_v8_0_set_display_funcs(adev);

	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6; /* ? */
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	dce_v8_0_set_irq_funcs(adev);

	return 0;
}

static int dce_v8_0_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

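	/* one pageflip interrupt source per crtc: even src_ids 8, 10, ..., 18 */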
2647 for (i = 8; i < 20; i += 2) {
2648 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2649 if (r)
2650 return r;
2651 }
2652
2653 /* HPD hotplug */
2654 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2655 if (r)
2656 return r;
2657
2658 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2659
2660 adev->ddev->mode_config.async_page_flip = true;
2661
2662 adev->ddev->mode_config.max_width = 16384;
2663 adev->ddev->mode_config.max_height = 16384;
2664
2665 adev->ddev->mode_config.preferred_depth = 24;
2666 adev->ddev->mode_config.prefer_shadow = 1;
2667
2668 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2669
2670 r = amdgpu_display_modeset_create_props(adev);
2671 if (r)
2672 return r;
2673
2674 adev->ddev->mode_config.max_width = 16384;
2675 adev->ddev->mode_config.max_height = 16384;
2676
2677 /* allocate crtcs */
2678 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2679 r = dce_v8_0_crtc_init(adev, i);
2680 if (r)
2681 return r;
2682 }
2683
2684 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2685 amdgpu_display_print_display_setup(adev->ddev);
2686 else
2687 return -EINVAL;
2688
2689 /* setup afmt */
2690 r = dce_v8_0_afmt_init(adev);
2691 if (r)
2692 return r;
2693
2694 r = dce_v8_0_audio_init(adev);
2695 if (r)
2696 return r;
2697
2698 drm_kms_helper_poll_init(adev->ddev);
2699
2700 adev->mode_info.mode_config_initialized = true;
2701 return 0;
2702}
2703
2704static int dce_v8_0_sw_fini(void *handle)
2705{
2706 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2707
2708 kfree(adev->mode_info.bios_hardcoded_edid);
2709
2710 drm_kms_helper_poll_fini(adev->ddev);
2711
2712 dce_v8_0_audio_fini(adev);
2713
2714 dce_v8_0_afmt_fini(adev);
2715
2716 drm_mode_config_cleanup(adev->ddev);
2717 adev->mode_info.mode_config_initialized = false;
2718
2719 return 0;
2720}
2721
2722static int dce_v8_0_hw_init(void *handle)
2723{
2724 int i;
2725 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2726
2727 /* disable vga render */
2728 dce_v8_0_set_vga_render_state(adev, false);
2729 /* init dig PHYs, disp eng pll */
2730 amdgpu_atombios_encoder_init_dig(adev);
2731 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2732
2733 /* initialize hpd */
2734 dce_v8_0_hpd_init(adev);
2735
2736 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2737 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2738 }
2739
2740 dce_v8_0_pageflip_interrupt_init(adev);
2741
2742 return 0;
2743}
2744
2745static int dce_v8_0_hw_fini(void *handle)
2746{
2747 int i;
2748 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2749
2750 dce_v8_0_hpd_fini(adev);
2751
2752 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2753 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2754 }
2755
2756 dce_v8_0_pageflip_interrupt_fini(adev);
2757
2758 return 0;
2759}
2760
2761static int dce_v8_0_suspend(void *handle)
2762{
2763 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2764
2765 adev->mode_info.bl_level =
2766 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2767
2768 return dce_v8_0_hw_fini(handle);
2769}
2770
2771static int dce_v8_0_resume(void *handle)
2772{
2773 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2774 int ret;
2775
2776 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2777 adev->mode_info.bl_level);
2778
2779 ret = dce_v8_0_hw_init(handle);
2780
2781 /* turn on the BL */
2782 if (adev->mode_info.bl_encoder) {
2783 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2784 adev->mode_info.bl_encoder);
2785 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2786 bl_level);
2787 }
2788
2789 return ret;
2790}
2791
2792static bool dce_v8_0_is_idle(void *handle)
2793{
2794 return true;
2795}
2796
2797static int dce_v8_0_wait_for_idle(void *handle)
2798{
2799 return 0;
2800}
2801
2802static int dce_v8_0_soft_reset(void *handle)
2803{
2804 u32 srbm_soft_reset = 0, tmp;
2805 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2806
2807 if (dce_v8_0_is_display_hung(adev))
2808 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2809
2810 if (srbm_soft_reset) {
2811 tmp = RREG32(mmSRBM_SOFT_RESET);
2812 tmp |= srbm_soft_reset;
2813 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2814 WREG32(mmSRBM_SOFT_RESET, tmp);
2815 tmp = RREG32(mmSRBM_SOFT_RESET);
2816
2817 udelay(50);
2818
2819 tmp &= ~srbm_soft_reset;
2820 WREG32(mmSRBM_SOFT_RESET, tmp);
2821 tmp = RREG32(mmSRBM_SOFT_RESET);
2822
2823 /* Wait a little for things to settle down */
2824 udelay(50);
2825 }
2826 return 0;
2827}
2828
2829static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2830 int crtc,
2831 enum amdgpu_interrupt_state state)
2832{
2833 u32 reg_block, lb_interrupt_mask;
2834
2835 if (crtc >= adev->mode_info.num_crtc) {
2836 DRM_DEBUG("invalid crtc %d\n", crtc);
2837 return;
2838 }
2839
2840 switch (crtc) {
2841 case 0:
2842 reg_block = CRTC0_REGISTER_OFFSET;
2843 break;
2844 case 1:
2845 reg_block = CRTC1_REGISTER_OFFSET;
2846 break;
2847 case 2:
2848 reg_block = CRTC2_REGISTER_OFFSET;
2849 break;
2850 case 3:
2851 reg_block = CRTC3_REGISTER_OFFSET;
2852 break;
2853 case 4:
2854 reg_block = CRTC4_REGISTER_OFFSET;
2855 break;
2856 case 5:
2857 reg_block = CRTC5_REGISTER_OFFSET;
2858 break;
2859 default:
2860 DRM_DEBUG("invalid crtc %d\n", crtc);
2861 return;
2862 }
2863
2864 switch (state) {
2865 case AMDGPU_IRQ_STATE_DISABLE:
2866 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2867 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2868 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2869 break;
2870 case AMDGPU_IRQ_STATE_ENABLE:
2871 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2872 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2873 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2874 break;
2875 default:
2876 break;
2877 }
2878}
2879
2880static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2881 int crtc,
2882 enum amdgpu_interrupt_state state)
2883{
2884 u32 reg_block, lb_interrupt_mask;
2885
2886 if (crtc >= adev->mode_info.num_crtc) {
2887 DRM_DEBUG("invalid crtc %d\n", crtc);
2888 return;
2889 }
2890
2891 switch (crtc) {
2892 case 0:
2893 reg_block = CRTC0_REGISTER_OFFSET;
2894 break;
2895 case 1:
2896 reg_block = CRTC1_REGISTER_OFFSET;
2897 break;
2898 case 2:
2899 reg_block = CRTC2_REGISTER_OFFSET;
2900 break;
2901 case 3:
2902 reg_block = CRTC3_REGISTER_OFFSET;
2903 break;
2904 case 4:
2905 reg_block = CRTC4_REGISTER_OFFSET;
2906 break;
2907 case 5:
2908 reg_block = CRTC5_REGISTER_OFFSET;
2909 break;
2910 default:
2911 DRM_DEBUG("invalid crtc %d\n", crtc);
2912 return;
2913 }
2914
2915 switch (state) {
2916 case AMDGPU_IRQ_STATE_DISABLE:
2917 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2918 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2919 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2920 break;
2921 case AMDGPU_IRQ_STATE_ENABLE:
2922 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2923 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2924 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2925 break;
2926 default:
2927 break;
2928 }
2929}
2930
2931static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2932 struct amdgpu_irq_src *src,
2933 unsigned type,
2934 enum amdgpu_interrupt_state state)
2935{
2936 u32 dc_hpd_int_cntl;
2937
2938 if (type >= adev->mode_info.num_hpd) {
2939 DRM_DEBUG("invalid hdp %d\n", type);
2940 return 0;
2941 }
2942
2943 switch (state) {
2944 case AMDGPU_IRQ_STATE_DISABLE:
2945 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2946 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2947 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2948 break;
2949 case AMDGPU_IRQ_STATE_ENABLE:
2950 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2951 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2952 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2953 break;
2954 default:
2955 break;
2956 }
2957
2958 return 0;
2959}
2960
2961static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2962 struct amdgpu_irq_src *src,
2963 unsigned type,
2964 enum amdgpu_interrupt_state state)
2965{
2966 switch (type) {
2967 case AMDGPU_CRTC_IRQ_VBLANK1:
2968 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2969 break;
2970 case AMDGPU_CRTC_IRQ_VBLANK2:
2971 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2972 break;
2973 case AMDGPU_CRTC_IRQ_VBLANK3:
2974 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2975 break;
2976 case AMDGPU_CRTC_IRQ_VBLANK4:
2977 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2978 break;
2979 case AMDGPU_CRTC_IRQ_VBLANK5:
2980 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2981 break;
2982 case AMDGPU_CRTC_IRQ_VBLANK6:
2983 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2984 break;
2985 case AMDGPU_CRTC_IRQ_VLINE1:
2986 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
2987 break;
2988 case AMDGPU_CRTC_IRQ_VLINE2:
2989 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
2990 break;
2991 case AMDGPU_CRTC_IRQ_VLINE3:
2992 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
2993 break;
2994 case AMDGPU_CRTC_IRQ_VLINE4:
2995 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
2996 break;
2997 case AMDGPU_CRTC_IRQ_VLINE5:
2998 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
2999 break;
3000 case AMDGPU_CRTC_IRQ_VLINE6:
3001 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3002 break;
3003 default:
3004 break;
3005 }
3006 return 0;
3007}
3008
3009static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3010 struct amdgpu_irq_src *source,
3011 struct amdgpu_iv_entry *entry)
3012{
3013 unsigned crtc = entry->src_id - 1;
3014 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3015 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3016 crtc);
3017
3018 switch (entry->src_data[0]) {
3019 case 0: /* vblank */
3020 if (disp_int & interrupt_status_offsets[crtc].vblank)
3021 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3022 else
3023 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3024
3025 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3026 drm_handle_vblank(adev->ddev, crtc);
3027 }
3028 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3029 break;
3030 case 1: /* vline */
3031 if (disp_int & interrupt_status_offsets[crtc].vline)
3032 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3033 else
3034 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3035
3036 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3037 break;
3038 default:
3039 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3040 break;
3041 }
3042
3043 return 0;
3044}
3045
static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

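/**
 * dce_v8_0_pageflip_irq - process a pageflip completion interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this vector belongs to
 * @entry: decoded interrupt vector entry
 *
 * The GRPH_PFLIP sources use IV src_id 8, 10, ..., 18 for D1..D6, hence
 * crtc_id = (src_id - 8) >> 1.  The handler clears the interrupt, marks
 * the flip complete, sends the vblank event to userspace and schedules
 * the work that unpins the old framebuffer.
 */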
static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	/* validate the CRTC index before using it to index the CRTC arrays */
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wakeup userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

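/**
 * dce_v8_0_hpd_irq - process a hotplug detect interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this vector belongs to
 * @entry: decoded interrupt vector entry
 *
 * Acks the HPD interrupt for the pin reported in src_data[0] and
 * schedules the deferred hotplug work that rescans the connectors.
 */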
static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

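/*
 * DCE 8.x does not gate display clocks or power from this IP-level
 * interface, so the clockgating/powergating callbacks are stubs.
 */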
static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

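/**
 * dce_v8_0_encoder_mode_set - set up the encoder for the adjusted mode
 *
 * @encoder: DRM encoder being programmed
 * @mode: requested mode
 * @adjusted_mode: mode after fixup by the encoder/crtc helpers
 *
 * Records the pixel clock, restores the interleave setting (the scaler
 * setup can clear it on some chips) and configures the AFMT block when
 * the encoder is driving HDMI.
 */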
static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

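/**
 * dce_v8_0_encoder_prepare - pre-modeset encoder setup
 *
 * @encoder: DRM encoder about to be programmed
 *
 * Picks a DIG encoder and AFMT block for digital outputs, selects the
 * i2c router port where present, powers up eDP panels, routes the
 * encoder to its CRTC and programs the FMT dithering block.
 */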
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

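/**
 * dce_v8_0_encoder_add - register an encoder parsed from the BIOS tables
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: ATOM encoder object enum
 * @supported_device: ATOM device support bitmask
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum already exists only its device mask is
 * extended.  Otherwise a new amdgpu_encoder is allocated, possible_crtcs
 * is derived from the CRTC count, and the DAC, DIG or external-bridge
 * helper vtable is attached based on the encoder object id.
 */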
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

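/* display hooks the amdgpu display core and IRQ handlers call into */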
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

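/*
 * The CRTC interrupt source covers both vblank and vline types: vblank
 * types are numbered 0..num_crtc-1 and vline types start at
 * AMDGPU_CRTC_IRQ_VLINE1, so the total count is
 * AMDGPU_CRTC_IRQ_VLINE1 + num_crtc.
 */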
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
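
/*
 * Usage sketch (not part of this file): the per-ASIC setup code in cik.c
 * picks one of the blocks above for its DCE minor revision, e.g. something
 * like
 *
 *	amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
 *
 * for Bonaire (dce_v8_1 for Kaveri, dce_v8_3 for Kabini/Mullins,
 * dce_v8_5 for Hawaii); the exact helper name varies by kernel version
 * (amdgpu_device_ip_block_add() in newer trees).
 */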