Loading...
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/console.h>
30#include <linux/efi.h>
31#include <linux/pci.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h>
34#include <linux/vga_switcheroo.h>
35#include <linux/vgaarb.h>
36
37#include <drm/drm_cache.h>
38#include <drm/drm_client_event.h>
39#include <drm/drm_crtc_helper.h>
40#include <drm/drm_device.h>
41#include <drm/drm_file.h>
42#include <drm/drm_framebuffer.h>
43#include <drm/drm_probe_helper.h>
44#include <drm/radeon_drm.h>
45
46#include "radeon_device.h"
47#include "radeon_reg.h"
48#include "radeon.h"
49#include "atom.h"
50
/* Human-readable ASIC family names, indexed by enum radeon_family */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
116
/*
 * ATPX (PX platform method) queries: the real implementations are only
 * built when vga_switcheroo support is enabled; otherwise stub them out
 * so callers can assume no dGPU power control is available.
 */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
124
/* quirk flag: force PX (PowerXpress) support off for this device */
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)

/* one PCI id + subsystem id match entry in the PX quirk table */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};
134
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	/* terminator */
	{ 0, 0, 0, 0, 0 },
};
158
159bool radeon_is_px(struct drm_device *dev)
160{
161 struct radeon_device *rdev = dev->dev_private;
162
163 if (rdev->flags & RADEON_IS_PX)
164 return true;
165 return false;
166}
167
/* Match this device against the PX quirk table and clear the PX flag
 * when a quirk applies or the platform cannot power-control the dGPU.
 */
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks; the table is terminated by a zero chip_device */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}
192
/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array ({reg, and_mask, or_mask} triples)
 * @array_size: size of the register array
 *
 * Programs an array of registers with the corresponding and/or masks.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	/* the array must be a whole number of {reg, and, or} triples */
	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i +=3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			/* full mask: value is written verbatim, no read needed */
			tmp = or_mask;
		} else {
			/* read-modify-write: clear and_mask bits, then set or_mask bits */
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
228
/**
 * radeon_pci_config_reset - reset the asic via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Writes the reset magic to PCI config offset 0x7c to reset the asic.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
233
234/**
235 * radeon_surface_init - Clear GPU surface registers.
236 *
237 * @rdev: radeon_device pointer
238 *
239 * Clear GPU surface registers (r1xx-r5xx).
240 */
241void radeon_surface_init(struct radeon_device *rdev)
242{
243 /* FIXME: check this out */
244 if (rdev->family < CHIP_R600) {
245 int i;
246
247 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
248 if (rdev->surface_regs[i].bo)
249 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
250 else
251 radeon_clear_surface_reg(rdev, i);
252 }
253 /* enable surfaces */
254 WREG32(RADEON_SURFACE_CNTL, 0);
255 }
256}
257
258/*
259 * GPU scratch registers helpers function.
260 */
261/**
262 * radeon_scratch_init - Init scratch register driver information.
263 *
264 * @rdev: radeon_device pointer
265 *
266 * Init CP scratch register driver information (r1xx-r5xx)
267 */
268void radeon_scratch_init(struct radeon_device *rdev)
269{
270 int i;
271
272 /* FIXME: check this out */
273 if (rdev->family < CHIP_R300) {
274 rdev->scratch.num_reg = 5;
275 } else {
276 rdev->scratch.num_reg = 7;
277 }
278 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
279 for (i = 0; i < rdev->scratch.num_reg; i++) {
280 rdev->scratch.free[i] = true;
281 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
282 }
283}
284
285/**
286 * radeon_scratch_get - Allocate a scratch register
287 *
288 * @rdev: radeon_device pointer
289 * @reg: scratch register mmio offset
290 *
291 * Allocate a CP scratch register for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
295{
296 int i;
297
298 for (i = 0; i < rdev->scratch.num_reg; i++) {
299 if (rdev->scratch.free[i]) {
300 rdev->scratch.free[i] = false;
301 *reg = rdev->scratch.reg[i];
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_scratch_free - Free a scratch register
310 *
311 * @rdev: radeon_device pointer
312 * @reg: scratch register mmio offset
313 *
314 * Free a CP scratch register allocated for use by the driver (all asics)
315 */
316void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
317{
318 int i;
319
320 for (i = 0; i < rdev->scratch.num_reg; i++) {
321 if (rdev->scratch.reg[i] == reg) {
322 rdev->scratch.free[i] = true;
323 return;
324 }
325 }
326}
327
328/*
329 * GPU doorbell aperture helpers function.
330 */
331/**
332 * radeon_doorbell_init - Init doorbell driver information.
333 *
334 * @rdev: radeon_device pointer
335 *
336 * Init doorbell driver information (CIK)
337 * Returns 0 on success, error on failure.
338 */
339static int radeon_doorbell_init(struct radeon_device *rdev)
340{
341 /* doorbell bar mapping */
342 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
343 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
344
345 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
346 if (rdev->doorbell.num_doorbells == 0)
347 return -EINVAL;
348
349 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
350 if (rdev->doorbell.ptr == NULL) {
351 return -ENOMEM;
352 }
353 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
354 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
355
356 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
357
358 return 0;
359}
360
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* undo the ioremap done in radeon_doorbell_init() */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
373
374/**
375 * radeon_doorbell_get - Allocate a doorbell entry
376 *
377 * @rdev: radeon_device pointer
378 * @doorbell: doorbell index
379 *
380 * Allocate a doorbell for use by the driver (all asics).
381 * Returns 0 on success or -EINVAL on failure.
382 */
383int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
384{
385 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
386 if (offset < rdev->doorbell.num_doorbells) {
387 __set_bit(offset, rdev->doorbell.used);
388 *doorbell = offset;
389 return 0;
390 } else {
391 return -EINVAL;
392 }
393}
394
/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	/* out-of-range indices are silently ignored */
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
408
409/*
410 * radeon_wb_*()
411 * Writeback is the method by which the GPU updates special pages
412 * in memory with the status of certain GPU events (fences, ring pointers,
413 * etc.).
414 */
415
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	/* the flag alone gates writeback use; memory stays allocated */
	rdev->wb.enabled = false;
}
427
428/**
429 * radeon_wb_fini - Disable Writeback and free memory
430 *
431 * @rdev: radeon_device pointer
432 *
433 * Disables Writeback and frees the Writeback memory (all asics).
434 * Used at driver shutdown.
435 */
436void radeon_wb_fini(struct radeon_device *rdev)
437{
438 radeon_wb_disable(rdev);
439 if (rdev->wb.wb_obj) {
440 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
441 radeon_bo_kunmap(rdev->wb.wb_obj);
442 radeon_bo_unpin(rdev->wb.wb_obj);
443 radeon_bo_unreserve(rdev->wb.wb_obj);
444 }
445 radeon_bo_unref(&rdev->wb.wb_obj);
446 rdev->wb.wb = NULL;
447 rdev->wb.wb_obj = NULL;
448 }
449}
450
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate, pin and map the writeback BO once; reuse it on resume */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
526
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as
 * parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note 2: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note 3: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by the bogus hw of Novell bug 204882 + along with lots of
 * ubuntu ones)
 *
 * Note 4: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* VRAM must fit below the MC address mask */
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* honour the vramlimit module parameter (value in MB) */
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
591
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space available after (af) and before (bf) VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
627
628/*
629 * GPU helpers function.
630 */
631
/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	/* hypervisors advertise themselves via this CPUID feature bit */
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	/* no generic detection on other architectures */
	return false;
#endif
}
647
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* parts with no display engine have no CRTCs to inspect */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs: an enabled CRTC implies the vbios posted the card */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		      RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
716
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	/* NOTE(review): current_sclk/mclk appear to be in 10 kHz units
	 * (divided by 100 below to get MHz) — confirm against radeon_pm */
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
744
745/**
746 * radeon_boot_test_post_card - check and possibly initialize the hw
747 *
748 * @rdev: radeon_device pointer
749 *
750 * Check if the asic is initialized and if not, attempt to initialize
751 * it (all asics).
752 * Returns true if initialized or false if not.
753 */
754bool radeon_boot_test_post_card(struct radeon_device *rdev)
755{
756 if (radeon_card_posted(rdev))
757 return true;
758
759 if (rdev->bios) {
760 DRM_INFO("GPU not posted. posting now...\n");
761 if (rdev->is_atom_bios)
762 atom_asic_init(rdev->mode_info.atom_context);
763 else
764 radeon_combios_asic_init(rdev_to_drm(rdev));
765 return true;
766 } else {
767 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
768 return false;
769 }
770}
771
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated (e.g. across suspend/resume) */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
					     0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* pre-compute the GART entry so it can be reused for every unmapped slot */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
801
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	/* unmap from the device before releasing the backing page */
	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
818
819
820/* ATOM accessor methods */
821/*
822 * ATOM is an interpreted byte code stored in tables in the vbios. The
823 * driver registers callbacks to access registers and the interpreter
824 * in the driver parses the tables and executes then to program specific
825 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
826 * atombios.h, and atom.c
827 */
828
829/**
830 * cail_pll_read - read PLL register
831 *
832 * @info: atom card_info pointer
833 * @reg: PLL register offset
834 *
835 * Provides a PLL register accessor for the atom interpreter (r4xx+).
836 * Returns the value of the PLL register.
837 */
838static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
839{
840 struct radeon_device *rdev = info->dev->dev_private;
841 uint32_t r;
842
843 r = rdev->pll_rreg(rdev, reg);
844 return r;
845}
846
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
862
863/**
864 * cail_mc_read - read MC (Memory Controller) register
865 *
866 * @info: atom card_info pointer
867 * @reg: MC register offset
868 *
869 * Provides an MC register accessor for the atom interpreter (r4xx+).
870 * Returns the value of the MC register.
871 */
872static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
873{
874 struct radeon_device *rdev = info->dev->dev_private;
875 uint32_t r;
876
877 r = rdev->mc_rreg(rdev, reg);
878 return r;
879}
880
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
896
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords)
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom passes dword indices; convert to a byte offset */
	WREG32(reg*4, val);
}
912
913/**
914 * cail_reg_read - read MMIO register
915 *
916 * @info: atom card_info pointer
917 * @reg: MMIO register offset
918 *
919 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
920 * Returns the value of the MMIO register.
921 */
922static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
923{
924 struct radeon_device *rdev = info->dev->dev_private;
925 uint32_t r;
926
927 r = RREG32(reg*4);
928 return r;
929}
930
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords)
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom passes dword indices; convert to a byte offset */
	WREG32_IO(reg*4, val);
}
946
947/**
948 * cail_ioreg_read - read IO register
949 *
950 * @info: atom card_info pointer
951 * @reg: IO register offset
952 *
953 * Provides an IO register accessor for the atom interpreter (r4xx+).
954 * Returns the value of the IO register.
955 */
956static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
957{
958 struct radeon_device *rdev = info->dev->dev_private;
959 uint32_t r;
960
961 r = RREG32_IO(reg*4);
962 return r;
963}
964
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev_to_drm(rdev);
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* fall back to MMIO accessors when the IO BAR is missing */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1013
/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context->iio);
	}
	/* kfree(NULL) is a no-op, so these are safe even on partial init */
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
1034
1035/* COMBIOS */
1036/*
1037 * COMBIOS is the bios format prior to ATOM. It provides
1038 * command tables similar to ATOM, but doesn't have a unified
1039 * parser. See radeon_combios.c
1040 */
1041
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
	return 0;
}
1056
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* combios init allocates nothing, so there is nothing to free */
}
1068
1069/* if we get transitioned to only one device, take VGA back */
1070/**
1071 * radeon_vga_set_decode - enable/disable vga decode
1072 *
1073 * @pdev: PCI device
1074 * @state: enable/disable vga decode
1075 *
1076 * Enable/disable vga decode (all asics).
1077 * Returns VGA resource flags.
1078 */
1079static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
1080{
1081 struct drm_device *dev = pci_get_drvdata(pdev);
1082 struct radeon_device *rdev = dev->dev_private;
1083 radeon_vga_set_state(rdev, state);
1084 if (state)
1085 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1086 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087 else
1088 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1089}
1090
1091/**
1092 * radeon_gart_size_auto - Determine a sensible default GART size
1093 * according to ASIC family.
1094 *
1095 * @family: ASIC family name
1096 */
1097static int radeon_gart_size_auto(enum radeon_family family)
1098{
1099 /* default to a larger gart size on newer asics */
1100 if (family >= CHIP_TAHITI)
1101 return 2048;
1102 else if (family >= CHIP_RV770)
1103 return 1024;
1104 else
1105 return 512;
1106}
1107
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "choose automatically by ASIC family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!is_power_of_2(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB; shift converts to bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* radeon_vm_size is in GB; any invalid value falls back to 4 */
	if (!is_power_of_2(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* block size must fit inside the chosen VM size as well */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1205
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX devices ignore explicit OFF requests here — presumably
	 * their power-down is handled elsewhere (runtime PM); confirm
	 * against the PX runtime-PM paths. */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		/* stop output polling before powering the card down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1239
1240/**
1241 * radeon_switcheroo_can_switch - see if switcheroo state can change
1242 *
1243 * @pdev: pci dev pointer
1244 *
1245 * Callback for the switcheroo driver. Check of the switcheroo
1246 * state can be changed.
1247 * Returns true if the state can be changed, false if not.
1248 */
1249static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1250{
1251 struct drm_device *dev = pci_get_drvdata(pdev);
1252
1253 /*
1254 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1255 * locking inversion with the driver load path. And the access here is
1256 * completely racy anyway. So don't bother with locking for now.
1257 */
1258 return atomic_read(&dev->open_count) == 0;
1259}
1260
/* vga_switcheroo client callbacks; no reprobe handler is provided */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1266
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* provisional GTT size; radeon_check_arguments() below recomputes
	 * it from the radeon_gart_size module parameter */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->audio.component_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	/* radeon_vm_size is in GB; << 18 converts to a count of 4KB pages */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register BAR is BAR 5 on CIK+, BAR 2 on older parts */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	radeon_gem_debugfs_init(rdev);

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	radeon_audio_component_init(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests / benchmarks, controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1503
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_audio_component_fini(rdev);
	radeon_fini(rdev);
	/* unregister clients in the reverse of the order they were
	 * registered in radeon_device_init() */
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_unregister(rdev->pdev);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1533
1534
1535/*
1536 * Suspend & resume.
1537 */
1538/*
1539 * radeon_suspend_kms - initiate device suspend
1540 *
1541 * Puts the hw in the suspend state (all asics).
1542 * Returns 0 for success or an error on failure.
1543 * Called at driver suspend.
1544 */
1545int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1546 bool notify_clients, bool freeze)
1547{
1548 struct radeon_device *rdev;
1549 struct pci_dev *pdev;
1550 struct drm_crtc *crtc;
1551 struct drm_connector *connector;
1552 int i, r;
1553
1554 if (dev == NULL || dev->dev_private == NULL) {
1555 return -ENODEV;
1556 }
1557
1558 rdev = dev->dev_private;
1559 pdev = to_pci_dev(dev->dev);
1560
1561 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1562 return 0;
1563
1564 drm_kms_helper_poll_disable(dev);
1565
1566 drm_modeset_lock_all(dev);
1567 /* turn off display hw */
1568 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1569 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1570 }
1571 drm_modeset_unlock_all(dev);
1572
1573 /* unpin the front buffers and cursors */
1574 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1575 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1576 struct drm_framebuffer *fb = crtc->primary->fb;
1577 struct radeon_bo *robj;
1578
1579 if (radeon_crtc->cursor_bo) {
1580 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1581 r = radeon_bo_reserve(robj, false);
1582 if (r == 0) {
1583 radeon_bo_unpin(robj);
1584 radeon_bo_unreserve(robj);
1585 }
1586 }
1587
1588 if (fb == NULL || fb->obj[0] == NULL) {
1589 continue;
1590 }
1591 robj = gem_to_radeon_bo(fb->obj[0]);
1592 /* don't unpin kernel fb objects */
1593 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1594 r = radeon_bo_reserve(robj, false);
1595 if (r == 0) {
1596 radeon_bo_unpin(robj);
1597 radeon_bo_unreserve(robj);
1598 }
1599 }
1600 }
1601 /* evict vram memory */
1602 radeon_bo_evict_vram(rdev);
1603
1604 /* wait for gpu to finish processing current batch */
1605 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1606 r = radeon_fence_wait_empty(rdev, i);
1607 if (r) {
1608 /* delay GPU reset to resume */
1609 radeon_fence_driver_force_completion(rdev, i);
1610 } else {
1611 /* finish executing delayed work */
1612 flush_delayed_work(&rdev->fence_drv[i].lockup_work);
1613 }
1614 }
1615
1616 radeon_save_bios_scratch_regs(rdev);
1617
1618 radeon_suspend(rdev);
1619 radeon_hpd_fini(rdev);
1620 /* evict remaining vram memory
1621 * This second call to evict vram is to evict the gart page table
1622 * using the CPU.
1623 */
1624 radeon_bo_evict_vram(rdev);
1625
1626 radeon_agp_suspend(rdev);
1627
1628 pci_save_state(pdev);
1629 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1630 rdev->asic->asic_reset(rdev, true);
1631 pci_restore_state(pdev);
1632 } else if (suspend) {
1633 /* Shut down the device */
1634 pci_disable_device(pdev);
1635 pci_set_power_state(pdev, PCI_D3hot);
1636 }
1637
1638 if (notify_clients) {
1639 console_lock();
1640 drm_client_dev_suspend(dev, true);
1641 console_unlock();
1642 }
1643 return 0;
1644}
1645
/*
 * radeon_resume_kms - initiate device resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* hold the console lock across client resume at the end */
	if (notify_clients) {
		console_lock();
	}
	/* re-power and re-enable the PCI device if requested */
	if (resume) {
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
		if (pci_enable_device(pdev)) {
			if (notify_clients)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (notify_clients) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (notify_clients) {
		drm_client_dev_resume(dev, true);
		console_unlock();
	}

	return 0;
}
1757
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;

	down_write(&rdev->exclusive_lock);

	/* another thread may have already performed the reset */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* back up unprocessed commands from each ring so they can be
	 * replayed after a successful reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* on success replay the backed-up commands; otherwise force
	 * fence completion and drop the backups */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep holding the lock as a reader so other readers may proceed
	 * while the remaining bring-up below runs */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev_to_drm(rdev));

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	/* -EAGAIN means a retry is warranted: flag the device accordingly */
	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/console.h>
30#include <linux/efi.h>
31#include <linux/pci.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h>
34#include <linux/vga_switcheroo.h>
35#include <linux/vgaarb.h>
36
37#include <drm/drm_cache.h>
38#include <drm/drm_crtc_helper.h>
39#include <drm/drm_device.h>
40#include <drm/drm_file.h>
41#include <drm/drm_framebuffer.h>
42#include <drm/drm_probe_helper.h>
43#include <drm/radeon_drm.h>
44
45#include "radeon_device.h"
46#include "radeon_reg.h"
47#include "radeon.h"
48#include "atom.h"
49
/* human-readable ASIC names; indexed by rdev->family (see the
 * DRM_INFO in radeon_device_init()) */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
115
/* ATPX query hooks: real implementations are provided elsewhere when
 * vga_switcheroo support is built in; otherwise fall back to stubs
 * that report no ATPX support. */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
123
/* quirk flag: force-disable PX (hybrid graphics) on matching systems */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

/* one PCI chip + subsystem match entry in the PX quirk table */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};
133
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	/* terminator: chip_device == 0 ends the walk in
	 * radeon_device_handle_px_quirks() */
	{ 0, 0, 0, 0, 0 },
};
157
158bool radeon_is_px(struct drm_device *dev)
159{
160 struct radeon_device *rdev = dev->dev_private;
161
162 if (rdev->flags & RADEON_IS_PX)
163 return true;
164 return false;
165}
166
167static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
168{
169 struct radeon_px_quirk *p = radeon_px_quirk_list;
170
171 /* Apply PX quirks */
172 while (p && p->chip_device != 0) {
173 if (rdev->pdev->vendor == p->chip_vendor &&
174 rdev->pdev->device == p->chip_device &&
175 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
176 rdev->pdev->subsystem_device == p->subsys_device) {
177 rdev->px_quirk_flags = p->px_quirk_flags;
178 break;
179 }
180 ++p;
181 }
182
183 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
184 rdev->flags &= ~RADEON_IS_PX;
185
186 /* disable PX is the system doesn't support dGPU power control or hybrid gfx */
187 if (!radeon_is_atpx_hybrid() &&
188 !radeon_has_atpx_dgpu_power_cntl())
189 rdev->flags &= ~RADEON_IS_PX;
190}
191
192/**
193 * radeon_program_register_sequence - program an array of registers.
194 *
195 * @rdev: radeon_device pointer
196 * @registers: pointer to the register array
197 * @array_size: size of the register array
198 *
199 * Programs an array or registers with and and or masks.
200 * This is a helper for setting golden registers.
201 */
202void radeon_program_register_sequence(struct radeon_device *rdev,
203 const u32 *registers,
204 const u32 array_size)
205{
206 u32 tmp, reg, and_mask, or_mask;
207 int i;
208
209 if (array_size % 3)
210 return;
211
212 for (i = 0; i < array_size; i +=3) {
213 reg = registers[i + 0];
214 and_mask = registers[i + 1];
215 or_mask = registers[i + 2];
216
217 if (and_mask == 0xffffffff) {
218 tmp = or_mask;
219 } else {
220 tmp = RREG32(reg);
221 tmp &= ~and_mask;
222 tmp |= or_mask;
223 }
224 WREG32(reg, tmp);
225 }
226}
227
/* Trigger an ASIC reset by writing the reset magic value to PCI config
 * space offset 0x7c. */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
232
233/**
234 * radeon_surface_init - Clear GPU surface registers.
235 *
236 * @rdev: radeon_device pointer
237 *
238 * Clear GPU surface registers (r1xx-r5xx).
239 */
240void radeon_surface_init(struct radeon_device *rdev)
241{
242 /* FIXME: check this out */
243 if (rdev->family < CHIP_R600) {
244 int i;
245
246 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
247 if (rdev->surface_regs[i].bo)
248 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
249 else
250 radeon_clear_surface_reg(rdev, i);
251 }
252 /* enable surfaces */
253 WREG32(RADEON_SURFACE_CNTL, 0);
254 }
255}
256
257/*
258 * GPU scratch registers helpers function.
259 */
260/**
261 * radeon_scratch_init - Init scratch register driver information.
262 *
263 * @rdev: radeon_device pointer
264 *
265 * Init CP scratch register driver information (r1xx-r5xx)
266 */
267void radeon_scratch_init(struct radeon_device *rdev)
268{
269 int i;
270
271 /* FIXME: check this out */
272 if (rdev->family < CHIP_R300) {
273 rdev->scratch.num_reg = 5;
274 } else {
275 rdev->scratch.num_reg = 7;
276 }
277 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
278 for (i = 0; i < rdev->scratch.num_reg; i++) {
279 rdev->scratch.free[i] = true;
280 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
281 }
282}
283
284/**
285 * radeon_scratch_get - Allocate a scratch register
286 *
287 * @rdev: radeon_device pointer
288 * @reg: scratch register mmio offset
289 *
290 * Allocate a CP scratch register for use by the driver (all asics).
291 * Returns 0 on success or -EINVAL on failure.
292 */
293int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
294{
295 int i;
296
297 for (i = 0; i < rdev->scratch.num_reg; i++) {
298 if (rdev->scratch.free[i]) {
299 rdev->scratch.free[i] = false;
300 *reg = rdev->scratch.reg[i];
301 return 0;
302 }
303 }
304 return -EINVAL;
305}
306
307/**
308 * radeon_scratch_free - Free a scratch register
309 *
310 * @rdev: radeon_device pointer
311 * @reg: scratch register mmio offset
312 *
313 * Free a CP scratch register allocated for use by the driver (all asics)
314 */
315void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
316{
317 int i;
318
319 for (i = 0; i < rdev->scratch.num_reg; i++) {
320 if (rdev->scratch.reg[i] == reg) {
321 rdev->scratch.free[i] = true;
322 return;
323 }
324 }
325}
326
327/*
328 * GPU doorbell aperture helpers function.
329 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK).
 * Reads the doorbell BAR (PCI BAR 2), caps the number of tracked
 * doorbell slots at RADEON_MAX_DOORBELLS, ioremaps only the region
 * backing those slots and clears the allocation bitmap.
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* cap the slot count at what the BAR (and our bitmap) can hold */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* map only the portion of the BAR that backs usable slots */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* all doorbells start out unallocated */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
359
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK).
 * Unmaps the doorbell BAR mapping created by radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
372
373/**
374 * radeon_doorbell_get - Allocate a doorbell entry
375 *
376 * @rdev: radeon_device pointer
377 * @doorbell: doorbell index
378 *
379 * Allocate a doorbell for use by the driver (all asics).
380 * Returns 0 on success or -EINVAL on failure.
381 */
382int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
383{
384 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
385 if (offset < rdev->doorbell.num_doorbells) {
386 __set_bit(offset, rdev->doorbell.used);
387 *doorbell = offset;
388 return 0;
389 } else {
390 return -EINVAL;
391 }
392}
393
394/**
395 * radeon_doorbell_free - Free a doorbell entry
396 *
397 * @rdev: radeon_device pointer
398 * @doorbell: doorbell index
399 *
400 * Free a doorbell allocated for use by the driver (all asics)
401 */
402void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
403{
404 if (doorbell < rdev->doorbell.num_doorbells)
405 __clear_bit(doorbell, rdev->doorbell.used);
406}
407
408/*
409 * radeon_wb_*()
410 * Writeback is the method by which the GPU updates special pages
411 * in memory with the status of certain GPU events (fences, ring pointers,
412 * etc.).
413 */
414
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 * Only clears the software flag; the writeback buffer itself stays
 * allocated (radeon_wb_fini() does the full teardown).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
426
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* unmap and unpin under the reservation; if the reserve
		 * fails we still drop our reference below */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
449
450/**
451 * radeon_wb_init- Init Writeback driver info and allocate memory
452 *
453 * @rdev: radeon_device pointer
454 *
455 * Disables Writeback and frees the Writeback memory (all asics).
456 * Used at driver startup.
457 * Returns 0 on success or an -error on failure.
458 */
459int radeon_wb_init(struct radeon_device *rdev)
460{
461 int r;
462
463 if (rdev->wb.wb_obj == NULL) {
464 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
465 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
466 &rdev->wb.wb_obj);
467 if (r) {
468 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
469 return r;
470 }
471 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
472 if (unlikely(r != 0)) {
473 radeon_wb_fini(rdev);
474 return r;
475 }
476 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
477 &rdev->wb.gpu_addr);
478 if (r) {
479 radeon_bo_unreserve(rdev->wb.wb_obj);
480 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
481 radeon_wb_fini(rdev);
482 return r;
483 }
484 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
485 radeon_bo_unreserve(rdev->wb.wb_obj);
486 if (r) {
487 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
488 radeon_wb_fini(rdev);
489 return r;
490 }
491 }
492
493 /* clear wb memory */
494 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
495 /* disable event_write fences */
496 rdev->wb.use_event = false;
497 /* disabled via module param */
498 if (radeon_no_wb == 1) {
499 rdev->wb.enabled = false;
500 } else {
501 if (rdev->flags & RADEON_IS_AGP) {
502 /* often unreliable on AGP */
503 rdev->wb.enabled = false;
504 } else if (rdev->family < CHIP_R300) {
505 /* often unreliable on pre-r300 */
506 rdev->wb.enabled = false;
507 } else {
508 rdev->wb.enabled = true;
509 /* event_write fences are only available on r600+ */
510 if (rdev->family >= CHIP_R600) {
511 rdev->wb.use_event = true;
512 }
513 }
514 }
515 /* always use writeback/events on NI, APUs */
516 if (rdev->family >= CHIP_PALM) {
517 rdev->wb.enabled = true;
518 rdev->wb.use_event = true;
519 }
520
521 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
522
523 return 0;
524}
525
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as
 * parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note 2: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note 3: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 + along with lots
 * of ubuntu ones)
 *
 * Note 4: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* clamp VRAM to the aperture if it would overflow the MC address mask */
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP also clamp if the VRAM range would overlap the GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute after the possible clamp above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* apply user-requested vram limit (radeon_vram_limit module param, MB) */
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
590
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM, in whichever
 * hole (below vram_start or above vram_end) is larger.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* size_af: aligned room after VRAM; size_bf: aligned room before VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
626
627/*
628 * GPU helpers function.
629 */
630
/*
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * On x86 this tests the CPU hypervisor feature bit; on other
 * architectures it always reports false.
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
646
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) by probing
 * CRTC enable bits and, as a fallback, the MEM_SIZE register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* asics without display hardware have no CRTCs to check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		/* OR together the control regs of all CRTCs the asic has */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	/* a non-zero memsize means the vbios already programmed the MC */
	if (reg)
		return true;

	return false;

}
715
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz; dividing by 100 suggests current_sclk/mclk
	 * are stored in 10 kHz units -- NOTE(review): confirm against
	 * the pm code that fills them in */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16
		 * NOTE(review): the code divides by 16 (dfixed_div) while
		 * this comment says multiply -- behavior kept as-is */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
743
744/**
745 * radeon_boot_test_post_card - check and possibly initialize the hw
746 *
747 * @rdev: radeon_device pointer
748 *
749 * Check if the asic is initialized and if not, attempt to initialize
750 * it (all asics).
751 * Returns true if initialized or false if not.
752 */
753bool radeon_boot_test_post_card(struct radeon_device *rdev)
754{
755 if (radeon_card_posted(rdev))
756 return true;
757
758 if (rdev->bios) {
759 DRM_INFO("GPU not posted. posting now...\n");
760 if (rdev->is_atom_bios)
761 atom_asic_init(rdev->mode_info.atom_context);
762 else
763 radeon_combios_asic_init(rdev->ddev);
764 return true;
765 } else {
766 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
767 return false;
768 }
769}
770
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated -- init is idempotent */
	if (rdev->dummy_page.page)
		return 0;
	/* GFP_DMA32: the page must be addressable by 32-bit capable GARTs */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* pre-compute the GART entry so gart code can reuse it directly */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
800
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 * Safe to call when the page was never allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
817
818
819/* ATOM accessor methods */
820/*
821 * ATOM is an interpreted byte code stored in tables in the vbios. The
822 * driver registers callbacks to access registers and the interpreter
823 * in the driver parses the tables and executes then to program specific
824 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
825 * atombios.h, and atom.c
826 */
827
828/**
829 * cail_pll_read - read PLL register
830 *
831 * @info: atom card_info pointer
832 * @reg: PLL register offset
833 *
834 * Provides a PLL register accessor for the atom interpreter (r4xx+).
835 * Returns the value of the PLL register.
836 */
837static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
838{
839 struct radeon_device *rdev = info->dev->dev_private;
840 uint32_t r;
841
842 r = rdev->pll_rreg(rdev, reg);
843 return r;
844}
845
846/**
847 * cail_pll_write - write PLL register
848 *
849 * @info: atom card_info pointer
850 * @reg: PLL register offset
851 * @val: value to write to the pll register
852 *
853 * Provides a PLL register accessor for the atom interpreter (r4xx+).
854 */
855static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
856{
857 struct radeon_device *rdev = info->dev->dev_private;
858
859 rdev->pll_wreg(rdev, reg, val);
860}
861
862/**
863 * cail_mc_read - read MC (Memory Controller) register
864 *
865 * @info: atom card_info pointer
866 * @reg: MC register offset
867 *
868 * Provides an MC register accessor for the atom interpreter (r4xx+).
869 * Returns the value of the MC register.
870 */
871static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
872{
873 struct radeon_device *rdev = info->dev->dev_private;
874 uint32_t r;
875
876 r = rdev->mc_rreg(rdev, reg);
877 return r;
878}
879
880/**
881 * cail_mc_write - write MC (Memory Controller) register
882 *
883 * @info: atom card_info pointer
884 * @reg: MC register offset
885 * @val: value to write to the pll register
886 *
887 * Provides a MC register accessor for the atom interpreter (r4xx+).
888 */
889static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
890{
891 struct radeon_device *rdev = info->dev->dev_private;
892
893 rdev->mc_wreg(rdev, reg, val);
894}
895
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords; converted to bytes below)
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 * Note: the local must be named "rdev" because WREG32 expands to a
 * call that references it.
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
911
912/**
913 * cail_reg_read - read MMIO register
914 *
915 * @info: atom card_info pointer
916 * @reg: MMIO register offset
917 *
918 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
919 * Returns the value of the MMIO register.
920 */
921static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
922{
923 struct radeon_device *rdev = info->dev->dev_private;
924 uint32_t r;
925
926 r = RREG32(reg*4);
927 return r;
928}
929
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords; converted to bytes below)
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Note: the local must be named "rdev" because WREG32_IO expands to a
 * call that references it.
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
945
946/**
947 * cail_ioreg_read - read IO register
948 *
949 * @info: atom card_info pointer
950 * @reg: IO register offset
951 *
952 * Provides an IO register accessor for the atom interpreter (r4xx+).
953 * Returns the value of the IO register.
954 */
955static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
956{
957 struct radeon_device *rdev = info->dev->dev_private;
958 uint32_t r;
959
960 r = RREG32_IO(reg*4);
961 return r;
962}
963
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	/* MMIO register accessors */
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* also frees atom_card_info allocated above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1012
1013/**
1014 * radeon_atombios_fini - free the driver info and callbacks for atombios
1015 *
1016 * @rdev: radeon_device pointer
1017 *
1018 * Frees the driver info and register access callbacks for the ATOM
1019 * interpreter (r4xx+).
1020 * Called at driver shutdown.
1021 */
1022void radeon_atombios_fini(struct radeon_device *rdev)
1023{
1024 if (rdev->mode_info.atom_context) {
1025 kfree(rdev->mode_info.atom_context->scratch);
1026 }
1027 kfree(rdev->mode_info.atom_context);
1028 rdev->mode_info.atom_context = NULL;
1029 kfree(rdev->mode_info.atom_card_info);
1030 rdev->mode_info.atom_card_info = NULL;
1031}
1032
1033/* COMBIOS */
1034/*
1035 * COMBIOS is the bios format prior to ATOM. It provides
1036 * command tables similar to ATOM, but doesn't have a unified
1037 * parser. See radeon_combios.c
1038 */
1039
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx):
 * just sets up the bios scratch registers.
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1054
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Intentionally empty: combios init allocates nothing that needs
 * freeing; kept as a symmetric counterpart to radeon_combios_init().
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1066
1067/* if we get transitioned to only one device, take VGA back */
1068/**
1069 * radeon_vga_set_decode - enable/disable vga decode
1070 *
1071 * @pdev: PCI device
1072 * @state: enable/disable vga decode
1073 *
1074 * Enable/disable vga decode (all asics).
1075 * Returns VGA resource flags.
1076 */
1077static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
1078{
1079 struct drm_device *dev = pci_get_drvdata(pdev);
1080 struct radeon_device *rdev = dev->dev_private;
1081 radeon_vga_set_state(rdev, state);
1082 if (state)
1083 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1084 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1085 else
1086 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087}
1088
1089/**
1090 * radeon_gart_size_auto - Determine a sensible default GART size
1091 * according to ASIC family.
1092 *
1093 * @family: ASIC family name
1094 */
1095static int radeon_gart_size_auto(enum radeon_family family)
1096{
1097 /* default to a larger gart size on newer asics */
1098 if (family >= CHIP_TAHITI)
1099 return 2048;
1100 else if (family >= CHIP_RV770)
1101 return 1024;
1102 else
1103 return 512;
1104}
1105
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters (vram limit, gart size, agp
 * mode, vm size and vm block size) and updates the associated values
 * used by the driver (all asics). Invalid values are clamped or reset
 * to safe defaults with a warning rather than failing.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "auto": pick a default based on the asic family */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!is_power_of_2(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* vm size is in GB; default to 4GB on bad input */
	if (!is_power_of_2(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* PTs must not cover more address space than the whole VM */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1203
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX cards ignore manual power-off requests here -- presumably
	 * their power state is managed elsewhere (runtime PM); TODO confirm */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1237
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed: only allowed while no one has the device open.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}
1258
/* vga_switcheroo client callbacks; no reprobe hook is needed */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1264
1265/**
1266 * radeon_device_init - initialize the driver
1267 *
1268 * @rdev: radeon_device pointer
1269 * @ddev: drm dev pointer
1270 * @pdev: pci dev pointer
1271 * @flags: driver flags
1272 *
1273 * Initializes the driver info and hw (all asics).
1274 * Returns 0 for success or an error on failure.
1275 * Called at driver startup.
1276 */
1277int radeon_device_init(struct radeon_device *rdev,
1278 struct drm_device *ddev,
1279 struct pci_dev *pdev,
1280 uint32_t flags)
1281{
1282 int r, i;
1283 int dma_bits;
1284 bool runtime = false;
1285
1286 rdev->shutdown = false;
1287 rdev->dev = &pdev->dev;
1288 rdev->ddev = ddev;
1289 rdev->pdev = pdev;
1290 rdev->flags = flags;
1291 rdev->family = flags & RADEON_FAMILY_MASK;
1292 rdev->is_atom_bios = false;
1293 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1294 rdev->mc.gtt_size = 512 * 1024 * 1024;
1295 rdev->accel_working = false;
1296 /* set up ring ids */
1297 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1298 rdev->ring[i].idx = i;
1299 }
1300 rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
1301
1302 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1303 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1304 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1305
1306 /* mutex initialization are all done here so we
1307 * can recall function without having locking issues */
1308 mutex_init(&rdev->ring_lock);
1309 mutex_init(&rdev->dc_hw_i2c_mutex);
1310 atomic_set(&rdev->ih.lock, 0);
1311 mutex_init(&rdev->gem.mutex);
1312 mutex_init(&rdev->pm.mutex);
1313 mutex_init(&rdev->gpu_clock_mutex);
1314 mutex_init(&rdev->srbm_mutex);
1315 mutex_init(&rdev->audio.component_mutex);
1316 init_rwsem(&rdev->pm.mclk_lock);
1317 init_rwsem(&rdev->exclusive_lock);
1318 init_waitqueue_head(&rdev->irq.vblank_queue);
1319 r = radeon_gem_init(rdev);
1320 if (r)
1321 return r;
1322
1323 radeon_check_arguments(rdev);
1324 /* Adjust VM size here.
1325 * Max GPUVM size for cayman+ is 40 bits.
1326 */
1327 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1328
1329 /* Set asic functions */
1330 r = radeon_asic_init(rdev);
1331 if (r)
1332 return r;
1333
1334 /* all of the newer IGP chips have an internal gart
1335 * However some rs4xx report as AGP, so remove that here.
1336 */
1337 if ((rdev->family >= CHIP_RS400) &&
1338 (rdev->flags & RADEON_IS_IGP)) {
1339 rdev->flags &= ~RADEON_IS_AGP;
1340 }
1341
1342 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1343 radeon_agp_disable(rdev);
1344 }
1345
1346 /* Set the internal MC address mask
1347 * This is the max address of the GPU's
1348 * internal address space.
1349 */
1350 if (rdev->family >= CHIP_CAYMAN)
1351 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1352 else if (rdev->family >= CHIP_CEDAR)
1353 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1354 else
1355 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1356
1357 /* set DMA mask.
1358 * PCIE - can handle 40-bits.
1359 * IGP - can handle 40-bits
1360 * AGP - generally dma32 is safest
1361 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1362 */
1363 dma_bits = 40;
1364 if (rdev->flags & RADEON_IS_AGP)
1365 dma_bits = 32;
1366 if ((rdev->flags & RADEON_IS_PCI) &&
1367 (rdev->family <= CHIP_RS740))
1368 dma_bits = 32;
1369#ifdef CONFIG_PPC64
1370 if (rdev->family == CHIP_CEDAR)
1371 dma_bits = 32;
1372#endif
1373
1374 r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
1375 if (r) {
1376 pr_warn("radeon: No suitable DMA available\n");
1377 return r;
1378 }
1379 rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
1380
1381 /* Registers mapping */
1382 /* TODO: block userspace mapping of io register */
1383 spin_lock_init(&rdev->mmio_idx_lock);
1384 spin_lock_init(&rdev->smc_idx_lock);
1385 spin_lock_init(&rdev->pll_idx_lock);
1386 spin_lock_init(&rdev->mc_idx_lock);
1387 spin_lock_init(&rdev->pcie_idx_lock);
1388 spin_lock_init(&rdev->pciep_idx_lock);
1389 spin_lock_init(&rdev->pif_idx_lock);
1390 spin_lock_init(&rdev->cg_idx_lock);
1391 spin_lock_init(&rdev->uvd_idx_lock);
1392 spin_lock_init(&rdev->rcu_idx_lock);
1393 spin_lock_init(&rdev->didt_idx_lock);
1394 spin_lock_init(&rdev->end_idx_lock);
1395 if (rdev->family >= CHIP_BONAIRE) {
1396 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1397 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1398 } else {
1399 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1400 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1401 }
1402 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1403 if (rdev->rmmio == NULL)
1404 return -ENOMEM;
1405
1406 /* doorbell bar mapping */
1407 if (rdev->family >= CHIP_BONAIRE)
1408 radeon_doorbell_init(rdev);
1409
1410 /* io port mapping */
1411 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1412 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1413 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1414 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1415 break;
1416 }
1417 }
1418 if (rdev->rio_mem == NULL)
1419 DRM_ERROR("Unable to find PCI I/O BAR\n");
1420
1421 if (rdev->flags & RADEON_IS_PX)
1422 radeon_device_handle_px_quirks(rdev);
1423
1424 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1425 /* this will fail for cards that aren't VGA class devices, just
1426 * ignore it */
1427 vga_client_register(rdev->pdev, radeon_vga_set_decode);
1428
1429 if (rdev->flags & RADEON_IS_PX)
1430 runtime = true;
1431 if (!pci_is_thunderbolt_attached(rdev->pdev))
1432 vga_switcheroo_register_client(rdev->pdev,
1433 &radeon_switcheroo_ops, runtime);
1434 if (runtime)
1435 vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1436
1437 r = radeon_init(rdev);
1438 if (r)
1439 goto failed;
1440
1441 radeon_gem_debugfs_init(rdev);
1442
1443 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1444 /* Acceleration not working on AGP card try again
1445 * with fallback to PCI or PCIE GART
1446 */
1447 radeon_asic_reset(rdev);
1448 radeon_fini(rdev);
1449 radeon_agp_disable(rdev);
1450 r = radeon_init(rdev);
1451 if (r)
1452 goto failed;
1453 }
1454
1455 radeon_audio_component_init(rdev);
1456
1457 r = radeon_ib_ring_tests(rdev);
1458 if (r)
1459 DRM_ERROR("ib ring test failed (%d).\n", r);
1460
1461 /*
1462 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
1463 * after the CP ring have chew one packet at least. Hence here we stop
1464 * and restart DPM after the radeon_ib_ring_tests().
1465 */
1466 if (rdev->pm.dpm_enabled &&
1467 (rdev->pm.pm_method == PM_METHOD_DPM) &&
1468 (rdev->family == CHIP_TURKS) &&
1469 (rdev->flags & RADEON_IS_MOBILITY)) {
1470 mutex_lock(&rdev->pm.mutex);
1471 radeon_dpm_disable(rdev);
1472 radeon_dpm_enable(rdev);
1473 mutex_unlock(&rdev->pm.mutex);
1474 }
1475
1476 if ((radeon_testing & 1)) {
1477 if (rdev->accel_working)
1478 radeon_test_moves(rdev);
1479 else
1480 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1481 }
1482 if ((radeon_testing & 2)) {
1483 if (rdev->accel_working)
1484 radeon_test_syncing(rdev);
1485 else
1486 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1487 }
1488 if (radeon_benchmarking) {
1489 if (rdev->accel_working)
1490 radeon_benchmark(rdev, radeon_benchmarking);
1491 else
1492 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1493 }
1494 return 0;
1495
1496failed:
1497 /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
1498 if (radeon_is_px(ddev))
1499 pm_runtime_put_noidle(ddev->dev);
1500 if (runtime)
1501 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1502 return r;
1503}
1504
1505/**
1506 * radeon_device_fini - tear down the driver
1507 *
1508 * @rdev: radeon_device pointer
1509 *
1510 * Tear down the driver info (all asics).
1511 * Called at driver shutdown.
1512 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* mark the device as going away before tearing anything down */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_audio_component_fini(rdev);
	/* asic-specific teardown */
	radeon_fini(rdev);
	/* thunderbolt-attached devices were never registered with switcheroo */
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_unregister(rdev->pdev);
	/* unmap the legacy I/O port BAR located during init, if one was found */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	/* unmap the register MMIO aperture */
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell aperture is only set up for BONAIRE and newer (see init) */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1534
1535
1536/*
1537 * Suspend & resume.
1538 */
1539/*
1540 * radeon_suspend_kms - initiate device suspend
1541 *
1542 * Puts the hw in the suspend state (all asics).
1543 * Returns 0 for success or an error on failure.
1544 * Called at driver suspend.
1545 */
1546int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1547 bool fbcon, bool freeze)
1548{
1549 struct radeon_device *rdev;
1550 struct pci_dev *pdev;
1551 struct drm_crtc *crtc;
1552 struct drm_connector *connector;
1553 int i, r;
1554
1555 if (dev == NULL || dev->dev_private == NULL) {
1556 return -ENODEV;
1557 }
1558
1559 rdev = dev->dev_private;
1560 pdev = to_pci_dev(dev->dev);
1561
1562 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1563 return 0;
1564
1565 drm_kms_helper_poll_disable(dev);
1566
1567 drm_modeset_lock_all(dev);
1568 /* turn off display hw */
1569 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1570 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1571 }
1572 drm_modeset_unlock_all(dev);
1573
1574 /* unpin the front buffers and cursors */
1575 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1576 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1577 struct drm_framebuffer *fb = crtc->primary->fb;
1578 struct radeon_bo *robj;
1579
1580 if (radeon_crtc->cursor_bo) {
1581 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1582 r = radeon_bo_reserve(robj, false);
1583 if (r == 0) {
1584 radeon_bo_unpin(robj);
1585 radeon_bo_unreserve(robj);
1586 }
1587 }
1588
1589 if (fb == NULL || fb->obj[0] == NULL) {
1590 continue;
1591 }
1592 robj = gem_to_radeon_bo(fb->obj[0]);
1593 /* don't unpin kernel fb objects */
1594 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1595 r = radeon_bo_reserve(robj, false);
1596 if (r == 0) {
1597 radeon_bo_unpin(robj);
1598 radeon_bo_unreserve(robj);
1599 }
1600 }
1601 }
1602 /* evict vram memory */
1603 radeon_bo_evict_vram(rdev);
1604
1605 /* wait for gpu to finish processing current batch */
1606 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1607 r = radeon_fence_wait_empty(rdev, i);
1608 if (r) {
1609 /* delay GPU reset to resume */
1610 radeon_fence_driver_force_completion(rdev, i);
1611 } else {
1612 /* finish executing delayed work */
1613 flush_delayed_work(&rdev->fence_drv[i].lockup_work);
1614 }
1615 }
1616
1617 radeon_save_bios_scratch_regs(rdev);
1618
1619 radeon_suspend(rdev);
1620 radeon_hpd_fini(rdev);
1621 /* evict remaining vram memory
1622 * This second call to evict vram is to evict the gart page table
1623 * using the CPU.
1624 */
1625 radeon_bo_evict_vram(rdev);
1626
1627 radeon_agp_suspend(rdev);
1628
1629 pci_save_state(pdev);
1630 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1631 rdev->asic->asic_reset(rdev, true);
1632 pci_restore_state(pdev);
1633 } else if (suspend) {
1634 /* Shut down the device */
1635 pci_disable_device(pdev);
1636 pci_set_power_state(pdev, PCI_D3hot);
1637 }
1638
1639 if (fbcon) {
1640 console_lock();
1641 radeon_fbdev_set_suspend(rdev, 1);
1642 console_unlock();
1643 }
1644 return 0;
1645}
1646
1647/*
1648 * radeon_resume_kms - initiate device resume
1649 *
1650 * Bring the hw back to operating state (all asics).
1651 * Returns 0 for success or an error on failure.
1652 * Called at driver resume.
1653 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	int r;

	/* nothing to do if vga_switcheroo already powered the GPU off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* hold the console lock across the whole fbdev-visible resume */
	if (fbcon) {
		console_lock();
	}
	if (resume) {
		/* bring the PCI device back to D0 and re-enable it */
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
		if (pci_enable_device(pdev)) {
			if (fbcon)
				console_unlock();
			/* NOTE(review): bare -1 rather than a -E* code */
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			/* fall back to no DPM rather than failing resume */
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1758
1759/**
1760 * radeon_gpu_reset - reset the asic
1761 *
1762 * @rdev: radeon device pointer
1763 *
 * Attempt to reset the GPU if it has hung (all asics).
1765 * Returns 0 for success or an error on failure.
1766 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* per-ring backups of the unprocessed commands, restored after reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	/* true once at least one ring had commands worth restoring */
	bool saved = false;

	int i, r;
	int resched;

	/* take the device exclusively for the duration of the reset */
	down_write(&rdev->exclusive_lock);

	/* another path may have completed the reset while we waited */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* snapshot pending ring contents so work is not lost across reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay backed-up commands on success; otherwise complete fences
	 * forcibly and drop the backups
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			/* fall back to no DPM rather than failing the reset */
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep holding the lock as a reader while modes are restored;
	 * paired with up_read() below
	 */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		/* restored commands failed their ring tests: ask for another
		 * reset attempt via -EAGAIN
		 */
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}