v3.1
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/console.h>
  29#include <linux/slab.h>
  30#include <drm/drmP.h>
  31#include <drm/drm_crtc_helper.h>
  32#include <drm/radeon_drm.h>
  33#include <linux/vgaarb.h>
  34#include <linux/vga_switcheroo.h>
  35#include <linux/efi.h>
  36#include "radeon_reg.h"
  37#include "radeon.h"
  38#include "atom.h"
  39
  40static const char radeon_family_name[][16] = {
  41	"R100",
  42	"RV100",
  43	"RS100",
  44	"RV200",
  45	"RS200",
  46	"R200",
  47	"RV250",
  48	"RS300",
  49	"RV280",
  50	"R300",
  51	"R350",
  52	"RV350",
  53	"RV380",
  54	"R420",
  55	"R423",
  56	"RV410",
  57	"RS400",
  58	"RS480",
  59	"RS600",
  60	"RS690",
  61	"RS740",
  62	"RV515",
  63	"R520",
  64	"RV530",
  65	"RV560",
  66	"RV570",
  67	"R580",
  68	"R600",
  69	"RV610",
  70	"RV630",
  71	"RV670",
  72	"RV620",
  73	"RV635",
  74	"RS780",
  75	"RS880",
  76	"RV770",
  77	"RV730",
  78	"RV710",
  79	"RV740",
  80	"CEDAR",
  81	"REDWOOD",
  82	"JUNIPER",
  83	"CYPRESS",
  84	"HEMLOCK",
  85	"PALM",
  86	"SUMO",
  87	"SUMO2",
  88	"BARTS",
  89	"TURKS",
  90	"CAICOS",
  91	"CAYMAN",
  92	"LAST",
  93};
  94
  95/*
  96 * Clear GPU surface registers.
  97 */
  98void radeon_surface_init(struct radeon_device *rdev)
  99{
 100	/* FIXME: check this out */
 101	if (rdev->family < CHIP_R600) {
 102		int i;
 103
 104		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 105			if (rdev->surface_regs[i].bo)
 106				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
 107			else
 108				radeon_clear_surface_reg(rdev, i);
 109		}
 110		/* enable surfaces */
 111		WREG32(RADEON_SURFACE_CNTL, 0);
 112	}
 113}
 114
 115/*
 116 * GPU scratch registers helpers function.
 117 */
 118void radeon_scratch_init(struct radeon_device *rdev)
 119{
 120	int i;
 121
 122	/* FIXME: check this out */
 123	if (rdev->family < CHIP_R300) {
 124		rdev->scratch.num_reg = 5;
 125	} else {
 126		rdev->scratch.num_reg = 7;
 127	}
 128	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
 129	for (i = 0; i < rdev->scratch.num_reg; i++) {
 130		rdev->scratch.free[i] = true;
 131		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
 132	}
 133}
 134
 135int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
 136{
 137	int i;
 138
 139	for (i = 0; i < rdev->scratch.num_reg; i++) {
 140		if (rdev->scratch.free[i]) {
 141			rdev->scratch.free[i] = false;
 142			*reg = rdev->scratch.reg[i];
 143			return 0;
 144		}
 145	}
 146	return -EINVAL;
 147}
 148
 149void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 150{
 151	int i;
 152
 153	for (i = 0; i < rdev->scratch.num_reg; i++) {
 154		if (rdev->scratch.reg[i] == reg) {
 155			rdev->scratch.free[i] = true;
 156			return;
 157		}
 158	}
 159}
 160
 161void radeon_wb_disable(struct radeon_device *rdev)
 162{
 163	int r;
 164
 165	if (rdev->wb.wb_obj) {
 166		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
 167		if (unlikely(r != 0))
 168			return;
 169		radeon_bo_kunmap(rdev->wb.wb_obj);
 170		radeon_bo_unpin(rdev->wb.wb_obj);
 171		radeon_bo_unreserve(rdev->wb.wb_obj);
 172	}
 173	rdev->wb.enabled = false;
 174}
 175
 176void radeon_wb_fini(struct radeon_device *rdev)
 177{
 178	radeon_wb_disable(rdev);
 179	if (rdev->wb.wb_obj) {
 180		radeon_bo_unref(&rdev->wb.wb_obj);
 181		rdev->wb.wb = NULL;
 182		rdev->wb.wb_obj = NULL;
 183	}
 184}
 185
 186int radeon_wb_init(struct radeon_device *rdev)
 187{
 188	int r;
 189
 190	if (rdev->wb.wb_obj == NULL) {
 191		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 192				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 193		if (r) {
 194			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 195			return r;
 196		}
 197	}
 198	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
 199	if (unlikely(r != 0)) {
 200		radeon_wb_fini(rdev);
 201		return r;
 202	}
 203	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
 204			  &rdev->wb.gpu_addr);
 205	if (r) {
 206		radeon_bo_unreserve(rdev->wb.wb_obj);
 207		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
 208		radeon_wb_fini(rdev);
 209		return r;
 210	}
 211	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 212	radeon_bo_unreserve(rdev->wb.wb_obj);
 213	if (r) {
 214		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 215		radeon_wb_fini(rdev);
 216		return r;
 217	}
 218
 219	/* clear wb memory */
 220	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
 221	/* disable event_write fences */
 222	rdev->wb.use_event = false;
 223	/* disabled via module param */
 224	if (radeon_no_wb == 1)
 225		rdev->wb.enabled = false;
 226	else {
 227		/* often unreliable on AGP */
 228		if (rdev->flags & RADEON_IS_AGP) {
 229			rdev->wb.enabled = false;
 230		} else {
 231			rdev->wb.enabled = true;
 232			/* event_write fences are only available on r600+ */
 233			if (rdev->family >= CHIP_R600)
 234				rdev->wb.use_event = true;
 235		}
 236	}
 237	/* always use writeback/events on NI */
 238	if (ASIC_IS_DCE5(rdev)) {
 239		rdev->wb.enabled = true;
 240		rdev->wb.use_event = true;
 241	}
 242
 243	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
 244
 245	return 0;
 246}
 247
 248/**
 249 * radeon_vram_location - try to find VRAM location
 250 * @rdev: radeon device structure holding all necessary information
 251 * @mc: memory controller structure holding memory information
 252 * @base: base address at which to put VRAM
 253 *
 254 * Function will try to place VRAM at the base address provided
 255 * as parameter (which is so far either the PCI aperture address or,
 256 * for IGP, the TOM base address).
 257 *
 258 * If there is not enough space to fit the invisible VRAM in the 32-bit
 259 * address space then we limit the VRAM size to the aperture.
 260 *
 261 * If we are using AGP and the AGP aperture doesn't allow us to have
 262 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 263 * size and print a warning.
 264 *
 265 * This function never fails; worst case is limiting VRAM.
 266 *
 267 * Note: GTT start, end, size should be initialized before calling this
 268 * function on AGP platform.
 269 *
 270 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 271 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 272 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 273 * not IGP.
 274 *
 275 * Note: we use mc_vram_size as on some boards we need to program the mc to
 276 * cover the whole aperture even if VRAM size is smaller than the aperture
 277 * size (Novell bug 204882, along with lots of Ubuntu ones).
 278 *
 279 * Note: when limiting vram it's safe to overwrite real_vram_size because
 280 * we are not in the case where real_vram_size is smaller than mc_vram_size
 281 * (i.e. not affected by the bogus hw of Novell bug 204882 and the various
 282 * Ubuntu ones).
 283 *
 284 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 285 * explicitly check for that though.
 286 *
 287 * FIXME: when reducing VRAM size align new size on power of 2.
 288 */
 289void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 290{
 291	mc->vram_start = base;
 292	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
 293		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 294		mc->real_vram_size = mc->aper_size;
 295		mc->mc_vram_size = mc->aper_size;
 296	}
 297	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 298	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
 299		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 300		mc->real_vram_size = mc->aper_size;
 301		mc->mc_vram_size = mc->aper_size;
 302	}
 303	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 304	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
 305		mc->real_vram_size = radeon_vram_limit;
 306	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 307			mc->mc_vram_size >> 20, mc->vram_start,
 308			mc->vram_end, mc->real_vram_size >> 20);
 309}
 310
 311/**
 312 * radeon_gtt_location - try to find GTT location
 313 * @rdev: radeon device structure holding all necessary information
 314 * @mc: memory controller structure holding memory information
 315 *
 316 * Function will try to place GTT before or after VRAM.
 317 *
 318 * If GTT size is bigger than the space left then we adjust the GTT size.
 319 * Thus the function never fails.
 320 *
 321 * FIXME: when reducing GTT size align new size on power of 2.
 322 */
 323void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 324{
 325	u64 size_af, size_bf;
 326
 327	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
 328	size_bf = mc->vram_start & ~mc->gtt_base_align;
 329	if (size_bf > size_af) {
 330		if (mc->gtt_size > size_bf) {
 331			dev_warn(rdev->dev, "limiting GTT\n");
 332			mc->gtt_size = size_bf;
 333		}
 334		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 335	} else {
 336		if (mc->gtt_size > size_af) {
 337			dev_warn(rdev->dev, "limiting GTT\n");
 338			mc->gtt_size = size_af;
 339		}
 340		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 341	}
 342	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 343	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 344			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 345}
 346
 347/*
 348 * GPU helpers function.
 349 */
 350bool radeon_card_posted(struct radeon_device *rdev)
 351{
 352	uint32_t reg;
 353
 354	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
 355		return false;
 356
 357	/* first check CRTCs */
 358	if (ASIC_IS_DCE41(rdev)) {
 359		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 360			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
 361		if (reg & EVERGREEN_CRTC_MASTER_EN)
 362			return true;
 363	} else if (ASIC_IS_DCE4(rdev)) {
 364		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 365			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
 366			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
 367			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
 368			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
 369			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
 370		if (reg & EVERGREEN_CRTC_MASTER_EN)
 371			return true;
 372	} else if (ASIC_IS_AVIVO(rdev)) {
 373		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
 374		      RREG32(AVIVO_D2CRTC_CONTROL);
 375		if (reg & AVIVO_CRTC_EN) {
 376			return true;
 377		}
 378	} else {
 379		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
 380		      RREG32(RADEON_CRTC2_GEN_CNTL);
 381		if (reg & RADEON_CRTC_EN) {
 382			return true;
 383		}
 384	}
 385
 386	/* then check MEM_SIZE, in case the crtcs are off */
 387	if (rdev->family >= CHIP_R600)
 388		reg = RREG32(R600_CONFIG_MEMSIZE);
 389	else
 390		reg = RREG32(RADEON_CONFIG_MEMSIZE);
 391
 392	if (reg)
 393		return true;
 394
 395	return false;
 396
 397}
 398
 399void radeon_update_bandwidth_info(struct radeon_device *rdev)
 400{
 401	fixed20_12 a;
 402	u32 sclk = rdev->pm.current_sclk;
 403	u32 mclk = rdev->pm.current_mclk;
 404
 405	/* sclk/mclk in Mhz */
 406	a.full = dfixed_const(100);
 407	rdev->pm.sclk.full = dfixed_const(sclk);
 408	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
 409	rdev->pm.mclk.full = dfixed_const(mclk);
 410	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 411
 412	if (rdev->flags & RADEON_IS_IGP) {
 413		a.full = dfixed_const(16);
 414		/* core_bandwidth = sclk(Mhz) * 16 */
 415		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 416	}
 417}
 418
 419bool radeon_boot_test_post_card(struct radeon_device *rdev)
 420{
 421	if (radeon_card_posted(rdev))
 422		return true;
 423
 424	if (rdev->bios) {
 425		DRM_INFO("GPU not posted. posting now...\n");
 426		if (rdev->is_atom_bios)
 427			atom_asic_init(rdev->mode_info.atom_context);
 428		else
 429			radeon_combios_asic_init(rdev->ddev);
 430		return true;
 431	} else {
 432		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 433		return false;
 434	}
 435}
 436
 437int radeon_dummy_page_init(struct radeon_device *rdev)
 438{
 439	if (rdev->dummy_page.page)
 440		return 0;
 441	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 442	if (rdev->dummy_page.page == NULL)
 443		return -ENOMEM;
 444	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
 445					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 446	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
 447		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
 448		__free_page(rdev->dummy_page.page);
 449		rdev->dummy_page.page = NULL;
 450		return -ENOMEM;
 451	}
 452	return 0;
 453}
 454
 455void radeon_dummy_page_fini(struct radeon_device *rdev)
 456{
 457	if (rdev->dummy_page.page == NULL)
 458		return;
 459	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
 460			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 461	__free_page(rdev->dummy_page.page);
 462	rdev->dummy_page.page = NULL;
 463}
 464
 465
 466/* ATOM accessor methods */
 467static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 468{
 469	struct radeon_device *rdev = info->dev->dev_private;
 470	uint32_t r;
 471
 472	r = rdev->pll_rreg(rdev, reg);
 473	return r;
 474}
 475
 476static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
 477{
 478	struct radeon_device *rdev = info->dev->dev_private;
 479
 480	rdev->pll_wreg(rdev, reg, val);
 481}
 482
 483static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
 484{
 485	struct radeon_device *rdev = info->dev->dev_private;
 486	uint32_t r;
 487
 488	r = rdev->mc_rreg(rdev, reg);
 489	return r;
 490}
 491
 492static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
 493{
 494	struct radeon_device *rdev = info->dev->dev_private;
 495
 496	rdev->mc_wreg(rdev, reg, val);
 497}
 498
 499static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 500{
 501	struct radeon_device *rdev = info->dev->dev_private;
 502
 503	WREG32(reg*4, val);
 504}
 505
 506static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 507{
 508	struct radeon_device *rdev = info->dev->dev_private;
 509	uint32_t r;
 510
 511	r = RREG32(reg*4);
 512	return r;
 513}
 514
 515static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
 516{
 517	struct radeon_device *rdev = info->dev->dev_private;
 518
 519	WREG32_IO(reg*4, val);
 520}
 521
 522static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 523{
 524	struct radeon_device *rdev = info->dev->dev_private;
 525	uint32_t r;
 526
 527	r = RREG32_IO(reg*4);
 528	return r;
 529}
 530
 531int radeon_atombios_init(struct radeon_device *rdev)
 532{
 533	struct card_info *atom_card_info =
 534	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
 535
 536	if (!atom_card_info)
 537		return -ENOMEM;
 538
 539	rdev->mode_info.atom_card_info = atom_card_info;
 540	atom_card_info->dev = rdev->ddev;
 541	atom_card_info->reg_read = cail_reg_read;
 542	atom_card_info->reg_write = cail_reg_write;
 543	/* needed for iio ops */
 544	if (rdev->rio_mem) {
 545		atom_card_info->ioreg_read = cail_ioreg_read;
 546		atom_card_info->ioreg_write = cail_ioreg_write;
 547	} else {
 548		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
 549		atom_card_info->ioreg_read = cail_reg_read;
 550		atom_card_info->ioreg_write = cail_reg_write;
 551	}
 552	atom_card_info->mc_read = cail_mc_read;
 553	atom_card_info->mc_write = cail_mc_write;
 554	atom_card_info->pll_read = cail_pll_read;
 555	atom_card_info->pll_write = cail_pll_write;
 556
 557	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 558	mutex_init(&rdev->mode_info.atom_context->mutex);
 559	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 560	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 561	return 0;
 562}
 563
 564void radeon_atombios_fini(struct radeon_device *rdev)
 565{
 566	if (rdev->mode_info.atom_context) {
 567		kfree(rdev->mode_info.atom_context->scratch);
 568		kfree(rdev->mode_info.atom_context);
 569	}
 570	kfree(rdev->mode_info.atom_card_info);
 571}
 572
 573int radeon_combios_init(struct radeon_device *rdev)
 574{
 575	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
 576	return 0;
 577}
 578
 579void radeon_combios_fini(struct radeon_device *rdev)
 580{
 581}
 582
  583/* if we get transitioned to only one device, take VGA back */
 584static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 585{
 586	struct radeon_device *rdev = cookie;
 587	radeon_vga_set_state(rdev, state);
 588	if (state)
 589		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 590		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 591	else
 592		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 593}
 594
 595void radeon_check_arguments(struct radeon_device *rdev)
 596{
 597	/* vramlimit must be a power of two */
 598	switch (radeon_vram_limit) {
 599	case 0:
 600	case 4:
 601	case 8:
 602	case 16:
 603	case 32:
 604	case 64:
 605	case 128:
 606	case 256:
 607	case 512:
 608	case 1024:
 609	case 2048:
 610	case 4096:
 611		break;
 612	default:
 613		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 614				radeon_vram_limit);
 615		radeon_vram_limit = 0;
 616		break;
 617	}
 618	radeon_vram_limit = radeon_vram_limit << 20;
 619	/* gtt size must be power of two and greater or equal to 32M */
 620	switch (radeon_gart_size) {
 621	case 4:
 622	case 8:
 623	case 16:
 624		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
 625				radeon_gart_size);
 626		radeon_gart_size = 512;
 627		break;
 628	case 32:
 629	case 64:
 630	case 128:
 631	case 256:
 632	case 512:
 633	case 1024:
 634	case 2048:
 635	case 4096:
 636		break;
 637	default:
 638		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 639				radeon_gart_size);
 640		radeon_gart_size = 512;
 641		break;
 642	}
 643	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 644	/* AGP mode can only be -1, 1, 2, 4, 8 */
 645	switch (radeon_agpmode) {
 646	case -1:
 647	case 0:
 648	case 1:
 649	case 2:
 650	case 4:
 651	case 8:
 652		break;
 653	default:
 654		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
 655				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
 656		radeon_agpmode = 0;
 657		break;
 658	}
 659}
 660
 661static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 662{
 663	struct drm_device *dev = pci_get_drvdata(pdev);
 664	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 665	if (state == VGA_SWITCHEROO_ON) {
 666		printk(KERN_INFO "radeon: switched on\n");
 667		/* don't suspend or resume card normally */
 668		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 669		radeon_resume_kms(dev);
 670		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 671		drm_kms_helper_poll_enable(dev);
 672	} else {
 673		printk(KERN_INFO "radeon: switched off\n");
 674		drm_kms_helper_poll_disable(dev);
 675		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 676		radeon_suspend_kms(dev, pmm);
 677		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 678	}
 679}
 680
 681static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
 682{
 683	struct drm_device *dev = pci_get_drvdata(pdev);
 684	bool can_switch;
 685
 686	spin_lock(&dev->count_lock);
 687	can_switch = (dev->open_count == 0);
 688	spin_unlock(&dev->count_lock);
 689	return can_switch;
 690}
 691
 692
 693int radeon_device_init(struct radeon_device *rdev,
 694		       struct drm_device *ddev,
 695		       struct pci_dev *pdev,
 696		       uint32_t flags)
 697{
 698	int r, i;
 699	int dma_bits;
 700
 701	rdev->shutdown = false;
 702	rdev->dev = &pdev->dev;
 703	rdev->ddev = ddev;
 704	rdev->pdev = pdev;
 705	rdev->flags = flags;
 706	rdev->family = flags & RADEON_FAMILY_MASK;
 707	rdev->is_atom_bios = false;
 708	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
 709	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 710	rdev->gpu_lockup = false;
 711	rdev->accel_working = false;
 712
 713	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
 714		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
 715		pdev->subsystem_vendor, pdev->subsystem_device);
 716
 717	/* mutex initialization are all done here so we
 718	 * can recall function without having locking issues */
 719	mutex_init(&rdev->cs_mutex);
 720	mutex_init(&rdev->ib_pool.mutex);
 721	mutex_init(&rdev->cp.mutex);
 722	mutex_init(&rdev->dc_hw_i2c_mutex);
 723	if (rdev->family >= CHIP_R600)
 724		spin_lock_init(&rdev->ih.lock);
 725	mutex_init(&rdev->gem.mutex);
 726	mutex_init(&rdev->pm.mutex);
 727	mutex_init(&rdev->vram_mutex);
 728	rwlock_init(&rdev->fence_drv.lock);
 729	INIT_LIST_HEAD(&rdev->gem.objects);
 730	init_waitqueue_head(&rdev->irq.vblank_queue);
 731	init_waitqueue_head(&rdev->irq.idle_queue);
 732
 733	/* Set asic functions */
 734	r = radeon_asic_init(rdev);
 735	if (r)
 736		return r;
 737	radeon_check_arguments(rdev);
 738
 739	/* all of the newer IGP chips have an internal gart
 740	 * However some rs4xx report as AGP, so remove that here.
 741	 */
 742	if ((rdev->family >= CHIP_RS400) &&
 743	    (rdev->flags & RADEON_IS_IGP)) {
 744		rdev->flags &= ~RADEON_IS_AGP;
 745	}
 746
 747	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 748		radeon_agp_disable(rdev);
 749	}
 750
 751	/* set DMA mask + need_dma32 flags.
 752	 * PCIE - can handle 40-bits.
 753	 * IGP - can handle 40-bits (in theory)
 754	 * AGP - generally dma32 is safest
 755	 * PCI - only dma32
 756	 */
 757	rdev->need_dma32 = false;
 758	if (rdev->flags & RADEON_IS_AGP)
 759		rdev->need_dma32 = true;
 760	if (rdev->flags & RADEON_IS_PCI)
 761		rdev->need_dma32 = true;
 762
 763	dma_bits = rdev->need_dma32 ? 32 : 40;
 764	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
 765	if (r) {
 766		rdev->need_dma32 = true;
 767		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
 768	}
 769
 770	/* Registers mapping */
 771	/* TODO: block userspace mapping of io register */
 772	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
 773	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
 774	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
 775	if (rdev->rmmio == NULL) {
 776		return -ENOMEM;
 777	}
 778	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
 779	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 780
 781	/* io port mapping */
 782	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 783		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
 784			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
 785			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
 786			break;
 787		}
 788	}
 789	if (rdev->rio_mem == NULL)
 790		DRM_ERROR("Unable to find PCI I/O BAR\n");
 791
 792	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
 793	/* this will fail for cards that aren't VGA class devices, just
 794	 * ignore it */
 795	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 796	vga_switcheroo_register_client(rdev->pdev,
 797				       radeon_switcheroo_set_state,
 798				       NULL,
 799				       radeon_switcheroo_can_switch);
 800
 801	r = radeon_init(rdev);
 802	if (r)
 803		return r;
 804
 805	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
 806		/* Acceleration not working on AGP card try again
 807		 * with fallback to PCI or PCIE GART
 808		 */
 809		radeon_asic_reset(rdev);
 810		radeon_fini(rdev);
 811		radeon_agp_disable(rdev);
 812		r = radeon_init(rdev);
 813		if (r)
 814			return r;
 815	}
 816	if (radeon_testing) {
 817		radeon_test_moves(rdev);
 818	}
 819	if (radeon_benchmarking) {
 820		radeon_benchmark(rdev);
 821	}
 822	return 0;
 823}
 824
 825void radeon_device_fini(struct radeon_device *rdev)
 826{
 827	DRM_INFO("radeon: finishing device.\n");
 828	rdev->shutdown = true;
 829	/* evict vram memory */
 830	radeon_bo_evict_vram(rdev);
 831	radeon_fini(rdev);
 832	vga_switcheroo_unregister_client(rdev->pdev);
 833	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 834	if (rdev->rio_mem)
 835		pci_iounmap(rdev->pdev, rdev->rio_mem);
 836	rdev->rio_mem = NULL;
 837	iounmap(rdev->rmmio);
 838	rdev->rmmio = NULL;
 839}
 840
 841
 842/*
 843 * Suspend & resume.
 844 */
 845int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 846{
 847	struct radeon_device *rdev;
 848	struct drm_crtc *crtc;
 849	struct drm_connector *connector;
 850	int r;
 851
 852	if (dev == NULL || dev->dev_private == NULL) {
 853		return -ENODEV;
 854	}
 855	if (state.event == PM_EVENT_PRETHAW) {
 856		return 0;
 857	}
 858	rdev = dev->dev_private;
 859
 860	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 861		return 0;
 862
 863	/* turn off display hw */
 864	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 865		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 866	}
 867
 868	/* unpin the front buffers */
 869	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 870		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
 871		struct radeon_bo *robj;
 872
 873		if (rfb == NULL || rfb->obj == NULL) {
 874			continue;
 875		}
 876		robj = gem_to_radeon_bo(rfb->obj);
 877		/* don't unpin kernel fb objects */
 878		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 879			r = radeon_bo_reserve(robj, false);
 880			if (r == 0) {
 881				radeon_bo_unpin(robj);
 882				radeon_bo_unreserve(robj);
 883			}
 884		}
 885	}
 886	/* evict vram memory */
 887	radeon_bo_evict_vram(rdev);
 888	/* wait for gpu to finish processing current batch */
 889	radeon_fence_wait_last(rdev);
 890
 891	radeon_save_bios_scratch_regs(rdev);
 892
 893	radeon_pm_suspend(rdev);
 894	radeon_suspend(rdev);
 895	radeon_hpd_fini(rdev);
 896	/* evict remaining vram memory */
 897	radeon_bo_evict_vram(rdev);
 898
 899	radeon_agp_suspend(rdev);
 900
 901	pci_save_state(dev->pdev);
 902	if (state.event == PM_EVENT_SUSPEND) {
 903		/* Shut down the device */
 904		pci_disable_device(dev->pdev);
 905		pci_set_power_state(dev->pdev, PCI_D3hot);
 906	}
 907	console_lock();
 908	radeon_fbdev_set_suspend(rdev, 1);
 909	console_unlock();
 910	return 0;
 911}
 912
 913int radeon_resume_kms(struct drm_device *dev)
 914{
 915	struct drm_connector *connector;
 916	struct radeon_device *rdev = dev->dev_private;
 917
 918	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 919		return 0;
 920
 921	console_lock();
 922	pci_set_power_state(dev->pdev, PCI_D0);
 923	pci_restore_state(dev->pdev);
 924	if (pci_enable_device(dev->pdev)) {
 925		console_unlock();
 926		return -1;
 927	}
 928	pci_set_master(dev->pdev);
 929	/* resume AGP if in use */
 930	radeon_agp_resume(rdev);
 931	radeon_resume(rdev);
 932	radeon_pm_resume(rdev);
 933	radeon_restore_bios_scratch_regs(rdev);
 934
 935	radeon_fbdev_set_suspend(rdev, 0);
 936	console_unlock();
 937
 938	/* init dig PHYs */
 939	if (rdev->is_atom_bios)
 940		radeon_atom_encoder_init(rdev);
 941	/* reset hpd state */
 942	radeon_hpd_init(rdev);
 943	/* blat the mode back in */
 944	drm_helper_resume_force_mode(dev);
 945	/* turn on display hw */
 946	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 947		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 948	}
 949	return 0;
 950}
 951
 952int radeon_gpu_reset(struct radeon_device *rdev)
 953{
 954	int r;
 955	int resched;
 956
 957	radeon_save_bios_scratch_regs(rdev);
 958	/* block TTM */
 959	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 960	radeon_suspend(rdev);
 961
 962	r = radeon_asic_reset(rdev);
 963	if (!r) {
 964		dev_info(rdev->dev, "GPU reset succeed\n");
 965		radeon_resume(rdev);
 966		radeon_restore_bios_scratch_regs(rdev);
 967		drm_helper_resume_force_mode(rdev->ddev);
 968		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 969		return 0;
 970	}
 971	/* bad news, how to tell it to userspace ? */
 972	dev_info(rdev->dev, "GPU reset failed\n");
 973	return r;
 974}
 975
 976
 977/*
 978 * Debugfs
 979 */
 980struct radeon_debugfs {
 981	struct drm_info_list	*files;
 982	unsigned		num_files;
 983};
 984static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
 985static unsigned _radeon_debugfs_count = 0;
 986
 987int radeon_debugfs_add_files(struct radeon_device *rdev,
 988			     struct drm_info_list *files,
 989			     unsigned nfiles)
 990{
 991	unsigned i;
 992
 993	for (i = 0; i < _radeon_debugfs_count; i++) {
 994		if (_radeon_debugfs[i].files == files) {
 995			/* Already registered */
 996			return 0;
 997		}
 998	}
 999	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
1000		DRM_ERROR("Reached maximum number of debugfs files.\n");
1001		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
1002		return -EINVAL;
1003	}
1004	_radeon_debugfs[_radeon_debugfs_count].files = files;
1005	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
1006	_radeon_debugfs_count++;
1007#if defined(CONFIG_DEBUG_FS)
1008	drm_debugfs_create_files(files, nfiles,
1009				 rdev->ddev->control->debugfs_root,
1010				 rdev->ddev->control);
1011	drm_debugfs_create_files(files, nfiles,
1012				 rdev->ddev->primary->debugfs_root,
1013				 rdev->ddev->primary);
1014#endif
1015	return 0;
1016}
1017
1018#if defined(CONFIG_DEBUG_FS)
1019int radeon_debugfs_init(struct drm_minor *minor)
1020{
1021	return 0;
1022}
1023
1024void radeon_debugfs_cleanup(struct drm_minor *minor)
1025{
1026	unsigned i;
1027
1028	for (i = 0; i < _radeon_debugfs_count; i++) {
1029		drm_debugfs_remove_files(_radeon_debugfs[i].files,
1030					 _radeon_debugfs[i].num_files, minor);
1031	}
1032}
1033#endif
v3.15
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/console.h>
  29#include <linux/slab.h>
  30#include <drm/drmP.h>
  31#include <drm/drm_crtc_helper.h>
  32#include <drm/radeon_drm.h>
  33#include <linux/vgaarb.h>
  34#include <linux/vga_switcheroo.h>
  35#include <linux/efi.h>
  36#include "radeon_reg.h"
  37#include "radeon.h"
  38#include "atom.h"
  39
  40static const char radeon_family_name[][16] = {
  41	"R100",
  42	"RV100",
  43	"RS100",
  44	"RV200",
  45	"RS200",
  46	"R200",
  47	"RV250",
  48	"RS300",
  49	"RV280",
  50	"R300",
  51	"R350",
  52	"RV350",
  53	"RV380",
  54	"R420",
  55	"R423",
  56	"RV410",
  57	"RS400",
  58	"RS480",
  59	"RS600",
  60	"RS690",
  61	"RS740",
  62	"RV515",
  63	"R520",
  64	"RV530",
  65	"RV560",
  66	"RV570",
  67	"R580",
  68	"R600",
  69	"RV610",
  70	"RV630",
  71	"RV670",
  72	"RV620",
  73	"RV635",
  74	"RS780",
  75	"RS880",
  76	"RV770",
  77	"RV730",
  78	"RV710",
  79	"RV740",
  80	"CEDAR",
  81	"REDWOOD",
  82	"JUNIPER",
  83	"CYPRESS",
  84	"HEMLOCK",
  85	"PALM",
  86	"SUMO",
  87	"SUMO2",
  88	"BARTS",
  89	"TURKS",
  90	"CAICOS",
  91	"CAYMAN",
  92	"ARUBA",
  93	"TAHITI",
  94	"PITCAIRN",
  95	"VERDE",
  96	"OLAND",
  97	"HAINAN",
  98	"BONAIRE",
  99	"KAVERI",
 100	"KABINI",
 101	"HAWAII",
 102	"MULLINS",
 103	"LAST",
 104};
 105
 106bool radeon_is_px(struct drm_device *dev)
 107{
 108	struct radeon_device *rdev = dev->dev_private;
 109
 110	if (rdev->flags & RADEON_IS_PX)
 111		return true;
 112	return false;
 113}
 114
 115/**
 116 * radeon_program_register_sequence - program an array of registers.
 117 *
 118 * @rdev: radeon_device pointer
 119 * @registers: pointer to the register array
 120 * @array_size: size of the register array
 121 *
 122 * Programs an array of registers with AND and OR masks.
 123 * This is a helper for setting golden registers.
 124 */
 125void radeon_program_register_sequence(struct radeon_device *rdev,
 126				      const u32 *registers,
 127				      const u32 array_size)
 128{
 129	u32 tmp, reg, and_mask, or_mask;
 130	int i;
 131
 132	if (array_size % 3)
 133		return;
 134
 135	for (i = 0; i < array_size; i +=3) {
 136		reg = registers[i + 0];
 137		and_mask = registers[i + 1];
 138		or_mask = registers[i + 2];
 139
 140		if (and_mask == 0xffffffff) {
 141			tmp = or_mask;
 142		} else {
 143			tmp = RREG32(reg);
 144			tmp &= ~and_mask;
 145			tmp |= or_mask;
 146		}
 147		WREG32(reg, tmp);
 148	}
 149}
 150
 151void radeon_pci_config_reset(struct radeon_device *rdev)
 152{
 153	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
 154}
 155
 156/**
 157 * radeon_surface_init - Clear GPU surface registers.
 158 *
 159 * @rdev: radeon_device pointer
 160 *
 161 * Clear GPU surface registers (r1xx-r5xx).
 162 */
 163void radeon_surface_init(struct radeon_device *rdev)
 164{
 165	/* FIXME: check this out */
 166	if (rdev->family < CHIP_R600) {
 167		int i;
 168
 169		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 170			if (rdev->surface_regs[i].bo)
 171				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
 172			else
 173				radeon_clear_surface_reg(rdev, i);
 174		}
 175		/* enable surfaces */
 176		WREG32(RADEON_SURFACE_CNTL, 0);
 177	}
 178}
 179
 180/*
 181 * GPU scratch registers helpers function.
 182 */
 183/**
 184 * radeon_scratch_init - Init scratch register driver information.
 185 *
 186 * @rdev: radeon_device pointer
 187 *
 188 * Init CP scratch register driver information (r1xx-r5xx)
 189 */
 190void radeon_scratch_init(struct radeon_device *rdev)
 191{
 192	int i;
 193
 194	/* FIXME: check this out */
 195	if (rdev->family < CHIP_R300) {
 196		rdev->scratch.num_reg = 5;
 197	} else {
 198		rdev->scratch.num_reg = 7;
 199	}
 200	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
 201	for (i = 0; i < rdev->scratch.num_reg; i++) {
 202		rdev->scratch.free[i] = true;
 203		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
 204	}
 205}
 206
 207/**
 208 * radeon_scratch_get - Allocate a scratch register
 209 *
 210 * @rdev: radeon_device pointer
 211 * @reg: scratch register mmio offset
 212 *
 213 * Allocate a CP scratch register for use by the driver (all asics).
 214 * Returns 0 on success or -EINVAL on failure.
 215 */
 216int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
 217{
 218	int i;
 219
 220	for (i = 0; i < rdev->scratch.num_reg; i++) {
 221		if (rdev->scratch.free[i]) {
 222			rdev->scratch.free[i] = false;
 223			*reg = rdev->scratch.reg[i];
 224			return 0;
 225		}
 226	}
 227	return -EINVAL;
 228}
 229
 230/**
 231 * radeon_scratch_free - Free a scratch register
 232 *
 233 * @rdev: radeon_device pointer
 234 * @reg: scratch register mmio offset
 235 *
 236 * Free a CP scratch register allocated for use by the driver (all asics)
 237 */
 238void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 239{
 240	int i;
 241
 242	for (i = 0; i < rdev->scratch.num_reg; i++) {
 243		if (rdev->scratch.reg[i] == reg) {
 244			rdev->scratch.free[i] = true;
 245			return;
 246		}
 247	}
 248}
 249
 250/*
 251 * GPU doorbell aperture helpers function.
 252 */
 253/**
 254 * radeon_doorbell_init - Init doorbell driver information.
 255 *
 256 * @rdev: radeon_device pointer
 257 *
 258 * Init doorbell driver information (CIK)
 259 * Returns 0 on success, error on failure.
 260 */
 261static int radeon_doorbell_init(struct radeon_device *rdev)
 262{
 263	/* doorbell bar mapping */
 264	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
 265	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
 266
 267	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
 268	if (rdev->doorbell.num_doorbells == 0)
 269		return -EINVAL;
 270
 271	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
 272	if (rdev->doorbell.ptr == NULL) {
 273		return -ENOMEM;
 274	}
 275	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
 276	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
 277
 278	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
 279
 280	return 0;
 281}
 282
 283/**
 284 * radeon_doorbell_fini - Tear down doorbell driver information.
 285 *
 286 * @rdev: radeon_device pointer
 287 *
 288 * Tear down doorbell driver information (CIK)
 289 */
 290static void radeon_doorbell_fini(struct radeon_device *rdev)
 291{
 292	iounmap(rdev->doorbell.ptr);
 293	rdev->doorbell.ptr = NULL;
 294}
 295
 296/**
 297 * radeon_doorbell_get - Allocate a doorbell entry
 298 *
 299 * @rdev: radeon_device pointer
 300 * @doorbell: doorbell index
 301 *
 302 * Allocate a doorbell for use by the driver (all asics).
 303 * Returns 0 on success or -EINVAL on failure.
 304 */
 305int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
 306{
 307	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
 308	if (offset < rdev->doorbell.num_doorbells) {
 309		__set_bit(offset, rdev->doorbell.used);
 310		*doorbell = offset;
 311		return 0;
 312	} else {
 313		return -EINVAL;
 314	}
 315}
 316
 317/**
 318 * radeon_doorbell_free - Free a doorbell entry
 319 *
 320 * @rdev: radeon_device pointer
 321 * @doorbell: doorbell index
 322 *
 323 * Free a doorbell allocated for use by the driver (all asics)
 324 */
 325void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
 326{
 327	if (doorbell < rdev->doorbell.num_doorbells)
 328		__clear_bit(doorbell, rdev->doorbell.used);
 329}
 330
 331/*
 332 * radeon_wb_*()
 333 * Writeback is the method by which the GPU updates special pages
 334 * in memory with the status of certain GPU events (fences, ring pointers,
 335 * etc.).
 336 */
 337
 338/**
 339 * radeon_wb_disable - Disable Writeback
 340 *
 341 * @rdev: radeon_device pointer
 342 *
 343 * Disables Writeback (all asics).  Used for suspend.
 344 */
 345void radeon_wb_disable(struct radeon_device *rdev)
 346{
 347	rdev->wb.enabled = false;
 348}
 349
 350/**
 351 * radeon_wb_fini - Disable Writeback and free memory
 352 *
 353 * @rdev: radeon_device pointer
 354 *
 355 * Disables Writeback and frees the Writeback memory (all asics).
 356 * Used at driver shutdown.
 357 */
 358void radeon_wb_fini(struct radeon_device *rdev)
 359{
 360	radeon_wb_disable(rdev);
 361	if (rdev->wb.wb_obj) {
 362		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
 363			radeon_bo_kunmap(rdev->wb.wb_obj);
 364			radeon_bo_unpin(rdev->wb.wb_obj);
 365			radeon_bo_unreserve(rdev->wb.wb_obj);
 366		}
 367		radeon_bo_unref(&rdev->wb.wb_obj);
 368		rdev->wb.wb = NULL;
 369		rdev->wb.wb_obj = NULL;
 370	}
 371}
 372
 373/**
 374 * radeon_wb_init - Init Writeback driver info and allocate memory
 375 *
 376 * @rdev: radeon_device pointer
 377 *
 378 * Initializes Writeback and allocates the Writeback memory (all asics).
 379 * Used at driver startup.
 380 * Returns 0 on success or a negative error code on failure.
 381 */
 382int radeon_wb_init(struct radeon_device *rdev)
 383{
 384	int r;
 385
 386	if (rdev->wb.wb_obj == NULL) {
 387		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 388				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
 389		if (r) {
 390			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 391			return r;
 392		}
 393		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
 394		if (unlikely(r != 0)) {
 395			radeon_wb_fini(rdev);
 396			return r;
 397		}
 398		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
 399				&rdev->wb.gpu_addr);
 400		if (r) {
 401			radeon_bo_unreserve(rdev->wb.wb_obj);
 402			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
 403			radeon_wb_fini(rdev);
 404			return r;
 405		}
 406		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 407		radeon_bo_unreserve(rdev->wb.wb_obj);
 408		if (r) {
 409			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 410			radeon_wb_fini(rdev);
 411			return r;
 412		}
 413	}
 414
 415	/* clear wb memory */
 416	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
 417	/* disable event_write fences */
 418	rdev->wb.use_event = false;
 419	/* disabled via module param */
 420	if (radeon_no_wb == 1) {
 421		rdev->wb.enabled = false;
 422	} else {
 423		if (rdev->flags & RADEON_IS_AGP) {
 424			/* often unreliable on AGP */
 425			rdev->wb.enabled = false;
 426		} else if (rdev->family < CHIP_R300) {
 427			/* often unreliable on pre-r300 */
 428			rdev->wb.enabled = false;
 429		} else {
 430			rdev->wb.enabled = true;
 431			/* event_write fences are only available on r600+ */
 432			if (rdev->family >= CHIP_R600) {
 433				rdev->wb.use_event = true;
 434			}
 435		}
 436	}
 437	/* always use writeback/events on NI, APUs */
 438	if (rdev->family >= CHIP_PALM) {
 439		rdev->wb.enabled = true;
 440		rdev->wb.use_event = true;
 441	}
 442
 443	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
 444
 445	return 0;
 446}
 447
 448/**
 449 * radeon_vram_location - try to find VRAM location
 450 * @rdev: radeon device structure holding all necessary information
 451 * @mc: memory controller structure holding memory information
 452 * @base: base address at which to put VRAM
 453 *
 454 * Function will try to place VRAM at the base address provided
 455 * as parameter (which is so far either the PCI aperture address or,
 456 * for IGP, the TOM base address).
 457 *
 458 * If there is not enough space to fit the invisible VRAM in the 32-bit
 459 * address space then we limit the VRAM size to the aperture.
 460 *
 461 * If we are using AGP and the AGP aperture doesn't allow us to have
 462 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 463 * size and print a warning.
 464 *
 465 * This function never fails; worst case is limiting VRAM.
 466 *
 467 * Note: GTT start, end, size should be initialized before calling this
 468 * function on AGP platform.
 469 *
 470 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 471 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 472 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 473 * not IGP.
 474 *
 475 * Note: we use mc_vram_size as on some boards we need to program the mc to
 476 * cover the whole aperture even if VRAM size is smaller than the aperture
 477 * size (Novell bug 204882, along with lots of Ubuntu ones).
 478 *
 479 * Note: when limiting vram it's safe to overwrite real_vram_size because
 480 * we are not in the case where real_vram_size is smaller than mc_vram_size
 481 * (i.e. not affected by the bogus hw of Novell bug 204882 and the various
 482 * Ubuntu ones).
 483 *
 484 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 485 * explicitly check for that though.
 486 *
 487 * FIXME: when reducing VRAM size align new size on power of 2.
 488 */
 489void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 490{
 491	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
 492
 493	mc->vram_start = base;
 494	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
 495		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 496		mc->real_vram_size = mc->aper_size;
 497		mc->mc_vram_size = mc->aper_size;
 498	}
 499	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 500	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
 501		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 502		mc->real_vram_size = mc->aper_size;
 503		mc->mc_vram_size = mc->aper_size;
 504	}
 505	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 506	if (limit && limit < mc->real_vram_size)
 507		mc->real_vram_size = limit;
 508	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 509			mc->mc_vram_size >> 20, mc->vram_start,
 510			mc->vram_end, mc->real_vram_size >> 20);
 511}
 512
 513/**
 514 * radeon_gtt_location - try to find GTT location
 515 * @rdev: radeon device structure holding all necessary information
 516 * @mc: memory controller structure holding memory information
 517 *
 518 * Function will try to place GTT before or after VRAM.
 519 *
 520 * If GTT size is bigger than the space left then we adjust the GTT size.
 521 * Thus the function never fails.
 522 *
 523 * FIXME: when reducing GTT size align new size on power of 2.
 524 */
 525void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 526{
 527	u64 size_af, size_bf;
 528
 529	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
 530	size_bf = mc->vram_start & ~mc->gtt_base_align;
 531	if (size_bf > size_af) {
 532		if (mc->gtt_size > size_bf) {
 533			dev_warn(rdev->dev, "limiting GTT\n");
 534			mc->gtt_size = size_bf;
 535		}
 536		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 537	} else {
 538		if (mc->gtt_size > size_af) {
 539			dev_warn(rdev->dev, "limiting GTT\n");
 540			mc->gtt_size = size_af;
 541		}
 542		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 543	}
 544	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 545	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 546			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 547}
 548
 549/*
 550 * GPU helpers function.
 551 */
 552/**
 553 * radeon_card_posted - check if the hw has already been initialized
 554 *
 555 * @rdev: radeon_device pointer
 556 *
 557 * Check if the asic has been initialized (all asics).
 558 * Used at driver startup.
 559 * Returns true if initialized or false if not.
 560 */
 561bool radeon_card_posted(struct radeon_device *rdev)
 562{
 563	uint32_t reg;
 564
 565	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
 566	if (efi_enabled(EFI_BOOT) &&
 567	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
 568	    (rdev->family < CHIP_R600))
 569		return false;
 570
 571	if (ASIC_IS_NODCE(rdev))
 572		goto check_memsize;
 573
 574	/* first check CRTCs */
 575	if (ASIC_IS_DCE4(rdev)) {
 576		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 577			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
 578			if (rdev->num_crtc >= 4) {
 579				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
 580					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
 581			}
 582			if (rdev->num_crtc >= 6) {
 583				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
 584					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
 585			}
 586		if (reg & EVERGREEN_CRTC_MASTER_EN)
 587			return true;
 588	} else if (ASIC_IS_AVIVO(rdev)) {
 589		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
 590		      RREG32(AVIVO_D2CRTC_CONTROL);
 591		if (reg & AVIVO_CRTC_EN) {
 592			return true;
 593		}
 594	} else {
 595		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
 596		      RREG32(RADEON_CRTC2_GEN_CNTL);
 597		if (reg & RADEON_CRTC_EN) {
 598			return true;
 599		}
 600	}
 601
 602check_memsize:
 603	/* then check MEM_SIZE, in case the crtcs are off */
 604	if (rdev->family >= CHIP_R600)
 605		reg = RREG32(R600_CONFIG_MEMSIZE);
 606	else
 607		reg = RREG32(RADEON_CONFIG_MEMSIZE);
 608
 609	if (reg)
 610		return true;
 611
 612	return false;
 613
 614}
 615
 616/**
 617 * radeon_update_bandwidth_info - update display bandwidth params
 618 *
 619 * @rdev: radeon_device pointer
 620 *
 621 * Used when sclk/mclk are switched or display modes are set.
 622 * Params are used to calculate display watermarks (all asics).
 623 */
 624void radeon_update_bandwidth_info(struct radeon_device *rdev)
 625{
 626	fixed20_12 a;
 627	u32 sclk = rdev->pm.current_sclk;
 628	u32 mclk = rdev->pm.current_mclk;
 629
 630	/* sclk/mclk in Mhz */
 631	a.full = dfixed_const(100);
 632	rdev->pm.sclk.full = dfixed_const(sclk);
 633	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
 634	rdev->pm.mclk.full = dfixed_const(mclk);
 635	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 636
 637	if (rdev->flags & RADEON_IS_IGP) {
 638		a.full = dfixed_const(16);
 639		/* core_bandwidth = sclk(Mhz) * 16 */
 640		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 641	}
 642}
 643
 644/**
 645 * radeon_boot_test_post_card - check and possibly initialize the hw
 646 *
 647 * @rdev: radeon_device pointer
 648 *
 649 * Check if the asic is initialized and if not, attempt to initialize
 650 * it (all asics).
 651 * Returns true if initialized or false if not.
 652 */
 653bool radeon_boot_test_post_card(struct radeon_device *rdev)
 654{
 655	if (radeon_card_posted(rdev))
 656		return true;
 657
 658	if (rdev->bios) {
 659		DRM_INFO("GPU not posted. posting now...\n");
 660		if (rdev->is_atom_bios)
 661			atom_asic_init(rdev->mode_info.atom_context);
 662		else
 663			radeon_combios_asic_init(rdev->ddev);
 664		return true;
 665	} else {
 666		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 667		return false;
 668	}
 669}
 670
 671/**
 672 * radeon_dummy_page_init - init dummy page used by the driver
 673 *
 674 * @rdev: radeon_device pointer
 675 *
 676 * Allocate the dummy page used by the driver (all asics).
 677 * This dummy page is used by the driver as a filler for gart entries
 678 * when pages are taken out of the GART
 679 * Returns 0 on success, -ENOMEM on failure.
 680 */
 681int radeon_dummy_page_init(struct radeon_device *rdev)
 682{
 683	if (rdev->dummy_page.page)
 684		return 0;
 685	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 686	if (rdev->dummy_page.page == NULL)
 687		return -ENOMEM;
 688	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
 689					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 690	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
 691		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
 692		__free_page(rdev->dummy_page.page);
 693		rdev->dummy_page.page = NULL;
 694		return -ENOMEM;
 695	}
 696	return 0;
 697}
 698
 699/**
 700 * radeon_dummy_page_fini - free dummy page used by the driver
 701 *
 702 * @rdev: radeon_device pointer
 703 *
 704 * Frees the dummy page used by the driver (all asics).
 705 */
 706void radeon_dummy_page_fini(struct radeon_device *rdev)
 707{
 708	if (rdev->dummy_page.page == NULL)
 709		return;
 710	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
 711			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 712	__free_page(rdev->dummy_page.page);
 713	rdev->dummy_page.page = NULL;
 714}
 715
 716
 717/* ATOM accessor methods */
 718/*
 719 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 720 * driver registers callbacks to access registers and the interpreter
 721 * in the driver parses the tables and executes them to program specific
 722 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
 723 * atombios.h, and atom.c
 724 */
 725
 726/**
 727 * cail_pll_read - read PLL register
 728 *
 729 * @info: atom card_info pointer
 730 * @reg: PLL register offset
 731 *
 732 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 733 * Returns the value of the PLL register.
 734 */
 735static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 736{
 737	struct radeon_device *rdev = info->dev->dev_private;
 738	uint32_t r;
 739
 740	r = rdev->pll_rreg(rdev, reg);
 741	return r;
 742}
 743
 744/**
 745 * cail_pll_write - write PLL register
 746 *
 747 * @info: atom card_info pointer
 748 * @reg: PLL register offset
 749 * @val: value to write to the pll register
 750 *
 751 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 752 */
 753static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
 754{
 755	struct radeon_device *rdev = info->dev->dev_private;
 756
 757	rdev->pll_wreg(rdev, reg, val);
 758}
 759
 760/**
 761 * cail_mc_read - read MC (Memory Controller) register
 762 *
 763 * @info: atom card_info pointer
 764 * @reg: MC register offset
 765 *
 766 * Provides an MC register accessor for the atom interpreter (r4xx+).
 767 * Returns the value of the MC register.
 768 */
 769static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
 770{
 771	struct radeon_device *rdev = info->dev->dev_private;
 772	uint32_t r;
 773
 774	r = rdev->mc_rreg(rdev, reg);
 775	return r;
 776}
 777
 778/**
 779 * cail_mc_write - write MC (Memory Controller) register
 780 *
 781 * @info: atom card_info pointer
 782 * @reg: MC register offset
 783 * @val: value to write to the MC register
 784 *
 785 * Provides an MC register accessor for the atom interpreter (r4xx+).
 786 */
 787static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
 788{
 789	struct radeon_device *rdev = info->dev->dev_private;
 790
 791	rdev->mc_wreg(rdev, reg, val);
 792}
 793
 794/**
 795 * cail_reg_write - write MMIO register
 796 *
 797 * @info: atom card_info pointer
 798 * @reg: MMIO register offset
 799 * @val: value to write to the MMIO register
 800 *
 801 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 802 */
 803static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 804{
 805	struct radeon_device *rdev = info->dev->dev_private;
 806
 807	WREG32(reg*4, val);
 808}
 809
 810/**
 811 * cail_reg_read - read MMIO register
 812 *
 813 * @info: atom card_info pointer
 814 * @reg: MMIO register offset
 815 *
 816 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 817 * Returns the value of the MMIO register.
 818 */
 819static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 820{
 821	struct radeon_device *rdev = info->dev->dev_private;
 822	uint32_t r;
 823
 824	r = RREG32(reg*4);
 825	return r;
 826}
 827
 828/**
 829 * cail_ioreg_write - write IO register
 830 *
 831 * @info: atom card_info pointer
 832 * @reg: IO register offset
 833 * @val: value to write to the IO register
 834 *
 835 * Provides an IO register accessor for the atom interpreter (r4xx+).
 836 */
 837static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
 838{
 839	struct radeon_device *rdev = info->dev->dev_private;
 840
 841	WREG32_IO(reg*4, val);
 842}
 843
 844/**
 845 * cail_ioreg_read - read IO register
 846 *
 847 * @info: atom card_info pointer
 848 * @reg: IO register offset
 849 *
 850 * Provides an IO register accessor for the atom interpreter (r4xx+).
 851 * Returns the value of the IO register.
 852 */
 853static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 854{
 855	struct radeon_device *rdev = info->dev->dev_private;
 856	uint32_t r;
 857
 858	r = RREG32_IO(reg*4);
 859	return r;
 860}
 861
 862/**
 863 * radeon_atombios_init - init the driver info and callbacks for atombios
 864 *
 865 * @rdev: radeon_device pointer
 866 *
 867 * Initializes the driver info and register access callbacks for the
 868 * ATOM interpreter (r4xx+).
 869 * Returns 0 on success, -ENOMEM on failure.
 870 * Called at driver startup.
 871 */
 872int radeon_atombios_init(struct radeon_device *rdev)
 873{
 874	struct card_info *atom_card_info =
 875	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
 876
 877	if (!atom_card_info)
 878		return -ENOMEM;
 879
 880	rdev->mode_info.atom_card_info = atom_card_info;
 881	atom_card_info->dev = rdev->ddev;
 882	atom_card_info->reg_read = cail_reg_read;
 883	atom_card_info->reg_write = cail_reg_write;
 884	/* needed for iio ops */
 885	if (rdev->rio_mem) {
 886		atom_card_info->ioreg_read = cail_ioreg_read;
 887		atom_card_info->ioreg_write = cail_ioreg_write;
 888	} else {
 889		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
 890		atom_card_info->ioreg_read = cail_reg_read;
 891		atom_card_info->ioreg_write = cail_reg_write;
 892	}
 893	atom_card_info->mc_read = cail_mc_read;
 894	atom_card_info->mc_write = cail_mc_write;
 895	atom_card_info->pll_read = cail_pll_read;
 896	atom_card_info->pll_write = cail_pll_write;
 897
 898	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 899	if (!rdev->mode_info.atom_context) {
 900		radeon_atombios_fini(rdev);
 901		return -ENOMEM;
 902	}
 903
 904	mutex_init(&rdev->mode_info.atom_context->mutex);
 905	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 906	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 907	return 0;
 908}
 909
 910/**
 911 * radeon_atombios_fini - free the driver info and callbacks for atombios
 912 *
 913 * @rdev: radeon_device pointer
 914 *
 915 * Frees the driver info and register access callbacks for the ATOM
 916 * interpreter (r4xx+).
 917 * Called at driver shutdown.
 918 */
 919void radeon_atombios_fini(struct radeon_device *rdev)
 920{
 921	if (rdev->mode_info.atom_context) {
 922		kfree(rdev->mode_info.atom_context->scratch);
 923	}
 924	kfree(rdev->mode_info.atom_context);
 925	rdev->mode_info.atom_context = NULL;
 926	kfree(rdev->mode_info.atom_card_info);
 927	rdev->mode_info.atom_card_info = NULL;
 928}
 929
 930/* COMBIOS */
 931/*
 932 * COMBIOS is the bios format prior to ATOM. It provides
 933 * command tables similar to ATOM, but doesn't have a unified
 934 * parser.  See radeon_combios.c
 935 */
 936
 937/**
 938 * radeon_combios_init - init the driver info for combios
 939 *
 940 * @rdev: radeon_device pointer
 941 *
 942 * Initializes the driver info for combios (r1xx-r3xx).
 943 * Returns 0 on success.
 944 * Called at driver startup.
 945 */
 946int radeon_combios_init(struct radeon_device *rdev)
 947{
 948	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
 949	return 0;
 950}
 951
 952/**
 953 * radeon_combios_fini - free the driver info for combios
 954 *
 955 * @rdev: radeon_device pointer
 956 *
 957 * Frees the driver info for combios (r1xx-r3xx).
 958 * Called at driver shutdown.
 959 */
 960void radeon_combios_fini(struct radeon_device *rdev)
 961{
 962}
 963
 964/* if we get transitioned to only one device, take VGA back */
 965/**
 966 * radeon_vga_set_decode - enable/disable vga decode
 967 *
 968 * @cookie: radeon_device pointer
 969 * @state: enable/disable vga decode
 970 *
 971 * Enable/disable vga decode (all asics).
 972 * Returns VGA resource flags.
 973 */
 974static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 975{
 976	struct radeon_device *rdev = cookie;
 977	radeon_vga_set_state(rdev, state);
 978	if (state)
 979		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 980		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 981	else
 982		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 983}
 984
 985/**
 986 * radeon_check_pot_argument - check that argument is a power of two
 987 *
 988 * @arg: value to check
 989 *
 990 * Validates that a certain argument is a power of two (all asics).
 991 * Returns true if argument is valid.
 992 */
 993static bool radeon_check_pot_argument(int arg)
 994{
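    	/* A power of two has a single bit set, so arg & (arg - 1) clears
    	 * that bit and yields zero; e.g. 64 & 63 == 0 while 48 & 47 != 0.
    	 * Note that 0 also passes this check, which the radeon_vram_limit
    	 * handling in radeon_check_arguments() relies on (0 is treated
    	 * as "no limit").
    	 */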
 995	return (arg & (arg - 1)) == 0;
 996}
 997
 998/**
 999 * radeon_check_arguments - validate module params
1000 *
1001 * @rdev: radeon_device pointer
1002 *
1003 * Validates certain module parameters and updates
1004 * the associated values used by the driver (all asics).
1005 */
1006static void radeon_check_arguments(struct radeon_device *rdev)
1007{
1008	/* vramlimit must be a power of two */
1009	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1010		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1011				radeon_vram_limit);
1012		radeon_vram_limit = 0;
1013	}
1014
1015	if (radeon_gart_size == -1) {
1016		/* default to a larger gart size on newer asics */
1017		if (rdev->family >= CHIP_RV770)
1018			radeon_gart_size = 1024;
1019		else
1020			radeon_gart_size = 512;
1021	}
1022	/* gtt size must be a power of two and greater than or equal to 32M */
1023	if (radeon_gart_size < 32) {
1024		dev_warn(rdev->dev, "gart size (%d) too small\n",
1025				radeon_gart_size);
1026		if (rdev->family >= CHIP_RV770)
1027			radeon_gart_size = 1024;
1028		else
1029			radeon_gart_size = 512;
1030	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1031		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1032				radeon_gart_size);
1033		if (rdev->family >= CHIP_RV770)
1034			radeon_gart_size = 1024;
1035		else
1036			radeon_gart_size = 512;
1037	}
1038	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1039
1040	/* AGP mode can only be -1, 1, 2, 4, 8 */
1041	switch (radeon_agpmode) {
1042	case -1:
1043	case 0:
1044	case 1:
1045	case 2:
1046	case 4:
1047	case 8:
1048		break;
1049	default:
1050		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1051				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1052		radeon_agpmode = 0;
1053		break;
1054	}
1055}
1056
1057/**
1058 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1059 * needed for waking up.
1060 *
1061 * @pdev: pci dev pointer
1062 */
1063static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1064{
1065
1066	/* 6600m in a macbook pro */
1067	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1068	    pdev->subsystem_device == 0x00e2) {
1069		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1070		return true;
1071	}
1072
1073	return false;
1074}
1075
1076/**
1077 * radeon_switcheroo_set_state - set switcheroo state
1078 *
1079 * @pdev: pci dev pointer
1080 * @state: vga switcheroo state
1081 *
1082 * Callback for the switcheroo driver.  Suspends or resumes the
1083 * asic before or after it is powered up using ACPI methods.
1084 */
1085static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1086{
1087	struct drm_device *dev = pci_get_drvdata(pdev);
1088
1089	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1090		return;
1091
1092	if (state == VGA_SWITCHEROO_ON) {
1093		unsigned d3_delay = dev->pdev->d3_delay;
1094
1095		printk(KERN_INFO "radeon: switched on\n");
1096		/* don't suspend or resume card normally */
1097		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1098
1099		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
1100			dev->pdev->d3_delay = 20;
1101
1102		radeon_resume_kms(dev, true, true);
1103
1104		dev->pdev->d3_delay = d3_delay;
1105
1106		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1107		drm_kms_helper_poll_enable(dev);
1108	} else {
1109		printk(KERN_INFO "radeon: switched off\n");
1110		drm_kms_helper_poll_disable(dev);
1111		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1112		radeon_suspend_kms(dev, true, true);
1113		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1114	}
1115}
1116
1117/**
1118 * radeon_switcheroo_can_switch - see if switcheroo state can change
1119 *
1120 * @pdev: pci dev pointer
1121 *
1122 * Callback for the switcheroo driver.  Checks if the switcheroo
1123 * state can be changed.
1124 * Returns true if the state can be changed, false if not.
1125 */
1126static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1127{
1128	struct drm_device *dev = pci_get_drvdata(pdev);
1129	bool can_switch;
1130
1131	spin_lock(&dev->count_lock);
1132	can_switch = (dev->open_count == 0);
1133	spin_unlock(&dev->count_lock);
1134	return can_switch;
1135}
1136
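    /* vga_switcheroo client callbacks; registered unconditionally from
     * radeon_device_init() below, they matter on hybrid-graphics
     * (PX/muxed) systems where the discrete GPU can be switched off.
     */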
1137static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1138	.set_gpu_state = radeon_switcheroo_set_state,
1139	.reprobe = NULL,
1140	.can_switch = radeon_switcheroo_can_switch,
1141};
1142
1143/**
1144 * radeon_device_init - initialize the driver
1145 *
1146 * @rdev: radeon_device pointer
1147 * @ddev: drm dev pointer
1148 * @pdev: pci dev pointer
1149 * @flags: driver flags
1150 *
1151 * Initializes the driver info and hw (all asics).
1152 * Returns 0 for success or an error on failure.
1153 * Called at driver startup.
1154 */
1155int radeon_device_init(struct radeon_device *rdev,
1156		       struct drm_device *ddev,
1157		       struct pci_dev *pdev,
1158		       uint32_t flags)
1159{
1160	int r, i;
1161	int dma_bits;
1162	bool runtime = false;
1163
1164	rdev->shutdown = false;
1165	rdev->dev = &pdev->dev;
1166	rdev->ddev = ddev;
1167	rdev->pdev = pdev;
1168	rdev->flags = flags;
1169	rdev->family = flags & RADEON_FAMILY_MASK;
1170	rdev->is_atom_bios = false;
1171	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1172	rdev->mc.gtt_size = 512 * 1024 * 1024;
1173	rdev->accel_working = false;
1174	/* set up ring ids */
1175	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1176		rdev->ring[i].idx = i;
1177	}
1178
1179	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1180		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1181		pdev->subsystem_vendor, pdev->subsystem_device);
1182
1183	/* all mutex initialization is done here so that these
1184	 * functions can be called again without locking issues */
1185	mutex_init(&rdev->ring_lock);
1186	mutex_init(&rdev->dc_hw_i2c_mutex);
1187	atomic_set(&rdev->ih.lock, 0);
1188	mutex_init(&rdev->gem.mutex);
1189	mutex_init(&rdev->pm.mutex);
1190	mutex_init(&rdev->gpu_clock_mutex);
1191	mutex_init(&rdev->srbm_mutex);
1192	init_rwsem(&rdev->pm.mclk_lock);
1193	init_rwsem(&rdev->exclusive_lock);
1194	init_waitqueue_head(&rdev->irq.vblank_queue);
1195	r = radeon_gem_init(rdev);
1196	if (r)
1197		return r;
1198
1199	/* Adjust VM size here.
1200	 * Currently set to 4GB ((1 << 20) 4k pages).
1201	 * Max GPUVM size for cayman and SI is 40 bits.
1202	 */
1203	rdev->vm_manager.max_pfn = 1 << 20;
1204
1205	/* Set asic functions */
1206	r = radeon_asic_init(rdev);
1207	if (r)
1208		return r;
1209	radeon_check_arguments(rdev);
1210
1211	/* all of the newer IGP chips have an internal GART.
1212	 * However, some rs4xx report as AGP, so remove that flag here.
1213	 */
1214	if ((rdev->family >= CHIP_RS400) &&
1215	    (rdev->flags & RADEON_IS_IGP)) {
1216		rdev->flags &= ~RADEON_IS_AGP;
1217	}
1218
1219	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1220		radeon_agp_disable(rdev);
1221	}
1222
1223	/* Set the internal MC address mask
1224	 * This is the max address of the GPU's
1225	 * internal address space.
1226	 */
1227	if (rdev->family >= CHIP_CAYMAN)
1228		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1229	else if (rdev->family >= CHIP_CEDAR)
1230		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1231	else
1232		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1233
1234	/* set DMA mask + need_dma32 flags.
1235	 * PCIE - can handle 40-bits.
1236	 * IGP - can handle 40-bits
1237	 * AGP - generally dma32 is safest
1238	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1239	 */
1240	rdev->need_dma32 = false;
1241	if (rdev->flags & RADEON_IS_AGP)
1242		rdev->need_dma32 = true;
1243	if ((rdev->flags & RADEON_IS_PCI) &&
1244	    (rdev->family <= CHIP_RS740))
1245		rdev->need_dma32 = true;
1246
1247	dma_bits = rdev->need_dma32 ? 32 : 40;
1248	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1249	if (r) {
1250		rdev->need_dma32 = true;
1251		dma_bits = 32;
1252		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1253	}
1254	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1255	if (r) {
1256		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1257		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1258	}
1259
1260	/* Registers mapping */
1261	/* TODO: block userspace mapping of io register */
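    	/* the *_idx_lock spinlocks below serialize the indexed
    	 * (index/data pair) register interfaces (PLL, MC, PCIE, SMC, ...)
    	 * used by the register accessor helpers.
    	 */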
1262	spin_lock_init(&rdev->mmio_idx_lock);
1263	spin_lock_init(&rdev->smc_idx_lock);
1264	spin_lock_init(&rdev->pll_idx_lock);
1265	spin_lock_init(&rdev->mc_idx_lock);
1266	spin_lock_init(&rdev->pcie_idx_lock);
1267	spin_lock_init(&rdev->pciep_idx_lock);
1268	spin_lock_init(&rdev->pif_idx_lock);
1269	spin_lock_init(&rdev->cg_idx_lock);
1270	spin_lock_init(&rdev->uvd_idx_lock);
1271	spin_lock_init(&rdev->rcu_idx_lock);
1272	spin_lock_init(&rdev->didt_idx_lock);
1273	spin_lock_init(&rdev->end_idx_lock);
1274	if (rdev->family >= CHIP_BONAIRE) {
1275		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1276		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1277	} else {
1278		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1279		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1280	}
1281	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1282	if (rdev->rmmio == NULL) {
1283		return -ENOMEM;
1284	}
1285	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1286	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1287
1288	/* doorbell bar mapping */
1289	if (rdev->family >= CHIP_BONAIRE)
1290		radeon_doorbell_init(rdev);
1291
1292	/* io port mapping */
1293	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1294		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1295			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1296			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1297			break;
1298		}
1299	}
1300	if (rdev->rio_mem == NULL)
1301		DRM_ERROR("Unable to find PCI I/O BAR\n");
1302
1303	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
1304	/* this will fail for cards that aren't VGA class devices, just
1305	 * ignore it */
1306	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1307
1308	if (rdev->flags & RADEON_IS_PX)
1309		runtime = true;
1310	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1311	if (runtime)
1312		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1313
1314	r = radeon_init(rdev);
1315	if (r)
1316		return r;
1317
1318	r = radeon_ib_ring_tests(rdev);
1319	if (r)
1320		DRM_ERROR("ib ring test failed (%d).\n", r);
1321
1322	r = radeon_gem_debugfs_init(rdev);
1323	if (r) {
1324		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1325	}
1326
1327	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1328		/* Acceleration not working on AGP card, try again
1329		 * with fallback to PCI or PCIE GART
1330		 */
1331		radeon_asic_reset(rdev);
1332		radeon_fini(rdev);
1333		radeon_agp_disable(rdev);
1334		r = radeon_init(rdev);
1335		if (r)
1336			return r;
1337	}
1338
1339	if ((radeon_testing & 1)) {
1340		if (rdev->accel_working)
1341			radeon_test_moves(rdev);
1342		else
1343			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1344	}
1345	if ((radeon_testing & 2)) {
1346		if (rdev->accel_working)
1347			radeon_test_syncing(rdev);
1348		else
1349			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1350	}
1351	if (radeon_benchmarking) {
1352		if (rdev->accel_working)
1353			radeon_benchmark(rdev, radeon_benchmarking);
1354		else
1355			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1356	}
1357	return 0;
1358}
1359
1360static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1361
1362/**
1363 * radeon_device_fini - tear down the driver
1364 *
1365 * @rdev: radeon_device pointer
1366 *
1367 * Tear down the driver info (all asics).
1368 * Called at driver shutdown.
1369 */
1370void radeon_device_fini(struct radeon_device *rdev)
1371{
1372	DRM_INFO("radeon: finishing device.\n");
1373	rdev->shutdown = true;
1374	/* evict vram memory */
1375	radeon_bo_evict_vram(rdev);
1376	radeon_fini(rdev);
1377	vga_switcheroo_unregister_client(rdev->pdev);
1378	vga_client_register(rdev->pdev, NULL, NULL, NULL);
1379	if (rdev->rio_mem)
1380		pci_iounmap(rdev->pdev, rdev->rio_mem);
1381	rdev->rio_mem = NULL;
1382	iounmap(rdev->rmmio);
1383	rdev->rmmio = NULL;
1384	if (rdev->family >= CHIP_BONAIRE)
1385		radeon_doorbell_fini(rdev);
1386	radeon_debugfs_remove_files(rdev);
1387}
1388
1389
1390/*
1391 * Suspend & resume.
1392 */
1393/**
1394 * radeon_suspend_kms - initiate device suspend
1395 *
1396 * @dev: drm dev pointer
1397 * @suspend: true to also disable the PCI device and put it into D3hot
     * @fbcon: true to suspend the fbdev console around the state change
1398 *
1399 * Puts the hw in the suspend state (all asics).
1400 * Returns 0 for success or an error on failure.
1401 * Called at driver suspend.
1402 */
1403int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1404{
1405	struct radeon_device *rdev;
1406	struct drm_crtc *crtc;
1407	struct drm_connector *connector;
1408	int i, r;
1409	bool force_completion = false;
1410
1411	if (dev == NULL || dev->dev_private == NULL) {
1412		return -ENODEV;
1413	}
1414
1415	rdev = dev->dev_private;
1416
1417	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1418		return 0;
1419
1420	drm_kms_helper_poll_disable(dev);
1421
1422	/* turn off display hw */
1423	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1424		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1425	}
1426
1427	/* unpin the front buffers */
1428	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1429		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
1430		struct radeon_bo *robj;
1431
1432		if (rfb == NULL || rfb->obj == NULL) {
1433			continue;
1434		}
1435		robj = gem_to_radeon_bo(rfb->obj);
1436		/* don't unpin kernel fb objects */
1437		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1438			r = radeon_bo_reserve(robj, false);
1439			if (r == 0) {
1440				radeon_bo_unpin(robj);
1441				radeon_bo_unreserve(robj);
1442			}
1443		}
1444	}
1445	/* evict vram memory */
1446	radeon_bo_evict_vram(rdev);
1447
1448	/* wait for gpu to finish processing current batch */
1449	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1450		r = radeon_fence_wait_empty(rdev, i);
1451		if (r) {
1452			/* delay GPU reset to resume */
1453			force_completion = true;
1454		}
1455	}
1456	if (force_completion) {
1457		radeon_fence_driver_force_completion(rdev);
1458	}
1459
1460	radeon_save_bios_scratch_regs(rdev);
1461
1462	radeon_suspend(rdev);
1463	radeon_hpd_fini(rdev);
1464	/* evict remaining vram memory */
1465	radeon_bo_evict_vram(rdev);
1466
1467	radeon_agp_suspend(rdev);
1468
1469	pci_save_state(dev->pdev);
1470	if (suspend) {
1471		/* Shut down the device */
1472		pci_disable_device(dev->pdev);
1473		pci_set_power_state(dev->pdev, PCI_D3hot);
1474	}
1475
1476	if (fbcon) {
1477		console_lock();
1478		radeon_fbdev_set_suspend(rdev, 1);
1479		console_unlock();
1480	}
1481	return 0;
1482}
1483
1484/**
1485 * radeon_resume_kms - initiate device resume
1486 *
1487 * @dev: drm dev pointer
     * @resume: true to re-enable the PCI device and restore its power state
     * @fbcon: true to resume the fbdev console once the hw is back up
1488 *
1489 * Bring the hw back to operating state (all asics).
1490 * Returns 0 for success or an error on failure.
1491 * Called at driver resume.
1492 */
1493int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1494{
1495	struct drm_connector *connector;
1496	struct radeon_device *rdev = dev->dev_private;
1497	int r;
1498
1499	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1500		return 0;
1501
1502	if (fbcon) {
1503		console_lock();
1504	}
1505	if (resume) {
1506		pci_set_power_state(dev->pdev, PCI_D0);
1507		pci_restore_state(dev->pdev);
1508		if (pci_enable_device(dev->pdev)) {
1509			if (fbcon)
1510				console_unlock();
1511			return -1;
1512		}
1513	}
1514	/* resume AGP if in use */
1515	radeon_agp_resume(rdev);
1516	radeon_resume(rdev);
1517
1518	r = radeon_ib_ring_tests(rdev);
1519	if (r)
1520		DRM_ERROR("ib ring test failed (%d).\n", r);
1521
1522	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1523		/* do dpm late init */
1524		r = radeon_pm_late_init(rdev);
1525		if (r) {
1526			rdev->pm.dpm_enabled = false;
1527			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1528		}
1529	} else {
1530		/* resume old pm late */
1531		radeon_pm_resume(rdev);
1532	}
1533
1534	radeon_restore_bios_scratch_regs(rdev);
1535
1536	/* init dig PHYs, disp eng pll */
1537	if (rdev->is_atom_bios) {
1538		radeon_atom_encoder_init(rdev);
1539		radeon_atom_disp_eng_pll_init(rdev);
1540		/* turn on the BL */
1541		if (rdev->mode_info.bl_encoder) {
1542			u8 bl_level = radeon_get_backlight_level(rdev,
1543								 rdev->mode_info.bl_encoder);
1544			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1545						   bl_level);
1546		}
1547	}
1548	/* reset hpd state */
1549	radeon_hpd_init(rdev);
1550	/* blat the mode back in */
1551	if (fbcon) {
1552		drm_helper_resume_force_mode(dev);
1553		/* turn on display hw */
1554		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1555			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1556		}
1557	}
1558
1559	drm_kms_helper_poll_enable(dev);
1560
1561	/* set the power state here in case we are a PX system or headless */
1562	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1563		radeon_pm_compute_clocks(rdev);
1564
1565	if (fbcon) {
1566		radeon_fbdev_set_suspend(rdev, 0);
1567		console_unlock();
1568	}
1569
1570	return 0;
1571}
1572
1573/**
1574 * radeon_gpu_reset - reset the asic
1575 *
1576 * @rdev: radeon device pointer
1577 *
1578 * Attempts to reset the GPU if it has hung (all asics).
1579 * Returns 0 for success or an error on failure.
1580 */
1581int radeon_gpu_reset(struct radeon_device *rdev)
1582{
1583	unsigned ring_sizes[RADEON_NUM_RINGS];
1584	uint32_t *ring_data[RADEON_NUM_RINGS];
1585
1586	bool saved = false;
1587
1588	int i, r;
1589	int resched;
1590
1591	down_write(&rdev->exclusive_lock);
1592
1593	if (!rdev->needs_reset) {
1594		up_write(&rdev->exclusive_lock);
1595		return 0;
1596	}
1597
1598	rdev->needs_reset = false;
1599
1600	radeon_save_bios_scratch_regs(rdev);
1601	/* block TTM */
1602	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1603	radeon_pm_suspend(rdev);
1604	radeon_suspend(rdev);
1605
1606	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1607		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1608						   &ring_data[i]);
1609		if (ring_sizes[i]) {
1610			saved = true;
1611			dev_info(rdev->dev, "Saved %d dwords of commands "
1612				 "on ring %d.\n", ring_sizes[i], i);
1613		}
1614	}
1615
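    	/* If the IB tests fail after restoring the saved ring contents,
    	 * the error path below drops the saved commands (saved = false)
    	 * and jumps back here to retry the reset once more with empty rings.
    	 */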
1616retry:
1617	r = radeon_asic_reset(rdev);
1618	if (!r) {
1619		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1620		radeon_resume(rdev);
1621	}
1622
1623	radeon_restore_bios_scratch_regs(rdev);
1624
1625	if (!r) {
1626		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1627			radeon_ring_restore(rdev, &rdev->ring[i],
1628					    ring_sizes[i], ring_data[i]);
1629			ring_sizes[i] = 0;
1630			ring_data[i] = NULL;
1631		}
1632
1633		r = radeon_ib_ring_tests(rdev);
1634		if (r) {
1635			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1636			if (saved) {
1637				saved = false;
1638				radeon_suspend(rdev);
1639				goto retry;
1640			}
1641		}
1642	} else {
1643		radeon_fence_driver_force_completion(rdev);
1644		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1645			kfree(ring_data[i]);
1646		}
1647	}
1648
1649	radeon_pm_resume(rdev);
1650	drm_helper_resume_force_mode(rdev->ddev);
1651
1652	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1653	if (r) {
1654		/* bad news, how do we tell userspace? */
1655		dev_info(rdev->dev, "GPU reset failed\n");
1656	}
1657
1658	up_write(&rdev->exclusive_lock);
1659	return r;
1660}
1661
1662
1663/*
1664 * Debugfs
1665 */
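
    /**
     * radeon_debugfs_add_files - register a set of debugfs files
     *
     * @rdev: radeon_device pointer
     * @files: array of drm_info_list entries to register
     * @nfiles: number of entries in @files
     *
     * Registers the given debugfs files on both the control and primary
     * DRM minors, skipping sets that were already added.
     * Returns 0 on success (or if already registered), -EINVAL if the
     * RADEON_DEBUGFS_MAX_COMPONENTS limit is reached.
     */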
1666int radeon_debugfs_add_files(struct radeon_device *rdev,
1667			     struct drm_info_list *files,
1668			     unsigned nfiles)
1669{
1670	unsigned i;
1671
1672	for (i = 0; i < rdev->debugfs_count; i++) {
1673		if (rdev->debugfs[i].files == files) {
1674			/* Already registered */
1675			return 0;
1676		}
1677	}
1678
1679	i = rdev->debugfs_count + 1;
1680	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1681		DRM_ERROR("Reached maximum number of debugfs components.\n");
1682		DRM_ERROR("Report this so we can increase "
1683		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1684		return -EINVAL;
1685	}
1686	rdev->debugfs[rdev->debugfs_count].files = files;
1687	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1688	rdev->debugfs_count = i;
1689#if defined(CONFIG_DEBUG_FS)
1690	drm_debugfs_create_files(files, nfiles,
1691				 rdev->ddev->control->debugfs_root,
1692				 rdev->ddev->control);
1693	drm_debugfs_create_files(files, nfiles,
1694				 rdev->ddev->primary->debugfs_root,
1695				 rdev->ddev->primary);
1696#endif
1697	return 0;
1698}
1699
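    /* Remove every debugfs file set previously registered with
     * radeon_debugfs_add_files(); called from radeon_device_fini()
     * at driver teardown.
     */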
1700static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1701{
1702#if defined(CONFIG_DEBUG_FS)
1703	unsigned i;
1704
1705	for (i = 0; i < rdev->debugfs_count; i++) {
1706		drm_debugfs_remove_files(rdev->debugfs[i].files,
1707					 rdev->debugfs[i].num_files,
1708					 rdev->ddev->control);
1709		drm_debugfs_remove_files(rdev->debugfs[i].files,
1710					 rdev->debugfs[i].num_files,
1711					 rdev->ddev->primary);
1712	}
1713#endif
1714}
1715
1716#if defined(CONFIG_DEBUG_FS)
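    /* Stub debugfs init/cleanup hooks for the drm minor; nothing to do
     * here because per-component files are registered on demand via
     * radeon_debugfs_add_files().
     */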
1717int radeon_debugfs_init(struct drm_minor *minor)
1718{
1719	return 0;
1720}
1721
1722void radeon_debugfs_cleanup(struct drm_minor *minor)
1723{
1724}
1725#endif