/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
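/*
 * The ASIC_T field is a 9-bit two's-complement value: bit 8 is the sign
 * and the low 8 bits the magnitude, decoded below.  Worked example with a
 * hypothetical raw field of 0x1F0: actual_temp = 0xF0 - 256 = -16, which
 * is returned as -16000 millidegrees.
 */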
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

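/*
 * Dynamic PM state selection.  The planned action (minimum, downclock,
 * upclock, default) picks a requested power state index and clock mode
 * index from the power state array, which is sorted low to high; the
 * dynpm_can_upclock/can_downclock flags tell the caller when the top or
 * bottom of the range has been reached.  States flagged
 * SINGLE_DISPLAY_ONLY are skipped when more than one CRTC is active, and
 * NO_DISPLAY modes are skipped whenever any CRTC is active.
 */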
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

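/*
 * Return the index of the instance'th power state of the given type.
 * Worked example with a hypothetical state array { default, battery,
 * battery, performance }: ps_type = POWER_STATE_TYPE_BATTERY and
 * instance = 1 returns index 2.  If no match is found, the default
 * power state index is returned instead.
 */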
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

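/*
 * Profile setup: each PM profile entry stores the power state index and
 * clock mode index to use with displays off (dpms_off_*) and displays on
 * (dpms_on_*), for the default, low/mid/high single-head (sh) and
 * multi-head (mh) profiles.  Which fixed indices are used depends on how
 * many power states the ASIC's tables expose.
 */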
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

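/*
 * HPD interrupt polarity: the polarity bit is programmed from the current
 * sense state so that the next interrupt fires on a change, i.e. a
 * connected pad is armed for unplug and a disconnected pad for plug.
 */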
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

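/*
 * On DCE3 parts every pad gets the same control value: a connection timer
 * of 0x9c4 (2500) and an RX interrupt timer of 0xfa (250), with the enable
 * bit added on DCE3.2.  Pre-DCE3 parts only expose three hot plug detect
 * pads.
 */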
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
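/*
 * The GART page table lives in VRAM and uses one 64-bit entry per GPU
 * page, hence table_size = num_gpu_pages * 8 in r600_pcie_gart_init()
 * below.  After any table update the HDP cache must be flushed and the
 * VM context invalidated (r600_pcie_gart_tlb_flush) before the GPU sees
 * the new mappings.
 */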
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the invalidation request's response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the MC busy bits in SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

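/*
 * MC_VM_FB_LOCATION packs the VRAM range as two 16-bit fields in 16MB
 * units: bits 31:16 hold vram_end >> 24 and bits 15:0 hold
 * vram_start >> 24.  Worked example with a hypothetical 512MB aperture
 * at address 0: start field 0x0000, end field 0x001F, so the register
 * is written with 0x001F0000.
 */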
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same offset in the GPU address
 * space as in the CPU (PCI) address space, as some GPUs seem to have issues
 * when it is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then VRAM is placed adjacent to the AGP aperture, as
 * we need the two to form one range from the GPU's point of view so that we
 * can program the GPU to catch accesses outside them (weird GPU policy,
 * see ??).
 *
 * This function never fails; in the worst case it limits VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset and leave it up to the caller to determine if the GPU needs one.
 * We might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

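/*
 * Lockup check: if the GUI is idle the lockup tracker is simply
 * refreshed.  Otherwise two PACKET2 NOPs (0x80000000) are submitted to
 * force CP activity, and the tracker compares the ring read pointer over
 * time to decide whether the CP is still making progress.
 */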
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	struct r100_gpu_lockup *lockup;
	int r;

	if (rdev->family >= CHIP_RV770)
		lockup = &rdev->config.rv770.lockup;
	else
		lockup = &rdev->config.r600.lockup;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activity */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

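/*
 * Build the pipe-to-backend map: each tile pipe gets a 2-bit field
 * (hence the "& 3") selecting which enabled render backend it routes to,
 * with pipes walked in a swizzled order and backends assigned
 * round-robin.  Worked example with 2 pipes and 2 enabled backends:
 * pipe 0 -> backend 0 (bits 1:0 = 00), pipe 1 -> backend 1
 * (bits 3:2 = 01), giving backend_map = 0x4.
 */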
1395static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1396					     u32 num_backends,
1397					     u32 backend_disable_mask)
1398{
1399	u32 backend_map = 0;
1400	u32 enabled_backends_mask;
1401	u32 enabled_backends_count;
1402	u32 cur_pipe;
1403	u32 swizzle_pipe[R6XX_MAX_PIPES];
1404	u32 cur_backend;
1405	u32 i;
1406
1407	if (num_tile_pipes > R6XX_MAX_PIPES)
1408		num_tile_pipes = R6XX_MAX_PIPES;
1409	if (num_tile_pipes < 1)
1410		num_tile_pipes = 1;
1411	if (num_backends > R6XX_MAX_BACKENDS)
1412		num_backends = R6XX_MAX_BACKENDS;
1413	if (num_backends < 1)
1414		num_backends = 1;
1415
1416	enabled_backends_mask = 0;
1417	enabled_backends_count = 0;
1418	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1419		if (((backend_disable_mask >> i) & 1) == 0) {
1420			enabled_backends_mask |= (1 << i);
1421			++enabled_backends_count;
1422		}
1423		if (enabled_backends_count == num_backends)
1424			break;
1425	}
1426
1427	if (enabled_backends_count == 0) {
1428		enabled_backends_mask = 1;
1429		enabled_backends_count = 1;
1430	}
1431
1432	if (enabled_backends_count != num_backends)
1433		num_backends = enabled_backends_count;
1434
1435	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1436	switch (num_tile_pipes) {
1437	case 1:
1438		swizzle_pipe[0] = 0;
1439		break;
1440	case 2:
1441		swizzle_pipe[0] = 0;
1442		swizzle_pipe[1] = 1;
1443		break;
1444	case 3:
1445		swizzle_pipe[0] = 0;
1446		swizzle_pipe[1] = 1;
1447		swizzle_pipe[2] = 2;
1448		break;
1449	case 4:
1450		swizzle_pipe[0] = 0;
1451		swizzle_pipe[1] = 1;
1452		swizzle_pipe[2] = 2;
1453		swizzle_pipe[3] = 3;
1454		break;
1455	case 5:
1456		swizzle_pipe[0] = 0;
1457		swizzle_pipe[1] = 1;
1458		swizzle_pipe[2] = 2;
1459		swizzle_pipe[3] = 3;
1460		swizzle_pipe[4] = 4;
1461		break;
1462	case 6:
1463		swizzle_pipe[0] = 0;
1464		swizzle_pipe[1] = 2;
1465		swizzle_pipe[2] = 4;
1466		swizzle_pipe[3] = 5;
1467		swizzle_pipe[4] = 1;
1468		swizzle_pipe[5] = 3;
1469		break;
1470	case 7:
1471		swizzle_pipe[0] = 0;
1472		swizzle_pipe[1] = 2;
1473		swizzle_pipe[2] = 4;
1474		swizzle_pipe[3] = 6;
1475		swizzle_pipe[4] = 1;
1476		swizzle_pipe[5] = 3;
1477		swizzle_pipe[6] = 5;
1478		break;
1479	case 8:
1480		swizzle_pipe[0] = 0;
1481		swizzle_pipe[1] = 2;
1482		swizzle_pipe[2] = 4;
1483		swizzle_pipe[3] = 6;
1484		swizzle_pipe[4] = 1;
1485		swizzle_pipe[5] = 3;
1486		swizzle_pipe[6] = 5;
1487		swizzle_pipe[7] = 7;
1488		break;
1489	}
1490
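	/* Walk the pipes in swizzled order and hand each one an enabled
	 * backend, round-robin; every pipe owns a 2-bit slot in backend_map. */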
1491	cur_backend = 0;
1492	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1493		while (((1 << cur_backend) & enabled_backends_mask) == 0)
1494			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1495
1496		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1497
1498		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1499	}
1500
1501	return backend_map;
1502}
1503
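/* Return the population count (number of set bits) of val.  Functionally
 * the same as the kernel's hweight32(), kept open-coded here.
 */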
1504int r600_count_pipe_bits(uint32_t val)
1505{
1506	int i, ret = 0;
1507
1508	for (i = 0; i < 32; i++) {
1509		ret += val & 1;
1510		val >>= 1;
1511	}
1512	return ret;
1513}
1514
1515void r600_gpu_init(struct radeon_device *rdev)
1516{
1517	u32 tiling_config;
1518	u32 ramcfg;
1519	u32 backend_map;
1520	u32 cc_rb_backend_disable;
1521	u32 cc_gc_shader_pipe_config;
1522	u32 tmp;
1523	int i, j;
1524	u32 sq_config;
1525	u32 sq_gpr_resource_mgmt_1 = 0;
1526	u32 sq_gpr_resource_mgmt_2 = 0;
1527	u32 sq_thread_resource_mgmt = 0;
1528	u32 sq_stack_resource_mgmt_1 = 0;
1529	u32 sq_stack_resource_mgmt_2 = 0;
1530
1531	/* FIXME: implement */
1532	switch (rdev->family) {
1533	case CHIP_R600:
1534		rdev->config.r600.max_pipes = 4;
1535		rdev->config.r600.max_tile_pipes = 8;
1536		rdev->config.r600.max_simds = 4;
1537		rdev->config.r600.max_backends = 4;
1538		rdev->config.r600.max_gprs = 256;
1539		rdev->config.r600.max_threads = 192;
1540		rdev->config.r600.max_stack_entries = 256;
1541		rdev->config.r600.max_hw_contexts = 8;
1542		rdev->config.r600.max_gs_threads = 16;
1543		rdev->config.r600.sx_max_export_size = 128;
1544		rdev->config.r600.sx_max_export_pos_size = 16;
1545		rdev->config.r600.sx_max_export_smx_size = 128;
1546		rdev->config.r600.sq_num_cf_insts = 2;
1547		break;
1548	case CHIP_RV630:
1549	case CHIP_RV635:
1550		rdev->config.r600.max_pipes = 2;
1551		rdev->config.r600.max_tile_pipes = 2;
1552		rdev->config.r600.max_simds = 3;
1553		rdev->config.r600.max_backends = 1;
1554		rdev->config.r600.max_gprs = 128;
1555		rdev->config.r600.max_threads = 192;
1556		rdev->config.r600.max_stack_entries = 128;
1557		rdev->config.r600.max_hw_contexts = 8;
1558		rdev->config.r600.max_gs_threads = 4;
1559		rdev->config.r600.sx_max_export_size = 128;
1560		rdev->config.r600.sx_max_export_pos_size = 16;
1561		rdev->config.r600.sx_max_export_smx_size = 128;
1562		rdev->config.r600.sq_num_cf_insts = 2;
1563		break;
1564	case CHIP_RV610:
1565	case CHIP_RV620:
1566	case CHIP_RS780:
1567	case CHIP_RS880:
1568		rdev->config.r600.max_pipes = 1;
1569		rdev->config.r600.max_tile_pipes = 1;
1570		rdev->config.r600.max_simds = 2;
1571		rdev->config.r600.max_backends = 1;
1572		rdev->config.r600.max_gprs = 128;
1573		rdev->config.r600.max_threads = 192;
1574		rdev->config.r600.max_stack_entries = 128;
1575		rdev->config.r600.max_hw_contexts = 4;
1576		rdev->config.r600.max_gs_threads = 4;
1577		rdev->config.r600.sx_max_export_size = 128;
1578		rdev->config.r600.sx_max_export_pos_size = 16;
1579		rdev->config.r600.sx_max_export_smx_size = 128;
1580		rdev->config.r600.sq_num_cf_insts = 1;
1581		break;
1582	case CHIP_RV670:
1583		rdev->config.r600.max_pipes = 4;
1584		rdev->config.r600.max_tile_pipes = 4;
1585		rdev->config.r600.max_simds = 4;
1586		rdev->config.r600.max_backends = 4;
1587		rdev->config.r600.max_gprs = 192;
1588		rdev->config.r600.max_threads = 192;
1589		rdev->config.r600.max_stack_entries = 256;
1590		rdev->config.r600.max_hw_contexts = 8;
1591		rdev->config.r600.max_gs_threads = 16;
1592		rdev->config.r600.sx_max_export_size = 128;
1593		rdev->config.r600.sx_max_export_pos_size = 16;
1594		rdev->config.r600.sx_max_export_smx_size = 128;
1595		rdev->config.r600.sq_num_cf_insts = 2;
1596		break;
1597	default:
1598		break;
1599	}
1600
1601	/* Initialize HDP */
1602	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1603		WREG32((0x2c14 + j), 0x00000000);
1604		WREG32((0x2c18 + j), 0x00000000);
1605		WREG32((0x2c1c + j), 0x00000000);
1606		WREG32((0x2c20 + j), 0x00000000);
1607		WREG32((0x2c24 + j), 0x00000000);
1608	}
1609
1610	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1611
1612	/* Setup tiling */
1613	tiling_config = 0;
1614	ramcfg = RREG32(RAMCFG);
1615	switch (rdev->config.r600.max_tile_pipes) {
1616	case 1:
1617		tiling_config |= PIPE_TILING(0);
1618		break;
1619	case 2:
1620		tiling_config |= PIPE_TILING(1);
1621		break;
1622	case 4:
1623		tiling_config |= PIPE_TILING(2);
1624		break;
1625	case 8:
1626		tiling_config |= PIPE_TILING(3);
1627		break;
1628	default:
1629		break;
1630	}
1631	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1632	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1633	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1634	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1635	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1636		rdev->config.r600.tiling_group_size = 512;
1637	else
1638		rdev->config.r600.tiling_group_size = 256;
1639	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1640	if (tmp > 3) {
1641		tiling_config |= ROW_TILING(3);
1642		tiling_config |= SAMPLE_SPLIT(3);
1643	} else {
1644		tiling_config |= ROW_TILING(tmp);
1645		tiling_config |= SAMPLE_SPLIT(tmp);
1646	}
1647	tiling_config |= BANK_SWAPS(1);
1648
1649	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1650	cc_rb_backend_disable |=
1651		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1652
1653	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1654	cc_gc_shader_pipe_config |=
1655		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1656	cc_gc_shader_pipe_config |=
1657		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1658
1659	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1660							(R6XX_MAX_BACKENDS -
1661							 r600_count_pipe_bits((cc_rb_backend_disable &
1662									       R6XX_MAX_BACKENDS_MASK) >> 16)),
1663							(cc_rb_backend_disable >> 16));
1664	rdev->config.r600.tile_config = tiling_config;
1665	rdev->config.r600.backend_map = backend_map;
1666	tiling_config |= BACKEND_MAP(backend_map);
1667	WREG32(GB_TILING_CONFIG, tiling_config);
1668	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1669	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1670
1671	/* Setup pipes */
1672	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1673	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1674	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1675
1676	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1677	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1678	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1679
1680	/* Setup some CP states */
1681	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1682	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1683
1684	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1685			     SYNC_WALKER | SYNC_ALIGNER));
1686	/* Setup various GPU states */
1687	if (rdev->family == CHIP_RV670)
1688		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1689
1690	tmp = RREG32(SX_DEBUG_1);
1691	tmp |= SMX_EVENT_RELEASE;
1692	if ((rdev->family > CHIP_R600))
1693		tmp |= ENABLE_NEW_SMX_ADDRESS;
1694	WREG32(SX_DEBUG_1, tmp);
1695
1696	if (((rdev->family) == CHIP_R600) ||
1697	    ((rdev->family) == CHIP_RV630) ||
1698	    ((rdev->family) == CHIP_RV610) ||
1699	    ((rdev->family) == CHIP_RV620) ||
1700	    ((rdev->family) == CHIP_RS780) ||
1701	    ((rdev->family) == CHIP_RS880)) {
1702		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1703	} else {
1704		WREG32(DB_DEBUG, 0);
1705	}
1706	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1707			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1708
1709	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1710	WREG32(VGT_NUM_INSTANCES, 0);
1711
1712	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1713	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1714
1715	tmp = RREG32(SQ_MS_FIFO_SIZES);
1716	if (((rdev->family) == CHIP_RV610) ||
1717	    ((rdev->family) == CHIP_RV620) ||
1718	    ((rdev->family) == CHIP_RS780) ||
1719	    ((rdev->family) == CHIP_RS880)) {
1720		tmp = (CACHE_FIFO_SIZE(0xa) |
1721		       FETCH_FIFO_HIWATER(0xa) |
1722		       DONE_FIFO_HIWATER(0xe0) |
1723		       ALU_UPDATE_FIFO_HIWATER(0x8));
1724	} else if (((rdev->family) == CHIP_R600) ||
1725		   ((rdev->family) == CHIP_RV630)) {
1726		tmp &= ~DONE_FIFO_HIWATER(0xff);
1727		tmp |= DONE_FIFO_HIWATER(0x4);
1728	}
1729	WREG32(SQ_MS_FIFO_SIZES, tmp);
1730
1731	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1732	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
1733	 */
1734	sq_config = RREG32(SQ_CONFIG);
1735	sq_config &= ~(PS_PRIO(3) |
1736		       VS_PRIO(3) |
1737		       GS_PRIO(3) |
1738		       ES_PRIO(3));
1739	sq_config |= (DX9_CONSTS |
1740		      VC_ENABLE |
1741		      PS_PRIO(0) |
1742		      VS_PRIO(1) |
1743		      GS_PRIO(2) |
1744		      ES_PRIO(3));
1745
1746	if ((rdev->family) == CHIP_R600) {
1747		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1748					  NUM_VS_GPRS(124) |
1749					  NUM_CLAUSE_TEMP_GPRS(4));
1750		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1751					  NUM_ES_GPRS(0));
1752		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1753					   NUM_VS_THREADS(48) |
1754					   NUM_GS_THREADS(4) |
1755					   NUM_ES_THREADS(4));
1756		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1757					    NUM_VS_STACK_ENTRIES(128));
1758		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1759					    NUM_ES_STACK_ENTRIES(0));
1760	} else if (((rdev->family) == CHIP_RV610) ||
1761		   ((rdev->family) == CHIP_RV620) ||
1762		   ((rdev->family) == CHIP_RS780) ||
1763		   ((rdev->family) == CHIP_RS880)) {
1764		/* no vertex cache */
1765		sq_config &= ~VC_ENABLE;
1766
1767		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1768					  NUM_VS_GPRS(44) |
1769					  NUM_CLAUSE_TEMP_GPRS(2));
1770		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1771					  NUM_ES_GPRS(17));
1772		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1773					   NUM_VS_THREADS(78) |
1774					   NUM_GS_THREADS(4) |
1775					   NUM_ES_THREADS(31));
1776		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1777					    NUM_VS_STACK_ENTRIES(40));
1778		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1779					    NUM_ES_STACK_ENTRIES(16));
1780	} else if (((rdev->family) == CHIP_RV630) ||
1781		   ((rdev->family) == CHIP_RV635)) {
1782		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1783					  NUM_VS_GPRS(44) |
1784					  NUM_CLAUSE_TEMP_GPRS(2));
1785		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1786					  NUM_ES_GPRS(18));
1787		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1788					   NUM_VS_THREADS(78) |
1789					   NUM_GS_THREADS(4) |
1790					   NUM_ES_THREADS(31));
1791		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1792					    NUM_VS_STACK_ENTRIES(40));
1793		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1794					    NUM_ES_STACK_ENTRIES(16));
1795	} else if ((rdev->family) == CHIP_RV670) {
1796		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1797					  NUM_VS_GPRS(44) |
1798					  NUM_CLAUSE_TEMP_GPRS(2));
1799		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1800					  NUM_ES_GPRS(17));
1801		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1802					   NUM_VS_THREADS(78) |
1803					   NUM_GS_THREADS(4) |
1804					   NUM_ES_THREADS(31));
1805		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1806					    NUM_VS_STACK_ENTRIES(64));
1807		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1808					    NUM_ES_STACK_ENTRIES(64));
1809	}
1810
1811	WREG32(SQ_CONFIG, sq_config);
1812	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
1813	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
1814	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1815	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1816	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1817
1818	if (((rdev->family) == CHIP_RV610) ||
1819	    ((rdev->family) == CHIP_RV620) ||
1820	    ((rdev->family) == CHIP_RS780) ||
1821	    ((rdev->family) == CHIP_RS880)) {
1822		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1823	} else {
1824		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1825	}
1826
1827	/* More default values. 2D/3D driver should adjust as needed */
1828	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1829					 S1_X(0x4) | S1_Y(0xc)));
1830	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1831					 S1_X(0x2) | S1_Y(0x2) |
1832					 S2_X(0xa) | S2_Y(0x6) |
1833					 S3_X(0x6) | S3_Y(0xa)));
1834	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1835					     S1_X(0x4) | S1_Y(0xc) |
1836					     S2_X(0x1) | S2_Y(0x6) |
1837					     S3_X(0xa) | S3_Y(0xe)));
1838	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1839					     S5_X(0x0) | S5_Y(0x0) |
1840					     S6_X(0xb) | S6_Y(0x4) |
1841					     S7_X(0x7) | S7_Y(0x8)));
1842
1843	WREG32(VGT_STRMOUT_EN, 0);
1844	tmp = rdev->config.r600.max_pipes * 16;
1845	switch (rdev->family) {
1846	case CHIP_RV610:
1847	case CHIP_RV620:
1848	case CHIP_RS780:
1849	case CHIP_RS880:
1850		tmp += 32;
1851		break;
1852	case CHIP_RV670:
1853		tmp += 128;
1854		break;
1855	default:
1856		break;
1857	}
1858	if (tmp > 256) {
1859		tmp = 256;
1860	}
1861	WREG32(VGT_ES_PER_GS, 128);
1862	WREG32(VGT_GS_PER_ES, tmp);
1863	WREG32(VGT_GS_PER_VS, 2);
1864	WREG32(VGT_GS_VERTEX_REUSE, 16);
1865
1866	/* more default values. 2D/3D driver should adjust as needed */
1867	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1868	WREG32(VGT_STRMOUT_EN, 0);
1869	WREG32(SX_MISC, 0);
1870	WREG32(PA_SC_MODE_CNTL, 0);
1871	WREG32(PA_SC_AA_CONFIG, 0);
1872	WREG32(PA_SC_LINE_STIPPLE, 0);
1873	WREG32(SPI_INPUT_Z, 0);
1874	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1875	WREG32(CB_COLOR7_FRAG, 0);
1876
1877	/* Clear render buffer base addresses */
1878	WREG32(CB_COLOR0_BASE, 0);
1879	WREG32(CB_COLOR1_BASE, 0);
1880	WREG32(CB_COLOR2_BASE, 0);
1881	WREG32(CB_COLOR3_BASE, 0);
1882	WREG32(CB_COLOR4_BASE, 0);
1883	WREG32(CB_COLOR5_BASE, 0);
1884	WREG32(CB_COLOR6_BASE, 0);
1885	WREG32(CB_COLOR7_BASE, 0);
1886	WREG32(CB_COLOR7_FRAG, 0);
1887
1888	switch (rdev->family) {
1889	case CHIP_RV610:
1890	case CHIP_RV620:
1891	case CHIP_RS780:
1892	case CHIP_RS880:
1893		tmp = TC_L2_SIZE(8);
1894		break;
1895	case CHIP_RV630:
1896	case CHIP_RV635:
1897		tmp = TC_L2_SIZE(4);
1898		break;
1899	case CHIP_R600:
1900		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1901		break;
1902	default:
1903		tmp = TC_L2_SIZE(0);
1904		break;
1905	}
1906	WREG32(TC_CNTL, tmp);
1907
1908	tmp = RREG32(HDP_HOST_PATH_CNTL);
1909	WREG32(HDP_HOST_PATH_CNTL, tmp);
1910
1911	tmp = RREG32(ARB_POP);
1912	tmp |= ENABLE_TC128;
1913	WREG32(ARB_POP, tmp);
1914
1915	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1916	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1917			       NUM_CLIP_SEQ(3)));
1918	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1919}
1920
1921
1922/*
1923 * Indirect registers accessor
1924 */
1925u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1926{
1927	u32 r;
1928
1929	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1930	(void)RREG32(PCIE_PORT_INDEX);
1931	r = RREG32(PCIE_PORT_DATA);
1932	return r;
1933}
1934
1935void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1936{
1937	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1938	(void)RREG32(PCIE_PORT_INDEX);
1939	WREG32(PCIE_PORT_DATA, (v));
1940	(void)RREG32(PCIE_PORT_DATA);
1941}
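
/*
 * Usage sketch (illustrative only, not part of the driver): PCIE port
 * registers sit behind this index/data pair rather than at a flat MMIO
 * offset, e.g.
 *
 *	speed_cntl = r600_pciep_rreg(rdev, PCIE_LC_SPEED_CNTL);
 *
 * The read-back of PCIE_PORT_INDEX after each write posts the index
 * update before the data register is touched.
 */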
1942
1943/*
1944 * CP & Ring
1945 */
1946void r600_cp_stop(struct radeon_device *rdev)
1947{
1948	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1949	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1950	WREG32(SCRATCH_UMSK, 0);
1951}
1952
1953int r600_init_microcode(struct radeon_device *rdev)
1954{
1955	struct platform_device *pdev;
1956	const char *chip_name;
1957	const char *rlc_chip_name;
1958	size_t pfp_req_size, me_req_size, rlc_req_size;
1959	char fw_name[30];
1960	int err;
1961
1962	DRM_DEBUG("\n");
1963
1964	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1965	err = IS_ERR(pdev);
1966	if (err) {
1967		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1968		return -EINVAL;
1969	}
1970
1971	switch (rdev->family) {
1972	case CHIP_R600:
1973		chip_name = "R600";
1974		rlc_chip_name = "R600";
1975		break;
1976	case CHIP_RV610:
1977		chip_name = "RV610";
1978		rlc_chip_name = "R600";
1979		break;
1980	case CHIP_RV630:
1981		chip_name = "RV630";
1982		rlc_chip_name = "R600";
1983		break;
1984	case CHIP_RV620:
1985		chip_name = "RV620";
1986		rlc_chip_name = "R600";
1987		break;
1988	case CHIP_RV635:
1989		chip_name = "RV635";
1990		rlc_chip_name = "R600";
1991		break;
1992	case CHIP_RV670:
1993		chip_name = "RV670";
1994		rlc_chip_name = "R600";
1995		break;
1996	case CHIP_RS780:
1997	case CHIP_RS880:
1998		chip_name = "RS780";
1999		rlc_chip_name = "R600";
2000		break;
2001	case CHIP_RV770:
2002		chip_name = "RV770";
2003		rlc_chip_name = "R700";
2004		break;
2005	case CHIP_RV730:
2006	case CHIP_RV740:
2007		chip_name = "RV730";
2008		rlc_chip_name = "R700";
2009		break;
2010	case CHIP_RV710:
2011		chip_name = "RV710";
2012		rlc_chip_name = "R700";
2013		break;
2014	case CHIP_CEDAR:
2015		chip_name = "CEDAR";
2016		rlc_chip_name = "CEDAR";
2017		break;
2018	case CHIP_REDWOOD:
2019		chip_name = "REDWOOD";
2020		rlc_chip_name = "REDWOOD";
2021		break;
2022	case CHIP_JUNIPER:
2023		chip_name = "JUNIPER";
2024		rlc_chip_name = "JUNIPER";
2025		break;
2026	case CHIP_CYPRESS:
2027	case CHIP_HEMLOCK:
2028		chip_name = "CYPRESS";
2029		rlc_chip_name = "CYPRESS";
2030		break;
2031	case CHIP_PALM:
2032		chip_name = "PALM";
2033		rlc_chip_name = "SUMO";
2034		break;
2035	case CHIP_SUMO:
2036		chip_name = "SUMO";
2037		rlc_chip_name = "SUMO";
2038		break;
2039	case CHIP_SUMO2:
2040		chip_name = "SUMO2";
2041		rlc_chip_name = "SUMO";
2042		break;
2043	default:
		BUG();
2044	}
2045
2046	if (rdev->family >= CHIP_CEDAR) {
2047		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2048		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2049		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2050	} else if (rdev->family >= CHIP_RV770) {
2051		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2052		me_req_size = R700_PM4_UCODE_SIZE * 4;
2053		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2054	} else {
2055		pfp_req_size = PFP_UCODE_SIZE * 4;
2056		me_req_size = PM4_UCODE_SIZE * 12;
2057		rlc_req_size = RLC_UCODE_SIZE * 4;
2058	}
2059
2060	DRM_INFO("Loading %s Microcode\n", chip_name);
2061
2062	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2063	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2064	if (err)
2065		goto out;
2066	if (rdev->pfp_fw->size != pfp_req_size) {
2067		printk(KERN_ERR
2068		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2069		       rdev->pfp_fw->size, fw_name);
2070		err = -EINVAL;
2071		goto out;
2072	}
2073
2074	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2075	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2076	if (err)
2077		goto out;
2078	if (rdev->me_fw->size != me_req_size) {
2079		printk(KERN_ERR
2080		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2081		       rdev->me_fw->size, fw_name);
2082		err = -EINVAL;
		goto out;	/* don't let the rlc request below clobber this error */
2083	}
2084
2085	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2086	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2087	if (err)
2088		goto out;
2089	if (rdev->rlc_fw->size != rlc_req_size) {
2090		printk(KERN_ERR
2091		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2092		       rdev->rlc_fw->size, fw_name);
2093		err = -EINVAL;
2094	}
2095
2096out:
2097	platform_device_unregister(pdev);
2098
2099	if (err) {
2100		if (err != -EINVAL)
2101			printk(KERN_ERR
2102			       "r600_cp: Failed to load firmware \"%s\"\n",
2103			       fw_name);
2104		release_firmware(rdev->pfp_fw);
2105		rdev->pfp_fw = NULL;
2106		release_firmware(rdev->me_fw);
2107		rdev->me_fw = NULL;
2108		release_firmware(rdev->rlc_fw);
2109		rdev->rlc_fw = NULL;
2110	}
2111	return err;
2112}
2113
2114static int r600_cp_load_microcode(struct radeon_device *rdev)
2115{
2116	const __be32 *fw_data;
2117	int i;
2118
2119	if (!rdev->me_fw || !rdev->pfp_fw)
2120		return -EINVAL;
2121
2122	r600_cp_stop(rdev);
2123
2124	WREG32(CP_RB_CNTL,
2125#ifdef __BIG_ENDIAN
2126	       BUF_SWAP_32BIT |
2127#endif
2128	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2129
2130	/* Reset cp */
2131	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2132	RREG32(GRBM_SOFT_RESET);
2133	mdelay(15);
2134	WREG32(GRBM_SOFT_RESET, 0);
2135
2136	WREG32(CP_ME_RAM_WADDR, 0);
2137
2138	fw_data = (const __be32 *)rdev->me_fw->data;
2139	WREG32(CP_ME_RAM_WADDR, 0);
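	/* the ME (PM4) image holds 3 dwords per microcode slot, hence the
	 * "* 3" here and the "* 12" bytes checked in r600_init_microcode() */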
2140	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2141		WREG32(CP_ME_RAM_DATA,
2142		       be32_to_cpup(fw_data++));
2143
2144	fw_data = (const __be32 *)rdev->pfp_fw->data;
2145	WREG32(CP_PFP_UCODE_ADDR, 0);
2146	for (i = 0; i < PFP_UCODE_SIZE; i++)
2147		WREG32(CP_PFP_UCODE_DATA,
2148		       be32_to_cpup(fw_data++));
2149
2150	WREG32(CP_PFP_UCODE_ADDR, 0);
2151	WREG32(CP_ME_RAM_WADDR, 0);
2152	WREG32(CP_ME_RAM_RADDR, 0);
2153	return 0;
2154}
2155
2156int r600_cp_start(struct radeon_device *rdev)
2157{
2158	int r;
2159	uint32_t cp_me;
2160
2161	r = radeon_ring_lock(rdev, 7);
2162	if (r) {
2163		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2164		return r;
2165	}
2166	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2167	radeon_ring_write(rdev, 0x1);
2168	if (rdev->family >= CHIP_RV770) {
2169		radeon_ring_write(rdev, 0x0);
2170		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2171	} else {
2172		radeon_ring_write(rdev, 0x3);
2173		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
2174	}
2175	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2176	radeon_ring_write(rdev, 0);
2177	radeon_ring_write(rdev, 0);
2178	radeon_ring_unlock_commit(rdev);
2179
2180	cp_me = 0xff;
2181	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2182	return 0;
2183}
2184
2185int r600_cp_resume(struct radeon_device *rdev)
2186{
2187	u32 tmp;
2188	u32 rb_bufsz;
2189	int r;
2190
2191	/* Reset cp */
2192	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2193	RREG32(GRBM_SOFT_RESET);
2194	mdelay(15);
2195	WREG32(GRBM_SOFT_RESET, 0);
2196
2197	/* Set ring buffer size */
2198	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2199	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2200#ifdef __BIG_ENDIAN
2201	tmp |= BUF_SWAP_32BIT;
2202#endif
2203	WREG32(CP_RB_CNTL, tmp);
2204	WREG32(CP_SEM_WAIT_TIMER, 0x4);
2205
2206	/* Set the write pointer delay */
2207	WREG32(CP_RB_WPTR_DELAY, 0);
2208
2209	/* Initialize the ring buffer's read and write pointers */
2210	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2211	WREG32(CP_RB_RPTR_WR, 0);
2212	rdev->cp.wptr = 0;
2213	WREG32(CP_RB_WPTR, rdev->cp.wptr);
2214
2215	/* set the wb address whether it's enabled or not */
2216	WREG32(CP_RB_RPTR_ADDR,
2217	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2218	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2219	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2220
2221	if (rdev->wb.enabled)
2222		WREG32(SCRATCH_UMSK, 0xff);
2223	else {
2224		tmp |= RB_NO_UPDATE;
2225		WREG32(SCRATCH_UMSK, 0);
2226	}
2227
2228	mdelay(1);
2229	WREG32(CP_RB_CNTL, tmp);
2230
2231	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2232	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2233
2234	rdev->cp.rptr = RREG32(CP_RB_RPTR);
2235
2236	r600_cp_start(rdev);
2237	rdev->cp.ready = true;
2238	r = radeon_ring_test(rdev);
2239	if (r) {
2240		rdev->cp.ready = false;
2241		return r;
2242	}
2243	return 0;
2244}
2245
2246void r600_cp_commit(struct radeon_device *rdev)
2247{
2248	WREG32(CP_RB_WPTR, rdev->cp.wptr);
2249	(void)RREG32(CP_RB_WPTR);
2250}
2251
2252void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2253{
2254	u32 rb_bufsz;
2255
2256	/* Align ring size */
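	/* e.g. a 1MB request: drm_order(1M / 8) = 17, (1 << 18) * 4 = 1MB */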
2257	rb_bufsz = drm_order(ring_size / 8);
2258	ring_size = (1 << (rb_bufsz + 1)) * 4;
2259	rdev->cp.ring_size = ring_size;
2260	rdev->cp.align_mask = 16 - 1;
2261}
2262
2263void r600_cp_fini(struct radeon_device *rdev)
2264{
2265	r600_cp_stop(rdev);
2266	radeon_ring_fini(rdev);
2267}
2268
2269
2270/*
2271 * GPU scratch registers helpers function.
2272 */
2273void r600_scratch_init(struct radeon_device *rdev)
2274{
2275	int i;
2276
2277	rdev->scratch.num_reg = 7;
2278	rdev->scratch.reg_base = SCRATCH_REG0;
2279	for (i = 0; i < rdev->scratch.num_reg; i++) {
2280		rdev->scratch.free[i] = true;
2281		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2282	}
2283}
2284
2285int r600_ring_test(struct radeon_device *rdev)
2286{
2287	uint32_t scratch;
2288	uint32_t tmp = 0;
2289	unsigned i;
2290	int r;
2291
2292	r = radeon_scratch_get(rdev, &scratch);
2293	if (r) {
2294		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2295		return r;
2296	}
2297	WREG32(scratch, 0xCAFEDEAD);
2298	r = radeon_ring_lock(rdev, 3);
2299	if (r) {
2300		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2301		radeon_scratch_free(rdev, scratch);
2302		return r;
2303	}
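	/* emit a single CONFIG-reg write, scratch <- 0xDEADBEEF, then poll
	 * the register until the CP has executed it */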
2304	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2305	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2306	radeon_ring_write(rdev, 0xDEADBEEF);
2307	radeon_ring_unlock_commit(rdev);
2308	for (i = 0; i < rdev->usec_timeout; i++) {
2309		tmp = RREG32(scratch);
2310		if (tmp == 0xDEADBEEF)
2311			break;
2312		DRM_UDELAY(1);
2313	}
2314	if (i < rdev->usec_timeout) {
2315		DRM_INFO("ring test succeeded in %d usecs\n", i);
2316	} else {
2317		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2318			  scratch, tmp);
2319		r = -EINVAL;
2320	}
2321	radeon_scratch_free(rdev, scratch);
2322	return r;
2323}
2324
2325void r600_fence_ring_emit(struct radeon_device *rdev,
2326			  struct radeon_fence *fence)
2327{
2328	if (rdev->wb.use_event) {
2329		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2330			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
2331		/* EVENT_WRITE_EOP - flush caches, send int */
2332		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2333		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2334		radeon_ring_write(rdev, addr & 0xffffffff);
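		/* DATA_SEL(1) asks for a 32-bit data write (the fence seq);
		 * INT_SEL(2) raises the interrupt once that write has landed.
		 * (Field meanings assumed from the EVENT_WRITE_EOP encoding.) */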
2335		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2336		radeon_ring_write(rdev, fence->seq);
2337		radeon_ring_write(rdev, 0);
2338	} else {
2339		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2340		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2341		/* wait for 3D idle clean */
2342		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2343		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2344		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2345		/* Emit fence sequence & fire IRQ */
2346		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2347		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2348		radeon_ring_write(rdev, fence->seq);
2349		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2350		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2351		radeon_ring_write(rdev, RB_INT_STAT);
2352	}
2353}
2354
2355int r600_copy_blit(struct radeon_device *rdev,
2356		   uint64_t src_offset,
2357		   uint64_t dst_offset,
2358		   unsigned num_gpu_pages,
2359		   struct radeon_fence *fence)
2360{
2361	int r;
2362
2363	mutex_lock(&rdev->r600_blit.mutex);
2364	rdev->r600_blit.vb_ib = NULL;
2365	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2366	if (r) {
2367		if (rdev->r600_blit.vb_ib)
2368			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2369		mutex_unlock(&rdev->r600_blit.mutex);
2370		return r;
2371	}
2372	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2373	r600_blit_done_copy(rdev, fence);
2374	mutex_unlock(&rdev->r600_blit.mutex);
2375	return 0;
2376}
2377
2378int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2379			 uint32_t tiling_flags, uint32_t pitch,
2380			 uint32_t offset, uint32_t obj_size)
2381{
2382	/* FIXME: implement */
2383	return 0;
2384}
2385
2386void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2387{
2388	/* FIXME: implement */
2389}
2390
2391int r600_startup(struct radeon_device *rdev)
2392{
2393	int r;
2394
2395	/* enable pcie gen2 link */
2396	r600_pcie_gen2_enable(rdev);
2397
2398	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2399		r = r600_init_microcode(rdev);
2400		if (r) {
2401			DRM_ERROR("Failed to load firmware!\n");
2402			return r;
2403		}
2404	}
2405
2406	r600_mc_program(rdev);
2407	if (rdev->flags & RADEON_IS_AGP) {
2408		r600_agp_enable(rdev);
2409	} else {
2410		r = r600_pcie_gart_enable(rdev);
2411		if (r)
2412			return r;
2413	}
2414	r600_gpu_init(rdev);
2415	r = r600_blit_init(rdev);
2416	if (r) {
2417		r600_blit_fini(rdev);
2418		rdev->asic->copy = NULL;
2419		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2420	}
2421
2422	/* allocate wb buffer */
2423	r = radeon_wb_init(rdev);
2424	if (r)
2425		return r;
2426
2427	/* Enable IRQ */
2428	r = r600_irq_init(rdev);
2429	if (r) {
2430		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2431		radeon_irq_kms_fini(rdev);
2432		return r;
2433	}
2434	r600_irq_set(rdev);
2435
2436	r = radeon_ring_init(rdev, rdev->cp.ring_size);
2437	if (r)
2438		return r;
2439	r = r600_cp_load_microcode(rdev);
2440	if (r)
2441		return r;
2442	r = r600_cp_resume(rdev);
2443	if (r)
2444		return r;
2445
2446	return 0;
2447}
2448
2449void r600_vga_set_state(struct radeon_device *rdev, bool state)
2450{
2451	uint32_t temp;
2452
2453	temp = RREG32(CONFIG_CNTL);
2454	if (!state) {
2455		temp &= ~(1<<0);
2456		temp |= (1<<1);
2457	} else {
2458		temp &= ~(1<<1);
2459	}
2460	WREG32(CONFIG_CNTL, temp);
2461}
2462
2463int r600_resume(struct radeon_device *rdev)
2464{
2465	int r;
2466
2467	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2468	 * posting will perform the tasks necessary to bring the GPU back
2469	 * into good shape.
2470	 */
2471	/* post card */
2472	atom_asic_init(rdev->mode_info.atom_context);
2473
2474	r = r600_startup(rdev);
2475	if (r) {
2476		DRM_ERROR("r600 startup failed on resume\n");
2477		return r;
2478	}
2479
2480	r = r600_ib_test(rdev);
2481	if (r) {
2482		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2483		return r;
2484	}
2485
2486	r = r600_audio_init(rdev);
2487	if (r) {
2488		DRM_ERROR("radeon: audio resume failed\n");
2489		return r;
2490	}
2491
2492	return r;
2493}
2494
2495int r600_suspend(struct radeon_device *rdev)
2496{
2497	int r;
2498
2499	r600_audio_fini(rdev);
2500	/* FIXME: we should wait for ring to be empty */
2501	r600_cp_stop(rdev);
2502	rdev->cp.ready = false;
2503	r600_irq_suspend(rdev);
2504	radeon_wb_disable(rdev);
2505	r600_pcie_gart_disable(rdev);
2506	/* unpin shaders bo */
2507	if (rdev->r600_blit.shader_obj) {
2508		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2509		if (!r) {
2510			radeon_bo_unpin(rdev->r600_blit.shader_obj);
2511			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2512		}
2513	}
2514	return 0;
2515}
2516
2517/* The plan is to move initialization into this function and to use
2518 * helper functions so that radeon_device_init pretty much
2519 * does nothing more than call ASIC-specific functions. This
2520 * should also allow us to remove a bunch of callback functions,
2521 * like vram_info.
2522 */
2523int r600_init(struct radeon_device *rdev)
2524{
2525	int r;
2526
2527	if (r600_debugfs_mc_info_init(rdev)) {
2528		DRM_ERROR("Failed to register debugfs file for mc!\n");
2529	}
2530	/* This doesn't do much */
2531	r = radeon_gem_init(rdev);
2532	if (r)
2533		return r;
2534	/* Read BIOS */
2535	if (!radeon_get_bios(rdev)) {
2536		if (ASIC_IS_AVIVO(rdev))
2537			return -EINVAL;
2538	}
2539	/* Must be an ATOMBIOS */
2540	if (!rdev->is_atom_bios) {
2541		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2542		return -EINVAL;
2543	}
2544	r = radeon_atombios_init(rdev);
2545	if (r)
2546		return r;
2547	/* Post card if necessary */
2548	if (!radeon_card_posted(rdev)) {
2549		if (!rdev->bios) {
2550			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2551			return -EINVAL;
2552		}
2553		DRM_INFO("GPU not posted. posting now...\n");
2554		atom_asic_init(rdev->mode_info.atom_context);
2555	}
2556	/* Initialize scratch registers */
2557	r600_scratch_init(rdev);
2558	/* Initialize surface registers */
2559	radeon_surface_init(rdev);
2560	/* Initialize clocks */
2561	radeon_get_clock_info(rdev->ddev);
2562	/* Fence driver */
2563	r = radeon_fence_driver_init(rdev);
2564	if (r)
2565		return r;
2566	if (rdev->flags & RADEON_IS_AGP) {
2567		r = radeon_agp_init(rdev);
2568		if (r)
2569			radeon_agp_disable(rdev);
2570	}
2571	r = r600_mc_init(rdev);
2572	if (r)
2573		return r;
2574	/* Memory manager */
2575	r = radeon_bo_init(rdev);
2576	if (r)
2577		return r;
2578
2579	r = radeon_irq_kms_init(rdev);
2580	if (r)
2581		return r;
2582
2583	rdev->cp.ring_obj = NULL;
2584	r600_ring_init(rdev, 1024 * 1024);
2585
2586	rdev->ih.ring_obj = NULL;
2587	r600_ih_ring_init(rdev, 64 * 1024);
2588
2589	r = r600_pcie_gart_init(rdev);
2590	if (r)
2591		return r;
2592
2593	rdev->accel_working = true;
2594	r = r600_startup(rdev);
2595	if (r) {
2596		dev_err(rdev->dev, "disabling GPU acceleration\n");
2597		r600_cp_fini(rdev);
2598		r600_irq_fini(rdev);
2599		radeon_wb_fini(rdev);
2600		radeon_irq_kms_fini(rdev);
2601		r600_pcie_gart_fini(rdev);
2602		rdev->accel_working = false;
2603	}
2604	if (rdev->accel_working) {
2605		r = radeon_ib_pool_init(rdev);
2606		if (r) {
2607			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2608			rdev->accel_working = false;
2609		} else {
2610			r = r600_ib_test(rdev);
2611			if (r) {
2612				dev_err(rdev->dev, "IB test failed (%d).\n", r);
2613				rdev->accel_working = false;
2614			}
2615		}
2616	}
2617
2618	r = r600_audio_init(rdev);
2619	if (r)
2620		return r; /* TODO error handling */
2621	return 0;
2622}
2623
2624void r600_fini(struct radeon_device *rdev)
2625{
2626	r600_audio_fini(rdev);
2627	r600_blit_fini(rdev);
2628	r600_cp_fini(rdev);
2629	r600_irq_fini(rdev);
2630	radeon_wb_fini(rdev);
2631	radeon_ib_pool_fini(rdev);
2632	radeon_irq_kms_fini(rdev);
2633	r600_pcie_gart_fini(rdev);
2634	radeon_agp_fini(rdev);
2635	radeon_gem_fini(rdev);
2636	radeon_fence_driver_fini(rdev);
2637	radeon_bo_fini(rdev);
2638	radeon_atombios_fini(rdev);
2639	kfree(rdev->bios);
2640	rdev->bios = NULL;
2641}
2642
2643
2644/*
2645 * CS stuff
2646 */
2647void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2648{
2649	/* FIXME: implement */
2650	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2651	radeon_ring_write(rdev,
2652#ifdef __BIG_ENDIAN
2653			  (2 << 0) |
2654#endif
2655			  (ib->gpu_addr & 0xFFFFFFFC));
2656	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2657	radeon_ring_write(rdev, ib->length_dw);
2658}
2659
2660int r600_ib_test(struct radeon_device *rdev)
2661{
2662	struct radeon_ib *ib;
2663	uint32_t scratch;
2664	uint32_t tmp = 0;
2665	unsigned i;
2666	int r;
2667
2668	r = radeon_scratch_get(rdev, &scratch);
2669	if (r) {
2670		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2671		return r;
2672	}
2673	WREG32(scratch, 0xCAFEDEAD);
2674	r = radeon_ib_get(rdev, &ib);
2675	if (r) {
2676		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2677		return r;
2678	}
2679	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2680	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2681	ib->ptr[2] = 0xDEADBEEF;
2682	ib->ptr[3] = PACKET2(0);
2683	ib->ptr[4] = PACKET2(0);
2684	ib->ptr[5] = PACKET2(0);
2685	ib->ptr[6] = PACKET2(0);
2686	ib->ptr[7] = PACKET2(0);
2687	ib->ptr[8] = PACKET2(0);
2688	ib->ptr[9] = PACKET2(0);
2689	ib->ptr[10] = PACKET2(0);
2690	ib->ptr[11] = PACKET2(0);
2691	ib->ptr[12] = PACKET2(0);
2692	ib->ptr[13] = PACKET2(0);
2693	ib->ptr[14] = PACKET2(0);
2694	ib->ptr[15] = PACKET2(0);
2695	ib->length_dw = 16;
2696	r = radeon_ib_schedule(rdev, ib);
2697	if (r) {
2698		radeon_scratch_free(rdev, scratch);
2699		radeon_ib_free(rdev, &ib);
2700		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2701		return r;
2702	}
2703	r = radeon_fence_wait(ib->fence, false);
2704	if (r) {
2705		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2706		return r;
2707	}
2708	for (i = 0; i < rdev->usec_timeout; i++) {
2709		tmp = RREG32(scratch);
2710		if (tmp == 0xDEADBEEF)
2711			break;
2712		DRM_UDELAY(1);
2713	}
2714	if (i < rdev->usec_timeout) {
2715		DRM_INFO("ib test succeeded in %u usecs\n", i);
2716	} else {
2717		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2718			  scratch, tmp);
2719		r = -EINVAL;
2720	}
2721	radeon_scratch_free(rdev, scratch);
2722	radeon_ib_free(rdev, &ib);
2723	return r;
2724}
2725
2726/*
2727 * Interrupts
2728 *
2729 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
2730 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2731 * writing to the ring and the GPU consuming, the GPU writes to the ring
2732 * and host consumes.  As the host irq handler processes interrupts, it
2733 * increments the rptr.  When the rptr catches up with the wptr, all the
2734 * current interrupts have been processed.
2735 */
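
/*
 * Host-side drain, in outline (a sketch only; the real loop lives in
 * r600_irq_process() below):
 *
 *	wptr = r600_get_ih_wptr(rdev);
 *	while (rptr != wptr) {
 *		ring_index = rptr / 4;	(rptr/wptr are byte offsets)
 *		... decode the 128-bit vector at ring_index ...
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */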
2736
2737void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2738{
2739	u32 rb_bufsz;
2740
2741	/* Align ring size */
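	/* e.g. a 64KB request: drm_order(64K / 4) = 14, (1 << 14) * 4 = 64KB;
	 * a power-of-two size keeps ptr_mask = ring_size - 1 valid */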
2742	rb_bufsz = drm_order(ring_size / 4);
2743	ring_size = (1 << rb_bufsz) * 4;
2744	rdev->ih.ring_size = ring_size;
2745	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2746	rdev->ih.rptr = 0;
2747}
2748
2749static int r600_ih_ring_alloc(struct radeon_device *rdev)
2750{
2751	int r;
2752
2753	/* Allocate ring buffer */
2754	if (rdev->ih.ring_obj == NULL) {
2755		r = radeon_bo_create(rdev, rdev->ih.ring_size,
2756				     PAGE_SIZE, true,
2757				     RADEON_GEM_DOMAIN_GTT,
2758				     &rdev->ih.ring_obj);
2759		if (r) {
2760			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2761			return r;
2762		}
2763		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2764		if (unlikely(r != 0))
2765			return r;
2766		r = radeon_bo_pin(rdev->ih.ring_obj,
2767				  RADEON_GEM_DOMAIN_GTT,
2768				  &rdev->ih.gpu_addr);
2769		if (r) {
2770			radeon_bo_unreserve(rdev->ih.ring_obj);
2771			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2772			return r;
2773		}
2774		r = radeon_bo_kmap(rdev->ih.ring_obj,
2775				   (void **)&rdev->ih.ring);
2776		radeon_bo_unreserve(rdev->ih.ring_obj);
2777		if (r) {
2778			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2779			return r;
2780		}
2781	}
2782	return 0;
2783}
2784
2785static void r600_ih_ring_fini(struct radeon_device *rdev)
2786{
2787	int r;
2788	if (rdev->ih.ring_obj) {
2789		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2790		if (likely(r == 0)) {
2791			radeon_bo_kunmap(rdev->ih.ring_obj);
2792			radeon_bo_unpin(rdev->ih.ring_obj);
2793			radeon_bo_unreserve(rdev->ih.ring_obj);
2794		}
2795		radeon_bo_unref(&rdev->ih.ring_obj);
2796		rdev->ih.ring = NULL;
2797		rdev->ih.ring_obj = NULL;
2798	}
2799}
2800
2801void r600_rlc_stop(struct radeon_device *rdev)
2802{
2803
2804	if ((rdev->family >= CHIP_RV770) &&
2805	    (rdev->family <= CHIP_RV740)) {
2806		/* r7xx asics need to soft reset RLC before halting */
2807		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2808		RREG32(SRBM_SOFT_RESET);
2809		udelay(15000);
2810		WREG32(SRBM_SOFT_RESET, 0);
2811		RREG32(SRBM_SOFT_RESET);
2812	}
2813
2814	WREG32(RLC_CNTL, 0);
2815}
2816
2817static void r600_rlc_start(struct radeon_device *rdev)
2818{
2819	WREG32(RLC_CNTL, RLC_ENABLE);
2820}
2821
2822static int r600_rlc_init(struct radeon_device *rdev)
2823{
2824	u32 i;
2825	const __be32 *fw_data;
2826
2827	if (!rdev->rlc_fw)
2828		return -EINVAL;
2829
2830	r600_rlc_stop(rdev);
2831
2832	WREG32(RLC_HB_BASE, 0);
2833	WREG32(RLC_HB_CNTL, 0);
2834	WREG32(RLC_HB_RPTR, 0);
2835	WREG32(RLC_HB_WPTR, 0);
2836	if (rdev->family <= CHIP_CAICOS) {
2837		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2838		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2839	}
2840	WREG32(RLC_MC_CNTL, 0);
2841	WREG32(RLC_UCODE_CNTL, 0);
2842
2843	fw_data = (const __be32 *)rdev->rlc_fw->data;
2844	if (rdev->family >= CHIP_CAYMAN) {
2845		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2846			WREG32(RLC_UCODE_ADDR, i);
2847			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2848		}
2849	} else if (rdev->family >= CHIP_CEDAR) {
2850		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2851			WREG32(RLC_UCODE_ADDR, i);
2852			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2853		}
2854	} else if (rdev->family >= CHIP_RV770) {
2855		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2856			WREG32(RLC_UCODE_ADDR, i);
2857			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2858		}
2859	} else {
2860		for (i = 0; i < RLC_UCODE_SIZE; i++) {
2861			WREG32(RLC_UCODE_ADDR, i);
2862			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2863		}
2864	}
2865	WREG32(RLC_UCODE_ADDR, 0);
2866
2867	r600_rlc_start(rdev);
2868
2869	return 0;
2870}
2871
2872static void r600_enable_interrupts(struct radeon_device *rdev)
2873{
2874	u32 ih_cntl = RREG32(IH_CNTL);
2875	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2876
2877	ih_cntl |= ENABLE_INTR;
2878	ih_rb_cntl |= IH_RB_ENABLE;
2879	WREG32(IH_CNTL, ih_cntl);
2880	WREG32(IH_RB_CNTL, ih_rb_cntl);
2881	rdev->ih.enabled = true;
2882}
2883
2884void r600_disable_interrupts(struct radeon_device *rdev)
2885{
2886	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2887	u32 ih_cntl = RREG32(IH_CNTL);
2888
2889	ih_rb_cntl &= ~IH_RB_ENABLE;
2890	ih_cntl &= ~ENABLE_INTR;
2891	WREG32(IH_RB_CNTL, ih_rb_cntl);
2892	WREG32(IH_CNTL, ih_cntl);
2893	/* set rptr, wptr to 0 */
2894	WREG32(IH_RB_RPTR, 0);
2895	WREG32(IH_RB_WPTR, 0);
2896	rdev->ih.enabled = false;
2897	rdev->ih.wptr = 0;
2898	rdev->ih.rptr = 0;
2899}
2900
2901static void r600_disable_interrupt_state(struct radeon_device *rdev)
2902{
2903	u32 tmp;
2904
2905	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2906	WREG32(GRBM_INT_CNTL, 0);
2907	WREG32(DxMODE_INT_MASK, 0);
2908	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2909	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2910	if (ASIC_IS_DCE3(rdev)) {
2911		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2912		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2913		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2914		WREG32(DC_HPD1_INT_CONTROL, tmp);
2915		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2916		WREG32(DC_HPD2_INT_CONTROL, tmp);
2917		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2918		WREG32(DC_HPD3_INT_CONTROL, tmp);
2919		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2920		WREG32(DC_HPD4_INT_CONTROL, tmp);
2921		if (ASIC_IS_DCE32(rdev)) {
2922			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2923			WREG32(DC_HPD5_INT_CONTROL, tmp);
2924			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2925			WREG32(DC_HPD6_INT_CONTROL, tmp);
2926		}
2927	} else {
2928		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2929		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2930		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2931		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2932		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2933		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2934		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2935		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2936	}
2937}
2938
2939int r600_irq_init(struct radeon_device *rdev)
2940{
2941	int ret = 0;
2942	int rb_bufsz;
2943	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2944
2945	/* allocate ring */
2946	ret = r600_ih_ring_alloc(rdev);
2947	if (ret)
2948		return ret;
2949
2950	/* disable irqs */
2951	r600_disable_interrupts(rdev);
2952
2953	/* init rlc */
2954	ret = r600_rlc_init(rdev);
2955	if (ret) {
2956		r600_ih_ring_fini(rdev);
2957		return ret;
2958	}
2959
2960	/* setup interrupt control */
2961	/* set dummy read address to ring address */
2962	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2963	interrupt_cntl = RREG32(INTERRUPT_CNTL);
2964	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2965	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2966	 */
2967	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2968	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2969	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2970	WREG32(INTERRUPT_CNTL, interrupt_cntl);
2971
2972	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2973	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2974
2975	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2976		      IH_WPTR_OVERFLOW_CLEAR |
2977		      (rb_bufsz << 1));
2978
2979	if (rdev->wb.enabled)
2980		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2981
2982	/* set the writeback address whether it's enabled or not */
2983	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2984	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
2985
2986	WREG32(IH_RB_CNTL, ih_rb_cntl);
2987
2988	/* set rptr, wptr to 0 */
2989	WREG32(IH_RB_RPTR, 0);
2990	WREG32(IH_RB_WPTR, 0);
2991
2992	/* Default settings for IH_CNTL (disabled at first) */
2993	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2994	/* RPTR_REARM only works if msi's are enabled */
2995	if (rdev->msi_enabled)
2996		ih_cntl |= RPTR_REARM;
2997	WREG32(IH_CNTL, ih_cntl);
2998
2999	/* force the active interrupt state to all disabled */
3000	if (rdev->family >= CHIP_CEDAR)
3001		evergreen_disable_interrupt_state(rdev);
3002	else
3003		r600_disable_interrupt_state(rdev);
3004
3005	/* enable irqs */
3006	r600_enable_interrupts(rdev);
3007
3008	return ret;
3009}
3010
3011void r600_irq_suspend(struct radeon_device *rdev)
3012{
3013	r600_irq_disable(rdev);
3014	r600_rlc_stop(rdev);
3015}
3016
3017void r600_irq_fini(struct radeon_device *rdev)
3018{
3019	r600_irq_suspend(rdev);
3020	r600_ih_ring_fini(rdev);
3021}
3022
3023int r600_irq_set(struct radeon_device *rdev)
3024{
3025	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3026	u32 mode_int = 0;
3027	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3028	u32 grbm_int_cntl = 0;
3029	u32 hdmi1, hdmi2;
3030	u32 d1grph = 0, d2grph = 0;
3031
3032	if (!rdev->irq.installed) {
3033		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3034		return -EINVAL;
3035	}
3036	/* don't enable anything if the ih is disabled */
3037	if (!rdev->ih.enabled) {
3038		r600_disable_interrupts(rdev);
3039		/* force the active interrupt state to all disabled */
3040		r600_disable_interrupt_state(rdev);
3041		return 0;
3042	}
3043
3044	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3045	if (ASIC_IS_DCE3(rdev)) {
3046		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3047		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3048		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3049		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3050		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3051		if (ASIC_IS_DCE32(rdev)) {
3052			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3053			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3054		}
3055	} else {
3056		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3057		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3058		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3059		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3060	}
3061
3062	if (rdev->irq.sw_int) {
3063		DRM_DEBUG("r600_irq_set: sw int\n");
3064		cp_int_cntl |= RB_INT_ENABLE;
3065		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3066	}
3067	if (rdev->irq.crtc_vblank_int[0] ||
3068	    rdev->irq.pflip[0]) {
3069		DRM_DEBUG("r600_irq_set: vblank 0\n");
3070		mode_int |= D1MODE_VBLANK_INT_MASK;
3071	}
3072	if (rdev->irq.crtc_vblank_int[1] ||
3073	    rdev->irq.pflip[1]) {
3074		DRM_DEBUG("r600_irq_set: vblank 1\n");
3075		mode_int |= D2MODE_VBLANK_INT_MASK;
3076	}
3077	if (rdev->irq.hpd[0]) {
3078		DRM_DEBUG("r600_irq_set: hpd 1\n");
3079		hpd1 |= DC_HPDx_INT_EN;
3080	}
3081	if (rdev->irq.hpd[1]) {
3082		DRM_DEBUG("r600_irq_set: hpd 2\n");
3083		hpd2 |= DC_HPDx_INT_EN;
3084	}
3085	if (rdev->irq.hpd[2]) {
3086		DRM_DEBUG("r600_irq_set: hpd 3\n");
3087		hpd3 |= DC_HPDx_INT_EN;
3088	}
3089	if (rdev->irq.hpd[3]) {
3090		DRM_DEBUG("r600_irq_set: hpd 4\n");
3091		hpd4 |= DC_HPDx_INT_EN;
3092	}
3093	if (rdev->irq.hpd[4]) {
3094		DRM_DEBUG("r600_irq_set: hpd 5\n");
3095		hpd5 |= DC_HPDx_INT_EN;
3096	}
3097	if (rdev->irq.hpd[5]) {
3098		DRM_DEBUG("r600_irq_set: hpd 6\n");
3099		hpd6 |= DC_HPDx_INT_EN;
3100	}
3101	if (rdev->irq.hdmi[0]) {
3102		DRM_DEBUG("r600_irq_set: hdmi 1\n");
3103		hdmi1 |= R600_HDMI_INT_EN;
3104	}
3105	if (rdev->irq.hdmi[1]) {
3106		DRM_DEBUG("r600_irq_set: hdmi 2\n");
3107		hdmi2 |= R600_HDMI_INT_EN;
3108	}
3109	if (rdev->irq.gui_idle) {
3110		DRM_DEBUG("gui idle\n");
3111		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3112	}
3113
3114	WREG32(CP_INT_CNTL, cp_int_cntl);
3115	WREG32(DxMODE_INT_MASK, mode_int);
3116	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3117	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3118	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3119	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3120	if (ASIC_IS_DCE3(rdev)) {
3121		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3122		WREG32(DC_HPD1_INT_CONTROL, hpd1);
3123		WREG32(DC_HPD2_INT_CONTROL, hpd2);
3124		WREG32(DC_HPD3_INT_CONTROL, hpd3);
3125		WREG32(DC_HPD4_INT_CONTROL, hpd4);
3126		if (ASIC_IS_DCE32(rdev)) {
3127			WREG32(DC_HPD5_INT_CONTROL, hpd5);
3128			WREG32(DC_HPD6_INT_CONTROL, hpd6);
3129		}
3130	} else {
3131		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3132		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3133		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3134		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3135	}
3136
3137	return 0;
3138}
3139
3140static inline void r600_irq_ack(struct radeon_device *rdev)
3141{
3142	u32 tmp;
3143
3144	if (ASIC_IS_DCE3(rdev)) {
3145		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3146		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3147		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3148	} else {
3149		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3150		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3151		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3152	}
3153	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3154	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3155
3156	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3157		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3158	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3159		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3160	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3161		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3162	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3163		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3164	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3165		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3166	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3167		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3168	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3169		if (ASIC_IS_DCE3(rdev)) {
3170			tmp = RREG32(DC_HPD1_INT_CONTROL);
3171			tmp |= DC_HPDx_INT_ACK;
3172			WREG32(DC_HPD1_INT_CONTROL, tmp);
3173		} else {
3174			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3175			tmp |= DC_HPDx_INT_ACK;
3176			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3177		}
3178	}
3179	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3180		if (ASIC_IS_DCE3(rdev)) {
3181			tmp = RREG32(DC_HPD2_INT_CONTROL);
3182			tmp |= DC_HPDx_INT_ACK;
3183			WREG32(DC_HPD2_INT_CONTROL, tmp);
3184		} else {
3185			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3186			tmp |= DC_HPDx_INT_ACK;
3187			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3188		}
3189	}
3190	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3191		if (ASIC_IS_DCE3(rdev)) {
3192			tmp = RREG32(DC_HPD3_INT_CONTROL);
3193			tmp |= DC_HPDx_INT_ACK;
3194			WREG32(DC_HPD3_INT_CONTROL, tmp);
3195		} else {
3196			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3197			tmp |= DC_HPDx_INT_ACK;
3198			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3199		}
3200	}
3201	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3202		tmp = RREG32(DC_HPD4_INT_CONTROL);
3203		tmp |= DC_HPDx_INT_ACK;
3204		WREG32(DC_HPD4_INT_CONTROL, tmp);
3205	}
3206	if (ASIC_IS_DCE32(rdev)) {
3207		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3208			tmp = RREG32(DC_HPD5_INT_CONTROL);
3209			tmp |= DC_HPDx_INT_ACK;
3210			WREG32(DC_HPD5_INT_CONTROL, tmp);
3211		}
3212		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3213			tmp = RREG32(DC_HPD6_INT_CONTROL);
3214			tmp |= DC_HPDx_INT_ACK;
3215			WREG32(DC_HPD6_INT_CONTROL, tmp);
3216		}
3217	}
3218	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3219		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3220	}
3221	if (ASIC_IS_DCE3(rdev)) {
3222		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3223			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3224		}
3225	} else {
3226		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3227			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3228		}
3229	}
3230}
3231
3232void r600_irq_disable(struct radeon_device *rdev)
3233{
3234	r600_disable_interrupts(rdev);
3235	/* Wait and acknowledge irq */
3236	mdelay(1);
3237	r600_irq_ack(rdev);
3238	r600_disable_interrupt_state(rdev);
3239}
3240
3241static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3242{
3243	u32 wptr, tmp;
3244
3245	if (rdev->wb.enabled)
3246		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3247	else
3248		wptr = RREG32(IH_RB_WPTR);
3249
3250	if (wptr & RB_OVERFLOW) {
3251		/* When a ring buffer overflow happens, start parsing interrupts
3252		 * from the last non-overwritten vector (wptr + 16). Hopefully
3253		 * this should allow us to catch up.
3254		 */
3255		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3256			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3257		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3258		tmp = RREG32(IH_RB_CNTL);
3259		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3260		WREG32(IH_RB_CNTL, tmp);
3261	}
3262	return (wptr & rdev->ih.ptr_mask);
3263}
3264
3265/*        r600 IV Ring
3266 * Each IV ring entry is 128 bits:
3267 * [7:0]    - interrupt source id
3268 * [31:8]   - reserved
3269 * [59:32]  - interrupt source data
3270 * [127:60]  - reserved
3271 *
3272 * The basic interrupt vector entries
3273 * are decoded as follows:
3274 * src_id  src_data  description
3275 *      1         0  D1 Vblank
3276 *      1         1  D1 Vline
3277 *      5         0  D2 Vblank
3278 *      5         1  D2 Vline
3279 *     19         0  FP Hot plug detection A
3280 *     19         1  FP Hot plug detection B
3281 *     19         2  DAC A auto-detection
3282 *     19         3  DAC B auto-detection
3283 *     21         4  HDMI block A
3284 *     21         5  HDMI block B
3285 *    176         -  CP_INT RB
3286 *    177         -  CP_INT IB1
3287 *    178         -  CP_INT IB2
3288 *    181         -  EOP Interrupt
3289 *    233         -  GUI Idle
3290 *
3291 * Note, these are based on r600 and may need to be
3292 * adjusted or added to on newer asics
3293 */
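/* A minimal decode sketch (illustrative annotation, not part of this file;
 * the helper name is hypothetical).  It applies the same masks that the
 * r600_irq_process() loop below uses on each 16-byte vector; rptr counts
 * bytes and the ring holds little-endian dwords.
 */
static inline void r600_iv_decode_sketch(const u32 *ring, u32 rptr,
					 u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* byte offset -> dword index */

	*src_id   = le32_to_cpu(ring[ring_index]) & 0xff;	   /* bits [7:0]   */
	*src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; /* bits [59:32] */
}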
3294
3295int r600_irq_process(struct radeon_device *rdev)
3296{
3297	u32 wptr;
3298	u32 rptr;
3299	u32 src_id, src_data;
3300	u32 ring_index;
3301	unsigned long flags;
3302	bool queue_hotplug = false;
3303
3304	if (!rdev->ih.enabled || rdev->shutdown)
3305		return IRQ_NONE;
3306
3307	/* No MSIs, need a dummy read to flush PCI DMAs */
3308	if (!rdev->msi_enabled)
3309		RREG32(IH_RB_WPTR);
3310
3311	wptr = r600_get_ih_wptr(rdev);
3312	rptr = rdev->ih.rptr;
3313	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3314
3315	spin_lock_irqsave(&rdev->ih.lock, flags);
3316
3317	if (rptr == wptr) {
3318		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3319		return IRQ_NONE;
3320	}
3321
3322restart_ih:
3323	/* Order reading of wptr vs. reading of IH ring data */
3324	rmb();
3325
3326	/* display interrupts */
3327	r600_irq_ack(rdev);
3328
3329	rdev->ih.wptr = wptr;
3330	while (rptr != wptr) {
3331		/* wptr/rptr are in bytes! */
3332		ring_index = rptr / 4;
3333		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3334		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3335
3336		switch (src_id) {
3337		case 1: /* D1 vblank/vline */
3338			switch (src_data) {
3339			case 0: /* D1 vblank */
3340				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3341					if (rdev->irq.crtc_vblank_int[0]) {
3342						drm_handle_vblank(rdev->ddev, 0);
3343						rdev->pm.vblank_sync = true;
3344						wake_up(&rdev->irq.vblank_queue);
3345					}
3346					if (rdev->irq.pflip[0])
3347						radeon_crtc_handle_flip(rdev, 0);
3348					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3349					DRM_DEBUG("IH: D1 vblank\n");
3350				}
3351				break;
3352			case 1: /* D1 vline */
3353				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3354					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3355					DRM_DEBUG("IH: D1 vline\n");
3356				}
3357				break;
3358			default:
3359				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3360				break;
3361			}
3362			break;
3363		case 5: /* D2 vblank/vline */
3364			switch (src_data) {
3365			case 0: /* D2 vblank */
3366				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3367					if (rdev->irq.crtc_vblank_int[1]) {
3368						drm_handle_vblank(rdev->ddev, 1);
3369						rdev->pm.vblank_sync = true;
3370						wake_up(&rdev->irq.vblank_queue);
3371					}
3372					if (rdev->irq.pflip[1])
3373						radeon_crtc_handle_flip(rdev, 1);
3374					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3375					DRM_DEBUG("IH: D2 vblank\n");
3376				}
3377				break;
3378			case 1: /* D2 vline */
3379				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3380					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3381					DRM_DEBUG("IH: D2 vline\n");
3382				}
3383				break;
3384			default:
3385				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3386				break;
3387			}
3388			break;
3389		case 19: /* HPD/DAC hotplug */
3390			switch (src_data) {
3391			case 0:
3392				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3393					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3394					queue_hotplug = true;
3395					DRM_DEBUG("IH: HPD1\n");
3396				}
3397				break;
3398			case 1:
3399				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3400					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3401					queue_hotplug = true;
3402					DRM_DEBUG("IH: HPD2\n");
3403				}
3404				break;
3405			case 4:
3406				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3407					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3408					queue_hotplug = true;
3409					DRM_DEBUG("IH: HPD3\n");
3410				}
3411				break;
3412			case 5:
3413				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3414					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3415					queue_hotplug = true;
3416					DRM_DEBUG("IH: HPD4\n");
3417				}
3418				break;
3419			case 10:
3420				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3421					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3422					queue_hotplug = true;
3423					DRM_DEBUG("IH: HPD5\n");
3424				}
3425				break;
3426			case 12:
3427				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3428					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3429					queue_hotplug = true;
3430					DRM_DEBUG("IH: HPD6\n");
3431				}
3432				break;
3433			default:
3434				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3435				break;
3436			}
3437			break;
3438		case 21: /* HDMI */
3439			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3440			r600_audio_schedule_polling(rdev);
3441			break;
3442		case 176: /* CP_INT in ring buffer */
3443		case 177: /* CP_INT in IB1 */
3444		case 178: /* CP_INT in IB2 */
3445			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3446			radeon_fence_process(rdev);
3447			break;
3448		case 181: /* CP EOP event */
3449			DRM_DEBUG("IH: CP EOP\n");
3450			radeon_fence_process(rdev);
3451			break;
3452		case 233: /* GUI IDLE */
3453			DRM_DEBUG("IH: GUI idle\n");
3454			rdev->pm.gui_idle = true;
3455			wake_up(&rdev->irq.idle_queue);
3456			break;
3457		default:
3458			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3459			break;
3460		}
3461
3462		/* wptr/rptr are in bytes! */
3463		rptr += 16;
3464		rptr &= rdev->ih.ptr_mask;
3465	}
3466	/* make sure wptr hasn't changed while processing */
3467	wptr = r600_get_ih_wptr(rdev);
3468	if (wptr != rdev->ih.wptr)
3469		goto restart_ih;
3470	if (queue_hotplug)
3471		schedule_work(&rdev->hotplug_work);
3472	rdev->ih.rptr = rptr;
3473	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3474	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3475	return IRQ_HANDLED;
3476}
3477
3478/*
3479 * Debugfs info
3480 */
3481#if defined(CONFIG_DEBUG_FS)
3482
3483static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3484{
3485	struct drm_info_node *node = (struct drm_info_node *) m->private;
3486	struct drm_device *dev = node->minor->dev;
3487	struct radeon_device *rdev = dev->dev_private;
3488	unsigned count, i, j;
3489
3490	radeon_ring_free_size(rdev);
3491	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3492	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3493	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3494	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3495	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3496	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3497	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3498	seq_printf(m, "%u dwords in ring\n", count);
3499	i = rdev->cp.rptr;
3500	for (j = 0; j <= count; j++) {
3501		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3502		i = (i + 1) & rdev->cp.ptr_mask;
3503	}
3504	return 0;
3505}
3506
3507static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3508{
3509	struct drm_info_node *node = (struct drm_info_node *) m->private;
3510	struct drm_device *dev = node->minor->dev;
3511	struct radeon_device *rdev = dev->dev_private;
3512
3513	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3514	DREG32_SYS(m, rdev, VM_L2_STATUS);
3515	return 0;
3516}
3517
3518static struct drm_info_list r600_mc_info_list[] = {
3519	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3520	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3521};
3522#endif
3523
3524int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3525{
3526#if defined(CONFIG_DEBUG_FS)
3527	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3528#else
3529	return 0;
3530#endif
3531}
3532
3533/**
3534 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3535 * rdev: radeon device structure
3536 * bo: buffer object struct which userspace is waiting for idle
3537 *
3538 * Some R6XX/R7XX chips don't seem to take into account HDP flushes
3539 * performed through the ring buffer, which leads to corruption in
3540 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
3541 * To avoid this we perform the HDP flush directly via an MMIO register write.
3542 */
3543void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3544{
3545	/* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
3546	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3547	 * The new method seems to cause problems on some AGP cards, so just
3548	 * use the old method for them.
3549	 */
3550	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3551	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3552		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3553		u32 tmp;
3554
3555		WREG32(HDP_DEBUG1, 0);
3556		tmp = readl((void __iomem *)ptr);
3557	} else
3558		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3559}
3560
3561void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3562{
3563	u32 link_width_cntl, mask, target_reg;
3564
3565	if (rdev->flags & RADEON_IS_IGP)
3566		return;
3567
3568	if (!(rdev->flags & RADEON_IS_PCIE))
3569		return;
3570
3571	/* x2 cards have a special sequence */
3572	if (ASIC_IS_X2(rdev))
3573		return;
3574
3575	/* FIXME wait for idle */
3576
3577	switch (lanes) {
3578	case 0:
3579		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3580		break;
3581	case 1:
3582		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3583		break;
3584	case 2:
3585		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3586		break;
3587	case 4:
3588		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3589		break;
3590	case 8:
3591		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3592		break;
3593	case 12:
3594		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3595		break;
3596	case 16:
3597	default:
3598		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3599		break;
3600	}
3601
3602	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3603
3604	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3605	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3606		return;
3607
3608	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3609		return;
3610
3611	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3612			     RADEON_PCIE_LC_RECONFIG_NOW |
3613			     R600_PCIE_LC_RENEGOTIATE_EN |
3614			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3615	link_width_cntl |= mask;
3616
3617	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3618
3619	/* some northbridges can renegotiate the link rather than requiring
3620	 * a complete re-config.
3621	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3622	 */
3623	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3624		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3625	else
3626		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3627
3628	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3629						       RADEON_PCIE_LC_RECONFIG_NOW));
3630
3631	if (rdev->family >= CHIP_RV770)
3632		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3633	else
3634		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3635
3636	/* wait for lane set to complete */
3637	link_width_cntl = RREG32(target_reg);
3638	while (link_width_cntl == 0xffffffff)
3639		link_width_cntl = RREG32(target_reg);
3640
3641}
3642
3643int r600_get_pcie_lanes(struct radeon_device *rdev)
3644{
3645	u32 link_width_cntl;
3646
3647	if (rdev->flags & RADEON_IS_IGP)
3648		return 0;
3649
3650	if (!(rdev->flags & RADEON_IS_PCIE))
3651		return 0;
3652
3653	/* x2 cards have a special sequence */
3654	if (ASIC_IS_X2(rdev))
3655		return 0;
3656
3657	/* FIXME wait for idle */
3658
3659	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3660
3661	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3662	case RADEON_PCIE_LC_LINK_WIDTH_X0:
3663		return 0;
3664	case RADEON_PCIE_LC_LINK_WIDTH_X1:
3665		return 1;
3666	case RADEON_PCIE_LC_LINK_WIDTH_X2:
3667		return 2;
3668	case RADEON_PCIE_LC_LINK_WIDTH_X4:
3669		return 4;
3670	case RADEON_PCIE_LC_LINK_WIDTH_X8:
3671		return 8;
3672	case RADEON_PCIE_LC_LINK_WIDTH_X16:
3673	default:
3674		return 16;
3675	}
3676}
3677
3678static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3679{
3680	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3681	u16 link_cntl2;
3682
3683	if (radeon_pcie_gen2 == 0)
3684		return;
3685
3686	if (rdev->flags & RADEON_IS_IGP)
3687		return;
3688
3689	if (!(rdev->flags & RADEON_IS_PCIE))
3690		return;
3691
3692	/* x2 cards have a special sequence */
3693	if (ASIC_IS_X2(rdev))
3694		return;
3695
3696	/* only RV6xx+ chips are supported */
3697	if (rdev->family <= CHIP_R600)
3698		return;
3699
3700	/* 55 nm r6xx asics */
3701	if ((rdev->family == CHIP_RV670) ||
3702	    (rdev->family == CHIP_RV620) ||
3703	    (rdev->family == CHIP_RV635)) {
3704		/* advertise upconfig capability */
3705		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3706		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3707		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3708		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3709		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3710			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3711			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3712					     LC_RECONFIG_ARC_MISSING_ESCAPE);
3713			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3714			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3715		} else {
3716			link_width_cntl |= LC_UPCONFIGURE_DIS;
3717			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3718		}
3719	}
3720
3721	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3722	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3723	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3724
3725		/* 55 nm r6xx asics */
3726		if ((rdev->family == CHIP_RV670) ||
3727		    (rdev->family == CHIP_RV620) ||
3728		    (rdev->family == CHIP_RV635)) {
3729			WREG32(MM_CFGREGS_CNTL, 0x8);
3730			link_cntl2 = RREG32(0x4088);
3731			WREG32(MM_CFGREGS_CNTL, 0);
3732			/* not supported yet */
3733			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3734				return;
3735		}
3736
3737		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3738		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3739		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3740		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3741		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3742		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3743
3744		tmp = RREG32(0x541c);
3745		WREG32(0x541c, tmp | 0x8);
3746		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3747		link_cntl2 = RREG16(0x4088);
3748		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3749		link_cntl2 |= 0x2;
3750		WREG16(0x4088, link_cntl2);
3751		WREG32(MM_CFGREGS_CNTL, 0);
3752
3753		if ((rdev->family == CHIP_RV670) ||
3754		    (rdev->family == CHIP_RV620) ||
3755		    (rdev->family == CHIP_RV635)) {
3756			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3757			training_cntl &= ~LC_POINT_7_PLUS_EN;
3758			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3759		} else {
3760			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3761			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3762			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3763		}
3764
3765		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3766		speed_cntl |= LC_GEN2_EN_STRAP;
3767		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3768
3769	} else {
3770		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3771		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3772		if (1)
3773			link_width_cntl |= LC_UPCONFIGURE_DIS;
3774		else
3775			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3776		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3777	}
3778}
v3.5.6
  28#include <linux/slab.h>
  29#include <linux/seq_file.h>
  30#include <linux/firmware.h>
  31#include <linux/platform_device.h>
  32#include <linux/module.h>
  33#include "drmP.h"
  34#include "radeon_drm.h"
  35#include "radeon.h"
  36#include "radeon_asic.h"
  37#include "radeon_mode.h"
  38#include "r600d.h"
  39#include "atom.h"
  40#include "avivod.h"
  41
  42#define PFP_UCODE_SIZE 576
  43#define PM4_UCODE_SIZE 1792
  44#define RLC_UCODE_SIZE 768
  45#define R700_PFP_UCODE_SIZE 848
  46#define R700_PM4_UCODE_SIZE 1360
  47#define R700_RLC_UCODE_SIZE 1024
  48#define EVERGREEN_PFP_UCODE_SIZE 1120
  49#define EVERGREEN_PM4_UCODE_SIZE 1376
  50#define EVERGREEN_RLC_UCODE_SIZE 768
  51#define CAYMAN_RLC_UCODE_SIZE 1024
  52#define ARUBA_RLC_UCODE_SIZE 1536
  53
  54/* Firmware Names */
  55MODULE_FIRMWARE("radeon/R600_pfp.bin");
  56MODULE_FIRMWARE("radeon/R600_me.bin");
  57MODULE_FIRMWARE("radeon/RV610_pfp.bin");
  58MODULE_FIRMWARE("radeon/RV610_me.bin");
  59MODULE_FIRMWARE("radeon/RV630_pfp.bin");
  60MODULE_FIRMWARE("radeon/RV630_me.bin");
  61MODULE_FIRMWARE("radeon/RV620_pfp.bin");
  62MODULE_FIRMWARE("radeon/RV620_me.bin");
  63MODULE_FIRMWARE("radeon/RV635_pfp.bin");
  64MODULE_FIRMWARE("radeon/RV635_me.bin");
  65MODULE_FIRMWARE("radeon/RV670_pfp.bin");
  66MODULE_FIRMWARE("radeon/RV670_me.bin");
  67MODULE_FIRMWARE("radeon/RS780_pfp.bin");
  68MODULE_FIRMWARE("radeon/RS780_me.bin");
  69MODULE_FIRMWARE("radeon/RV770_pfp.bin");
  70MODULE_FIRMWARE("radeon/RV770_me.bin");
  71MODULE_FIRMWARE("radeon/RV730_pfp.bin");
  72MODULE_FIRMWARE("radeon/RV730_me.bin");
  73MODULE_FIRMWARE("radeon/RV710_pfp.bin");
  74MODULE_FIRMWARE("radeon/RV710_me.bin");
  75MODULE_FIRMWARE("radeon/R600_rlc.bin");
  76MODULE_FIRMWARE("radeon/R700_rlc.bin");
  77MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
  78MODULE_FIRMWARE("radeon/CEDAR_me.bin");
  79MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
  80MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
  81MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
  82MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
  83MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
  84MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
  85MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
  86MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
  87MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
  88MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
  89MODULE_FIRMWARE("radeon/PALM_pfp.bin");
  90MODULE_FIRMWARE("radeon/PALM_me.bin");
  91MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
  92MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
  93MODULE_FIRMWARE("radeon/SUMO_me.bin");
  94MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
  95MODULE_FIRMWARE("radeon/SUMO2_me.bin");
  96
  97int r600_debugfs_mc_info_init(struct radeon_device *rdev);
  98
  99/* r600,rv610,rv630,rv620,rv635,rv670 */
 100int r600_mc_wait_for_idle(struct radeon_device *rdev);
 101void r600_gpu_init(struct radeon_device *rdev);
 102void r600_fini(struct radeon_device *rdev);
 103void r600_irq_disable(struct radeon_device *rdev);
 104static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 105
 106/* get temperature in millidegrees */
 107int rv6xx_get_temp(struct radeon_device *rdev)
 108{
 109	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 110		ASIC_T_SHIFT;
 111	int actual_temp = temp & 0xff;
 112
 113	if (temp & 0x100)
 114		actual_temp -= 256;
 115
 116	return actual_temp * 1000;
 117}
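/* Worked example for the sign handling above (illustrative values, not
 * from hardware): a raw ASIC_T field of 0x045 has bit 8 clear, so
 * actual_temp = 0x45 = 69 C and the function returns 69000; a raw field
 * of 0x1fe has bit 8 set, so actual_temp = 0xfe - 0x100 = -2 C and the
 * function returns -2000.
 */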
 118
 119void r600_pm_get_dynpm_state(struct radeon_device *rdev)
 120{
 121	int i;
 122
 123	rdev->pm.dynpm_can_upclock = true;
 124	rdev->pm.dynpm_can_downclock = true;
 125
 126	/* power state array is low to high, default is first */
 127	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
 128		int min_power_state_index = 0;
 129
 130		if (rdev->pm.num_power_states > 2)
 131			min_power_state_index = 1;
 132
 133		switch (rdev->pm.dynpm_planned_action) {
 134		case DYNPM_ACTION_MINIMUM:
 135			rdev->pm.requested_power_state_index = min_power_state_index;
 136			rdev->pm.requested_clock_mode_index = 0;
 137			rdev->pm.dynpm_can_downclock = false;
 138			break;
 139		case DYNPM_ACTION_DOWNCLOCK:
 140			if (rdev->pm.current_power_state_index == min_power_state_index) {
 141				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
 142				rdev->pm.dynpm_can_downclock = false;
 143			} else {
 144				if (rdev->pm.active_crtc_count > 1) {
 145					for (i = 0; i < rdev->pm.num_power_states; i++) {
 146						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
 147							continue;
 148						else if (i >= rdev->pm.current_power_state_index) {
 149							rdev->pm.requested_power_state_index =
 150								rdev->pm.current_power_state_index;
 151							break;
 152						} else {
 153							rdev->pm.requested_power_state_index = i;
 154							break;
 155						}
 156					}
 157				} else {
 158					if (rdev->pm.current_power_state_index == 0)
 159						rdev->pm.requested_power_state_index =
 160							rdev->pm.num_power_states - 1;
 161					else
 162						rdev->pm.requested_power_state_index =
 163							rdev->pm.current_power_state_index - 1;
 164				}
 165			}
 166			rdev->pm.requested_clock_mode_index = 0;
 167			/* don't use the power state if crtcs are active and no display flag is set */
 168			if ((rdev->pm.active_crtc_count > 0) &&
 169			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
 170			     clock_info[rdev->pm.requested_clock_mode_index].flags &
 171			     RADEON_PM_MODE_NO_DISPLAY)) {
 172				rdev->pm.requested_power_state_index++;
 173			}
 174			break;
 175		case DYNPM_ACTION_UPCLOCK:
 176			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
 177				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
 178				rdev->pm.dynpm_can_upclock = false;
 179			} else {
 180				if (rdev->pm.active_crtc_count > 1) {
 181					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
 182						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
 183							continue;
 184						else if (i <= rdev->pm.current_power_state_index) {
 185							rdev->pm.requested_power_state_index =
 186								rdev->pm.current_power_state_index;
 187							break;
 188						} else {
 189							rdev->pm.requested_power_state_index = i;
 190							break;
 191						}
 192					}
 193				} else
 194					rdev->pm.requested_power_state_index =
 195						rdev->pm.current_power_state_index + 1;
 196			}
 197			rdev->pm.requested_clock_mode_index = 0;
 198			break;
 199		case DYNPM_ACTION_DEFAULT:
 200			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
 201			rdev->pm.requested_clock_mode_index = 0;
 202			rdev->pm.dynpm_can_upclock = false;
 203			break;
 204		case DYNPM_ACTION_NONE:
 205		default:
 206			DRM_ERROR("Requested mode for undefined action\n");
 207			return;
 208		}
 209	} else {
 210		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
 211		/* for now just select the first power state and switch between clock modes */
 212		/* power state array is low to high, default is first (0) */
 213		if (rdev->pm.active_crtc_count > 1) {
 214			rdev->pm.requested_power_state_index = -1;
 215			/* start at 1 as we don't want the default mode */
 216			for (i = 1; i < rdev->pm.num_power_states; i++) {
 217				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
 218					continue;
 219				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
 220					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
 221					rdev->pm.requested_power_state_index = i;
 222					break;
 223				}
 224			}
 225			/* if nothing selected, grab the default state. */
 226			if (rdev->pm.requested_power_state_index == -1)
 227				rdev->pm.requested_power_state_index = 0;
 228		} else
 229			rdev->pm.requested_power_state_index = 1;
 230
 231		switch (rdev->pm.dynpm_planned_action) {
 232		case DYNPM_ACTION_MINIMUM:
 233			rdev->pm.requested_clock_mode_index = 0;
 234			rdev->pm.dynpm_can_downclock = false;
 235			break;
 236		case DYNPM_ACTION_DOWNCLOCK:
 237			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
 238				if (rdev->pm.current_clock_mode_index == 0) {
 239					rdev->pm.requested_clock_mode_index = 0;
 240					rdev->pm.dynpm_can_downclock = false;
 241				} else
 242					rdev->pm.requested_clock_mode_index =
 243						rdev->pm.current_clock_mode_index - 1;
 244			} else {
 245				rdev->pm.requested_clock_mode_index = 0;
 246				rdev->pm.dynpm_can_downclock = false;
 247			}
 248			/* don't use the power state if crtcs are active and no display flag is set */
 249			if ((rdev->pm.active_crtc_count > 0) &&
 250			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
 251			     clock_info[rdev->pm.requested_clock_mode_index].flags &
 252			     RADEON_PM_MODE_NO_DISPLAY)) {
 253				rdev->pm.requested_clock_mode_index++;
 254			}
 255			break;
 256		case DYNPM_ACTION_UPCLOCK:
 257			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
 258				if (rdev->pm.current_clock_mode_index ==
 259				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
 260					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
 261					rdev->pm.dynpm_can_upclock = false;
 262				} else
 263					rdev->pm.requested_clock_mode_index =
 264						rdev->pm.current_clock_mode_index + 1;
 265			} else {
 266				rdev->pm.requested_clock_mode_index =
 267					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
 268				rdev->pm.dynpm_can_upclock = false;
 269			}
 270			break;
 271		case DYNPM_ACTION_DEFAULT:
 272			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
 273			rdev->pm.requested_clock_mode_index = 0;
 274			rdev->pm.dynpm_can_upclock = false;
 275			break;
 276		case DYNPM_ACTION_NONE:
 277		default:
 278			DRM_ERROR("Requested mode for undefined action\n");
 279			return;
 280		}
 281	}
 282
 283	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
 284		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
 285		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
 286		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
 287		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
 288		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
 289		  pcie_lanes);
 290}
 291
 292void rs780_pm_init_profile(struct radeon_device *rdev)
 293{
 294	if (rdev->pm.num_power_states == 2) {
 295		/* default */
 296		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 297		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 298		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 299		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 300		/* low sh */
 301		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
 302		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
 303		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 304		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 305		/* mid sh */
 306		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
 307		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
 308		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 309		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 310		/* high sh */
 311		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
 312		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
 313		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 314		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
 315		/* low mh */
 316		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
 317		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
 318		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 319		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 320		/* mid mh */
 321		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
 322		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
 323		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 324		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 325		/* high mh */
 326		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
 327		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
 328		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 329		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
 330	} else if (rdev->pm.num_power_states == 3) {
 331		/* default */
 332		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 333		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 334		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 335		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 336		/* low sh */
 337		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
 338		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
 339		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 340		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 341		/* mid sh */
 342		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
 343		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
 344		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 345		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 346		/* high sh */
 347		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
 348		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
 349		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 350		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
 351		/* low mh */
 352		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
 353		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
 354		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 355		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 356		/* mid mh */
 357		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
 358		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
 359		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 360		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 361		/* high mh */
 362		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
 363		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
 364		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 365		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
 366	} else {
 367		/* default */
 368		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 369		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 370		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 371		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 372		/* low sh */
 373		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
 374		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
 375		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 376		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 377		/* mid sh */
 378		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
 379		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
 380		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 381		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 382		/* high sh */
 383		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
 384		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
 385		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 386		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
 387		/* low mh */
 388		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
 389		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
 390		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 391		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 392		/* mid mh */
 393		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
 394		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
 395		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 396		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 397		/* high mh */
 398		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
 399		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
 400		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 401		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
 402	}
 403}
 404
 405void r600_pm_init_profile(struct radeon_device *rdev)
 406{
 407	int idx;
 408
 409	if (rdev->family == CHIP_R600) {
 410		/* XXX */
 411		/* default */
 412		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 413		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 414		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 415		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 416		/* low sh */
 417		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 418		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 419		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 420		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 421		/* mid sh */
 422		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 423		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 424		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 425		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 426		/* high sh */
 427		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 428		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 429		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 430		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
 431		/* low mh */
 432		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 433		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 434		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 435		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 436		/* mid mh */
 437		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 438		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 439		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 440		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 441		/* high mh */
 442		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 443		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 444		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 445		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
 446	} else {
 447		if (rdev->pm.num_power_states < 4) {
 448			/* default */
 449			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 450			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 451			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 452			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
 453			/* low sh */
 454			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
 455			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
 456			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 457			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 458			/* mid sh */
 459			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
 460			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
 461			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 462			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
 463			/* high sh */
 464			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
 465			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
 466			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 467			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
 468			/* low mh */
 469			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
 470			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
 471			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 472			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 473			/* mid mh */
 474			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
 475			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
 476			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 477			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
 478			/* high mh */
 479			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
 480			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
 481			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 482			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
 483		} else {
 484			/* default */
 485			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 486			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 487			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 488			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
 489			/* low sh */
 490			if (rdev->flags & RADEON_IS_MOBILITY)
 491				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 492			else
 493				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 494			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
 495			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
 496			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 497			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 498			/* mid sh */
 499			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
 500			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
 501			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 502			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
 503			/* high sh */
 504			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 505			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
 506			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
 507			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 508			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
 509			/* low mh */
 510			if (rdev->flags & RADEON_IS_MOBILITY)
 511				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
 512			else
 513				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
 514			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
 515			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
 516			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 517			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 518			/* mid mh */
 519			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
 520			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
 521			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 522			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
 523			/* high mh */
 524			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
 525			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
 526			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
 527			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 528			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
 529		}
 530	}
 531}
 532
 533void r600_pm_misc(struct radeon_device *rdev)
 534{
 535	int req_ps_idx = rdev->pm.requested_power_state_index;
 536	int req_cm_idx = rdev->pm.requested_clock_mode_index;
 537	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 538	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 539
 540	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 541		/* 0xff01 is a flag rather than an actual voltage */
 542		if (voltage->voltage == 0xff01)
 543			return;
 544		if (voltage->voltage != rdev->pm.current_vddc) {
 545			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 546			rdev->pm.current_vddc = voltage->voltage;
 547			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
 548		}
 549	}
 550}
 551
 552bool r600_gui_idle(struct radeon_device *rdev)
 553{
 554	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
 555		return false;
 556	else
 557		return true;
 558}
 559
 560/* hpd for digital panel detect/disconnect */
 561bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 562{
 563	bool connected = false;
 564
 565	if (ASIC_IS_DCE3(rdev)) {
 566		switch (hpd) {
 567		case RADEON_HPD_1:
 568			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
 569				connected = true;
 570			break;
 571		case RADEON_HPD_2:
 572			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
 573				connected = true;
 574			break;
 575		case RADEON_HPD_3:
 576			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
 577				connected = true;
 578			break;
 579		case RADEON_HPD_4:
 580			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
 581				connected = true;
 582			break;
 583			/* DCE 3.2 */
 584		case RADEON_HPD_5:
 585			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
 586				connected = true;
 587			break;
 588		case RADEON_HPD_6:
 589			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
 590				connected = true;
 591			break;
 592		default:
 593			break;
 594		}
 595	} else {
 596		switch (hpd) {
 597		case RADEON_HPD_1:
 598			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
 599				connected = true;
 600			break;
 601		case RADEON_HPD_2:
 602			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
 603				connected = true;
 604			break;
 605		case RADEON_HPD_3:
 606			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
 607				connected = true;
 608			break;
 609		default:
 610			break;
 611		}
 612	}
 613	return connected;
 614}
 615
 616void r600_hpd_set_polarity(struct radeon_device *rdev,
 617			   enum radeon_hpd_id hpd)
 618{
 619	u32 tmp;
 620	bool connected = r600_hpd_sense(rdev, hpd);
 621
 622	if (ASIC_IS_DCE3(rdev)) {
 623		switch (hpd) {
 624		case RADEON_HPD_1:
 625			tmp = RREG32(DC_HPD1_INT_CONTROL);
 626			if (connected)
 627				tmp &= ~DC_HPDx_INT_POLARITY;
 628			else
 629				tmp |= DC_HPDx_INT_POLARITY;
 630			WREG32(DC_HPD1_INT_CONTROL, tmp);
 631			break;
 632		case RADEON_HPD_2:
 633			tmp = RREG32(DC_HPD2_INT_CONTROL);
 634			if (connected)
 635				tmp &= ~DC_HPDx_INT_POLARITY;
 636			else
 637				tmp |= DC_HPDx_INT_POLARITY;
 638			WREG32(DC_HPD2_INT_CONTROL, tmp);
 639			break;
 640		case RADEON_HPD_3:
 641			tmp = RREG32(DC_HPD3_INT_CONTROL);
 642			if (connected)
 643				tmp &= ~DC_HPDx_INT_POLARITY;
 644			else
 645				tmp |= DC_HPDx_INT_POLARITY;
 646			WREG32(DC_HPD3_INT_CONTROL, tmp);
 647			break;
 648		case RADEON_HPD_4:
 649			tmp = RREG32(DC_HPD4_INT_CONTROL);
 650			if (connected)
 651				tmp &= ~DC_HPDx_INT_POLARITY;
 652			else
 653				tmp |= DC_HPDx_INT_POLARITY;
 654			WREG32(DC_HPD4_INT_CONTROL, tmp);
 655			break;
 656		case RADEON_HPD_5:
 657			tmp = RREG32(DC_HPD5_INT_CONTROL);
 658			if (connected)
 659				tmp &= ~DC_HPDx_INT_POLARITY;
 660			else
 661				tmp |= DC_HPDx_INT_POLARITY;
 662			WREG32(DC_HPD5_INT_CONTROL, tmp);
 663			break;
 664			/* DCE 3.2 */
 665		case RADEON_HPD_6:
 666			tmp = RREG32(DC_HPD6_INT_CONTROL);
 667			if (connected)
 668				tmp &= ~DC_HPDx_INT_POLARITY;
 669			else
 670				tmp |= DC_HPDx_INT_POLARITY;
 671			WREG32(DC_HPD6_INT_CONTROL, tmp);
 672			break;
 673		default:
 674			break;
 675		}
 676	} else {
 677		switch (hpd) {
 678		case RADEON_HPD_1:
 679			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
 680			if (connected)
 681				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
 682			else
 683				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
 684			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 685			break;
 686		case RADEON_HPD_2:
 687			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
 688			if (connected)
 689				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
 690			else
 691				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
 692			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 693			break;
 694		case RADEON_HPD_3:
 695			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
 696			if (connected)
 697				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
 698			else
 699				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
 700			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 701			break;
 702		default:
 703			break;
 704		}
 705	}
 706}
 707
 708void r600_hpd_init(struct radeon_device *rdev)
 709{
 710	struct drm_device *dev = rdev->ddev;
 711	struct drm_connector *connector;
 712
 713	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 714		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 715
 716		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 717		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 718			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
 719			 * aux dp channel on iMacs; this helps (but does not completely fix)
 720			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 721			 */
 722			continue;
 723		}
 724		if (ASIC_IS_DCE3(rdev)) {
 725			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
 726			if (ASIC_IS_DCE32(rdev))
 727				tmp |= DC_HPDx_EN;
 728
 729			switch (radeon_connector->hpd.hpd) {
 730			case RADEON_HPD_1:
 731				WREG32(DC_HPD1_CONTROL, tmp);
 732				rdev->irq.hpd[0] = true;
 733				break;
 734			case RADEON_HPD_2:
 735				WREG32(DC_HPD2_CONTROL, tmp);
 736				rdev->irq.hpd[1] = true;
 737				break;
 738			case RADEON_HPD_3:
 739				WREG32(DC_HPD3_CONTROL, tmp);
 740				rdev->irq.hpd[2] = true;
 741				break;
 742			case RADEON_HPD_4:
 743				WREG32(DC_HPD4_CONTROL, tmp);
 744				rdev->irq.hpd[3] = true;
 745				break;
 746				/* DCE 3.2 */
 747			case RADEON_HPD_5:
 748				WREG32(DC_HPD5_CONTROL, tmp);
 749				rdev->irq.hpd[4] = true;
 750				break;
 751			case RADEON_HPD_6:
 752				WREG32(DC_HPD6_CONTROL, tmp);
 753				rdev->irq.hpd[5] = true;
 754				break;
 755			default:
 756				break;
 757			}
 758		} else {
 759			switch (radeon_connector->hpd.hpd) {
 760			case RADEON_HPD_1:
 761				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
 762				rdev->irq.hpd[0] = true;
 763				break;
 764			case RADEON_HPD_2:
 765				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
 766				rdev->irq.hpd[1] = true;
 767				break;
 768			case RADEON_HPD_3:
 769				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
 770				rdev->irq.hpd[2] = true;
 771				break;
 772			default:
 773				break;
 774			}
 775		}
 776		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 777	}
 778	if (rdev->irq.installed)
 779		r600_irq_set(rdev);
 780}
 781
 782void r600_hpd_fini(struct radeon_device *rdev)
 783{
 784	struct drm_device *dev = rdev->ddev;
 785	struct drm_connector *connector;
 786
 787	if (ASIC_IS_DCE3(rdev)) {
 788		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 789			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 790			switch (radeon_connector->hpd.hpd) {
 791			case RADEON_HPD_1:
 792				WREG32(DC_HPD1_CONTROL, 0);
 793				rdev->irq.hpd[0] = false;
 794				break;
 795			case RADEON_HPD_2:
 796				WREG32(DC_HPD2_CONTROL, 0);
 797				rdev->irq.hpd[1] = false;
 798				break;
 799			case RADEON_HPD_3:
 800				WREG32(DC_HPD3_CONTROL, 0);
 801				rdev->irq.hpd[2] = false;
 802				break;
 803			case RADEON_HPD_4:
 804				WREG32(DC_HPD4_CONTROL, 0);
 805				rdev->irq.hpd[3] = false;
 806				break;
 807				/* DCE 3.2 */
 808			case RADEON_HPD_5:
 809				WREG32(DC_HPD5_CONTROL, 0);
 810				rdev->irq.hpd[4] = false;
 811				break;
 812			case RADEON_HPD_6:
 813				WREG32(DC_HPD6_CONTROL, 0);
 814				rdev->irq.hpd[5] = false;
 815				break;
 816			default:
 817				break;
 818			}
 819		}
 820	} else {
 821		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 822			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 823			switch (radeon_connector->hpd.hpd) {
 824			case RADEON_HPD_1:
 825				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
 826				rdev->irq.hpd[0] = false;
 827				break;
 828			case RADEON_HPD_2:
 829				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
 830				rdev->irq.hpd[1] = false;
 831				break;
 832			case RADEON_HPD_3:
 833				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
 834				rdev->irq.hpd[2] = false;
 835				break;
 836			default:
 837				break;
 838			}
 839		}
 840	}
 841}
 842
 843/*
 844 * R600 PCIE GART
 845 */
 846void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
 847{
 848	unsigned i;
 849	u32 tmp;
 850
 851	/* flush hdp cache so updates hit vram */
 852	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
 853	    !(rdev->flags & RADEON_IS_AGP)) {
 854		void __iomem *ptr = (void *)rdev->gart.ptr;
 855		u32 tmp;
 856
 857		/* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
 858		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
 859		 * The new method seems to cause problems on some AGP cards, so just
 860		 * use the old method for them.
 861		 */
 862		WREG32(HDP_DEBUG1, 0);
 863		tmp = readl((void __iomem *)ptr);
 864	} else
 865		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 866
 867	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
 868	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
 869	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
 870	for (i = 0; i < rdev->usec_timeout; i++) {
 871		/* read MC_STATUS */
 872		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
 873		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
 874		if (tmp == 2) {
 875			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
 876			return;
 877		}
 878		if (tmp) {
 879			return;
 880		}
 881		udelay(1);
 882	}
 883}
 884
 885int r600_pcie_gart_init(struct radeon_device *rdev)
 886{
 887	int r;
 888
 889	if (rdev->gart.robj) {
 890		WARN(1, "R600 PCIE GART already initialized\n");
 891		return 0;
 892	}
 893	/* Initialize common gart structure */
 894	r = radeon_gart_init(rdev);
 895	if (r)
 896		return r;
 897	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
 898	return radeon_gart_table_vram_alloc(rdev);
 899}
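/* Sizing sketch for the allocation above (illustrative numbers): each
 * page table entry is 8 bytes, so a 512 MiB GTT with 4 KiB GPU pages
 * covers 512 MiB / 4 KiB = 131072 pages and needs 131072 * 8 bytes
 * = 1 MiB of VRAM for the table.
 */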
 900
 901int r600_pcie_gart_enable(struct radeon_device *rdev)
 902{
 903	u32 tmp;
 904	int r, i;
 905
 906	if (rdev->gart.robj == NULL) {
 907		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 908		return -EINVAL;
 909	}
 910	r = radeon_gart_table_vram_pin(rdev);
 911	if (r)
 912		return r;
 913	radeon_gart_restore(rdev);
 914
 915	/* Setup L2 cache */
 916	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
 917				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
 918				EFFECTIVE_L2_QUEUE_SIZE(7));
 919	WREG32(VM_L2_CNTL2, 0);
 920	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
 921	/* Setup TLB control */
 922	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
 923		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
 924		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
 925		ENABLE_WAIT_L2_QUERY;
 926	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
 927	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 928	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
 929	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
 930	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
 931	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
 932	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
 933	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
 934	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
 935	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
 936	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
 937	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
 938	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 939	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 940	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
 941	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
 942	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
 943	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
 944				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 945	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 946			(u32)(rdev->dummy_page.addr >> 12));
 947	for (i = 1; i < 7; i++)
 948		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 949
 950	r600_pcie_gart_tlb_flush(rdev);
 951	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 952		 (unsigned)(rdev->mc.gtt_size >> 20),
 953		 (unsigned long long)rdev->gart.table_addr);
 954	rdev->gart.ready = true;
 955	return 0;
 956}
 957
 958void r600_pcie_gart_disable(struct radeon_device *rdev)
 959{
 960	u32 tmp;
 961	int i;
 962
 963	/* Disable all tables */
 964	for (i = 0; i < 7; i++)
 965		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 966
 967	/* Disable L2 cache */
 968	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
 969				EFFECTIVE_L2_QUEUE_SIZE(7));
 970	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
 971	/* Setup L1 TLB control */
 972	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
 973		ENABLE_WAIT_L2_QUERY;
 974	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
 975	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
 976	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
 977	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
 978	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
 979	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
 980	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
 981	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
 982	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
 983	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
 984	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
 985	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 986	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 987	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
 988	radeon_gart_table_vram_unpin(rdev);
 989}
 990
 991void r600_pcie_gart_fini(struct radeon_device *rdev)
 992{
 993	radeon_gart_fini(rdev);
 994	r600_pcie_gart_disable(rdev);
 995	radeon_gart_table_vram_free(rdev);
 996}
 997
 998void r600_agp_enable(struct radeon_device *rdev)
 999{
1000	u32 tmp;
1001	int i;
1002
1003	/* Setup L2 cache */
1004	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1005				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1006				EFFECTIVE_L2_QUEUE_SIZE(7));
1007	WREG32(VM_L2_CNTL2, 0);
1008	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1009	/* Setup TLB control */
1010	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1011		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1012		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1013		ENABLE_WAIT_L2_QUERY;
1014	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1015	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1016	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1017	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1018	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1019	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1020	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1021	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1022	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1023	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1024	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1025	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1026	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1027	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1028	for (i = 0; i < 7; i++)
1029		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1030}
1031
1032int r600_mc_wait_for_idle(struct radeon_device *rdev)
1033{
1034	unsigned i;
1035	u32 tmp;
1036
1037	for (i = 0; i < rdev->usec_timeout; i++) {
 1039		/* check the MC busy bits in SRBM_STATUS */
1039		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1040		if (!tmp)
1041			return 0;
1042		udelay(1);
1043	}
1044	return -1;
1045}
1046
1047static void r600_mc_program(struct radeon_device *rdev)
1048{
1049	struct rv515_mc_save save;
1050	u32 tmp;
1051	int i, j;
1052
1053	/* Initialize HDP */
1054	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1055		WREG32((0x2c14 + j), 0x00000000);
1056		WREG32((0x2c18 + j), 0x00000000);
1057		WREG32((0x2c1c + j), 0x00000000);
1058		WREG32((0x2c20 + j), 0x00000000);
1059		WREG32((0x2c24 + j), 0x00000000);
1060	}
1061	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1062
1063	rv515_mc_stop(rdev, &save);
1064	if (r600_mc_wait_for_idle(rdev)) {
 1065		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1066	}
1067	/* Lockout access through VGA aperture (doesn't exist before R600) */
1068	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1069	/* Update configuration */
1070	if (rdev->flags & RADEON_IS_AGP) {
1071		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1072			/* VRAM before AGP */
1073			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1074				rdev->mc.vram_start >> 12);
1075			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1076				rdev->mc.gtt_end >> 12);
1077		} else {
1078			/* VRAM after AGP */
1079			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1080				rdev->mc.gtt_start >> 12);
1081			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1082				rdev->mc.vram_end >> 12);
1083		}
1084	} else {
1085		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1086		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1087	}
1088	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
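	/* MC_VM_FB_LOCATION packs the VRAM range in 16MB units (hence the
	 * >> 24): the top goes in bits 31:16 and the base in bits 15:0 */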
1089	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1090	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1091	WREG32(MC_VM_FB_LOCATION, tmp);
1092	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1093	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1094	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1095	if (rdev->flags & RADEON_IS_AGP) {
1096		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1097		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1098		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1099	} else {
1100		WREG32(MC_VM_AGP_BASE, 0);
1101		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1102		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1103	}
1104	if (r600_mc_wait_for_idle(rdev)) {
 1105		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1106	}
1107	rv515_mc_resume(rdev, &save);
1108	/* we need to own VRAM, so turn off the VGA renderer here
1109	 * to stop it overwriting our objects */
1110	rv515_vga_render_disable(rdev);
1111}
1112
 1113/**
 1114 * r600_vram_gtt_location - try to find VRAM & GTT location
 1115 * @rdev: radeon device structure holding all necessary information
 1116 * @mc: memory controller structure holding memory information
 1117 *
 1118 * Try to place VRAM at the same address as it occupies in the CPU
 1119 * (PCI) address space, as some GPUs seem to have issues when it is
 1120 * reprogrammed to a different address space.
 1121 *
 1122 * If there is not enough space to fit the invisible VRAM after the
 1123 * aperture, then we limit the VRAM size to the aperture.
 1124 *
 1125 * If we are using AGP, then we place VRAM adjacent to the AGP aperture,
 1126 * as the GPU needs them to form one contiguous range so that we can
 1127 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 1128 *
 1129 * This function never fails; in the worst case it limits VRAM or GTT.
 1130 *
 1131 * Note: GTT start, end and size should be initialized before calling
 1132 * this function on AGP platforms.
 1133 */
1134static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1135{
1136	u64 size_bf, size_af;
1137
1138	if (mc->mc_vram_size > 0xE0000000) {
1139		/* leave room for at least 512M GTT */
1140		dev_warn(rdev->dev, "limiting VRAM\n");
1141		mc->real_vram_size = 0xE0000000;
1142		mc->mc_vram_size = 0xE0000000;
1143	}
1144	if (rdev->flags & RADEON_IS_AGP) {
1145		size_bf = mc->gtt_start;
1146		size_af = 0xFFFFFFFF - mc->gtt_end;
1147		if (size_bf > size_af) {
1148			if (mc->mc_vram_size > size_bf) {
1149				dev_warn(rdev->dev, "limiting VRAM\n");
1150				mc->real_vram_size = size_bf;
1151				mc->mc_vram_size = size_bf;
1152			}
1153			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1154		} else {
1155			if (mc->mc_vram_size > size_af) {
1156				dev_warn(rdev->dev, "limiting VRAM\n");
1157				mc->real_vram_size = size_af;
1158				mc->mc_vram_size = size_af;
1159			}
1160			mc->vram_start = mc->gtt_end + 1;
1161		}
1162		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1163		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1164				mc->mc_vram_size >> 20, mc->vram_start,
1165				mc->vram_end, mc->real_vram_size >> 20);
1166	} else {
1167		u64 base = 0;
1168		if (rdev->flags & RADEON_IS_IGP) {
1169			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1170			base <<= 24;
1171		}
1172		radeon_vram_location(rdev, &rdev->mc, base);
1173		rdev->mc.gtt_base_align = 0;
1174		radeon_gtt_location(rdev, mc);
1175	}
1176}
1177
1178int r600_mc_init(struct radeon_device *rdev)
1179{
1180	u32 tmp;
1181	int chansize, numchan;
1182
 1183	/* Get VRAM information */
1184	rdev->mc.vram_is_ddr = true;
1185	tmp = RREG32(RAMCFG);
1186	if (tmp & CHANSIZE_OVERRIDE) {
1187		chansize = 16;
1188	} else if (tmp & CHANSIZE_MASK) {
1189		chansize = 64;
1190	} else {
1191		chansize = 32;
1192	}
1193	tmp = RREG32(CHMAP);
1194	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1195	case 0:
1196	default:
1197		numchan = 1;
1198		break;
1199	case 1:
1200		numchan = 2;
1201		break;
1202	case 2:
1203		numchan = 4;
1204		break;
1205	case 3:
1206		numchan = 8;
1207		break;
1208	}
1209	rdev->mc.vram_width = numchan * chansize;
 1210	/* Could the aperture size report 0? */
1211	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1212	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1213	/* Setup GPU memory space */
1214	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1215	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1216	rdev->mc.visible_vram_size = rdev->mc.aper_size;
1217	r600_vram_gtt_location(rdev, &rdev->mc);
1218
1219	if (rdev->flags & RADEON_IS_IGP) {
1220		rs690_pm_info(rdev);
1221		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1222	}
1223	radeon_update_bandwidth_info(rdev);
1224	return 0;
1225}
1226
1227int r600_vram_scratch_init(struct radeon_device *rdev)
1228{
1229	int r;
1230
1231	if (rdev->vram_scratch.robj == NULL) {
1232		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1233				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1234				     NULL, &rdev->vram_scratch.robj);
1235		if (r) {
1236			return r;
1237		}
1238	}
1239
1240	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1241	if (unlikely(r != 0))
1242		return r;
1243	r = radeon_bo_pin(rdev->vram_scratch.robj,
1244			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1245	if (r) {
1246		radeon_bo_unreserve(rdev->vram_scratch.robj);
1247		return r;
1248	}
1249	r = radeon_bo_kmap(rdev->vram_scratch.robj,
1250				(void **)&rdev->vram_scratch.ptr);
1251	if (r)
1252		radeon_bo_unpin(rdev->vram_scratch.robj);
1253	radeon_bo_unreserve(rdev->vram_scratch.robj);
1254
1255	return r;
1256}
1257
1258void r600_vram_scratch_fini(struct radeon_device *rdev)
1259{
1260	int r;
1261
1262	if (rdev->vram_scratch.robj == NULL) {
1263		return;
1264	}
1265	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1266	if (likely(r == 0)) {
1267		radeon_bo_kunmap(rdev->vram_scratch.robj);
1268		radeon_bo_unpin(rdev->vram_scratch.robj);
1269		radeon_bo_unreserve(rdev->vram_scratch.robj);
1270	}
1271	radeon_bo_unref(&rdev->vram_scratch.robj);
1272}
1273
 1274/* We don't check whether the GPU really needs a reset; we simply do
 1275 * the reset. It's up to the caller to determine if the GPU needs one.
 1276 * We might add a helper function to check that.
 1277 */
1278int r600_gpu_soft_reset(struct radeon_device *rdev)
1279{
1280	struct rv515_mc_save save;
1281	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1282				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1283				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1284				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1285				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1286				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1287				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1288				S_008010_GUI_ACTIVE(1);
1289	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1290			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1291			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1292			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1293			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1294			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1295			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1296			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1297	u32 tmp;
1298
1299	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1300		return 0;
1301
 1302	dev_info(rdev->dev, "GPU softreset\n");
1303	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
1304		RREG32(R_008010_GRBM_STATUS));
1305	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
1306		RREG32(R_008014_GRBM_STATUS2));
1307	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
1308		RREG32(R_000E50_SRBM_STATUS));
1309	rv515_mc_stop(rdev, &save);
1310	if (r600_mc_wait_for_idle(rdev)) {
 1311		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1312	}
1313	/* Disable CP parsing/prefetching */
1314	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 1315	/* Check if any of the rendering blocks are busy and reset them */
1316	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1317	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
1318		tmp = S_008020_SOFT_RESET_CR(1) |
1319			S_008020_SOFT_RESET_DB(1) |
1320			S_008020_SOFT_RESET_CB(1) |
1321			S_008020_SOFT_RESET_PA(1) |
1322			S_008020_SOFT_RESET_SC(1) |
1323			S_008020_SOFT_RESET_SMX(1) |
1324			S_008020_SOFT_RESET_SPI(1) |
1325			S_008020_SOFT_RESET_SX(1) |
1326			S_008020_SOFT_RESET_SH(1) |
1327			S_008020_SOFT_RESET_TC(1) |
1328			S_008020_SOFT_RESET_TA(1) |
1329			S_008020_SOFT_RESET_VC(1) |
1330			S_008020_SOFT_RESET_VGT(1);
1331		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1332		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1333		RREG32(R_008020_GRBM_SOFT_RESET);
1334		mdelay(15);
1335		WREG32(R_008020_GRBM_SOFT_RESET, 0);
1336	}
1337	/* Reset CP (we always reset CP) */
1338	tmp = S_008020_SOFT_RESET_CP(1);
1339	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1340	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1341	RREG32(R_008020_GRBM_SOFT_RESET);
1342	mdelay(15);
1343	WREG32(R_008020_GRBM_SOFT_RESET, 0);
1344	/* Wait a little for things to settle down */
1345	mdelay(1);
1346	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
1347		RREG32(R_008010_GRBM_STATUS));
1348	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
1349		RREG32(R_008014_GRBM_STATUS2));
1350	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
1351		RREG32(R_000E50_SRBM_STATUS));
1352	rv515_mc_resume(rdev, &save);
1353	return 0;
1354}
1355
1356bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1357{
1358	u32 srbm_status;
1359	u32 grbm_status;
1360	u32 grbm_status2;
1361
1362	srbm_status = RREG32(R_000E50_SRBM_STATUS);
1363	grbm_status = RREG32(R_008010_GRBM_STATUS);
1364	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1365	if (!G_008010_GUI_ACTIVE(grbm_status)) {
1366		radeon_ring_lockup_update(ring);
1367		return false;
1368	}
1369	/* force CP activities */
1370	radeon_ring_force_activity(rdev, ring);
1371	return radeon_ring_test_lockup(rdev, ring);
1372}
1373
1374int r600_asic_reset(struct radeon_device *rdev)
1375{
1376	return r600_gpu_soft_reset(rdev);
1377}
1378
1379u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1380			      u32 tiling_pipe_num,
1381			      u32 max_rb_num,
1382			      u32 total_max_rb_num,
1383			      u32 disabled_rb_mask)
1384{
1385	u32 rendering_pipe_num, rb_num_width, req_rb_num;
1386	u32 pipe_rb_ratio, pipe_rb_remain;
1387	u32 data = 0, mask = 1 << (max_rb_num - 1);
1388	unsigned i, j;
1389
1390	/* mask out the RBs that don't exist on that asic */
1391	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
1392
1393	rendering_pipe_num = 1 << tiling_pipe_num;
1394	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1395	BUG_ON(rendering_pipe_num < req_rb_num);
1396
1397	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1398	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
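	/*
	 * Worked example with hypothetical numbers: 4 rendering pipes
	 * (tiling_pipe_num = 2), max_rb_num = 4, total_max_rb_num = 8 and
	 * RB1 fused off (disabled_rb_mask = 0x2): after masking off the
	 * nonexistent RBs, req_rb_num = 3, so pipe_rb_ratio = 1 and
	 * pipe_rb_remain = 1.  The loop below then emits one
	 * rb_num_width-wide field per rendering pipe and hands the one
	 * spare pipe to the first enabled RB it visits.
	 */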
1399
1400	if (rdev->family <= CHIP_RV740) {
1401		/* r6xx/r7xx */
1402		rb_num_width = 2;
1403	} else {
1404		/* eg+ */
1405		rb_num_width = 4;
1406	}
1407
1408	for (i = 0; i < max_rb_num; i++) {
1409		if (!(mask & disabled_rb_mask)) {
1410			for (j = 0; j < pipe_rb_ratio; j++) {
1411				data <<= rb_num_width;
1412				data |= max_rb_num - i - 1;
1413			}
1414			if (pipe_rb_remain) {
1415				data <<= rb_num_width;
1416				data |= max_rb_num - i - 1;
1417				pipe_rb_remain--;
1418			}
1419		}
1420		mask >>= 1;
1421	}
1422
1423	return data;
1424}
1425
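/* popcount: returns the number of set bits in val (equivalent to hweight32()) */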
1426int r600_count_pipe_bits(uint32_t val)
1427{
1428	int i, ret = 0;
1429
1430	for (i = 0; i < 32; i++) {
1431		ret += val & 1;
1432		val >>= 1;
1433	}
1434	return ret;
1435}
1436
1437void r600_gpu_init(struct radeon_device *rdev)
1438{
1439	u32 tiling_config;
1440	u32 ramcfg;
1441	u32 cc_rb_backend_disable;
1442	u32 cc_gc_shader_pipe_config;
1443	u32 tmp;
1444	int i, j;
1445	u32 sq_config;
1446	u32 sq_gpr_resource_mgmt_1 = 0;
1447	u32 sq_gpr_resource_mgmt_2 = 0;
1448	u32 sq_thread_resource_mgmt = 0;
1449	u32 sq_stack_resource_mgmt_1 = 0;
1450	u32 sq_stack_resource_mgmt_2 = 0;
1451	u32 disabled_rb_mask;
1452
1453	rdev->config.r600.tiling_group_size = 256;
1454	switch (rdev->family) {
1455	case CHIP_R600:
1456		rdev->config.r600.max_pipes = 4;
1457		rdev->config.r600.max_tile_pipes = 8;
1458		rdev->config.r600.max_simds = 4;
1459		rdev->config.r600.max_backends = 4;
1460		rdev->config.r600.max_gprs = 256;
1461		rdev->config.r600.max_threads = 192;
1462		rdev->config.r600.max_stack_entries = 256;
1463		rdev->config.r600.max_hw_contexts = 8;
1464		rdev->config.r600.max_gs_threads = 16;
1465		rdev->config.r600.sx_max_export_size = 128;
1466		rdev->config.r600.sx_max_export_pos_size = 16;
1467		rdev->config.r600.sx_max_export_smx_size = 128;
1468		rdev->config.r600.sq_num_cf_insts = 2;
1469		break;
1470	case CHIP_RV630:
1471	case CHIP_RV635:
1472		rdev->config.r600.max_pipes = 2;
1473		rdev->config.r600.max_tile_pipes = 2;
1474		rdev->config.r600.max_simds = 3;
1475		rdev->config.r600.max_backends = 1;
1476		rdev->config.r600.max_gprs = 128;
1477		rdev->config.r600.max_threads = 192;
1478		rdev->config.r600.max_stack_entries = 128;
1479		rdev->config.r600.max_hw_contexts = 8;
1480		rdev->config.r600.max_gs_threads = 4;
1481		rdev->config.r600.sx_max_export_size = 128;
1482		rdev->config.r600.sx_max_export_pos_size = 16;
1483		rdev->config.r600.sx_max_export_smx_size = 128;
1484		rdev->config.r600.sq_num_cf_insts = 2;
1485		break;
1486	case CHIP_RV610:
1487	case CHIP_RV620:
1488	case CHIP_RS780:
1489	case CHIP_RS880:
1490		rdev->config.r600.max_pipes = 1;
1491		rdev->config.r600.max_tile_pipes = 1;
1492		rdev->config.r600.max_simds = 2;
1493		rdev->config.r600.max_backends = 1;
1494		rdev->config.r600.max_gprs = 128;
1495		rdev->config.r600.max_threads = 192;
1496		rdev->config.r600.max_stack_entries = 128;
1497		rdev->config.r600.max_hw_contexts = 4;
1498		rdev->config.r600.max_gs_threads = 4;
1499		rdev->config.r600.sx_max_export_size = 128;
1500		rdev->config.r600.sx_max_export_pos_size = 16;
1501		rdev->config.r600.sx_max_export_smx_size = 128;
1502		rdev->config.r600.sq_num_cf_insts = 1;
1503		break;
1504	case CHIP_RV670:
1505		rdev->config.r600.max_pipes = 4;
1506		rdev->config.r600.max_tile_pipes = 4;
1507		rdev->config.r600.max_simds = 4;
1508		rdev->config.r600.max_backends = 4;
1509		rdev->config.r600.max_gprs = 192;
1510		rdev->config.r600.max_threads = 192;
1511		rdev->config.r600.max_stack_entries = 256;
1512		rdev->config.r600.max_hw_contexts = 8;
1513		rdev->config.r600.max_gs_threads = 16;
1514		rdev->config.r600.sx_max_export_size = 128;
1515		rdev->config.r600.sx_max_export_pos_size = 16;
1516		rdev->config.r600.sx_max_export_smx_size = 128;
1517		rdev->config.r600.sq_num_cf_insts = 2;
1518		break;
1519	default:
1520		break;
1521	}
1522
1523	/* Initialize HDP */
1524	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1525		WREG32((0x2c14 + j), 0x00000000);
1526		WREG32((0x2c18 + j), 0x00000000);
1527		WREG32((0x2c1c + j), 0x00000000);
1528		WREG32((0x2c20 + j), 0x00000000);
1529		WREG32((0x2c24 + j), 0x00000000);
1530	}
1531
1532	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1533
1534	/* Setup tiling */
1535	tiling_config = 0;
1536	ramcfg = RREG32(RAMCFG);
1537	switch (rdev->config.r600.max_tile_pipes) {
1538	case 1:
1539		tiling_config |= PIPE_TILING(0);
1540		break;
1541	case 2:
1542		tiling_config |= PIPE_TILING(1);
1543		break;
1544	case 4:
1545		tiling_config |= PIPE_TILING(2);
1546		break;
1547	case 8:
1548		tiling_config |= PIPE_TILING(3);
1549		break;
1550	default:
1551		break;
1552	}
1553	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1554	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1555	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1556	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1557
1558	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1559	if (tmp > 3) {
1560		tiling_config |= ROW_TILING(3);
1561		tiling_config |= SAMPLE_SPLIT(3);
1562	} else {
1563		tiling_config |= ROW_TILING(tmp);
1564		tiling_config |= SAMPLE_SPLIT(tmp);
1565	}
1566	tiling_config |= BANK_SWAPS(1);
1567
1568	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1569	tmp = R6XX_MAX_BACKENDS -
1570		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1571	if (tmp < rdev->config.r600.max_backends) {
1572		rdev->config.r600.max_backends = tmp;
1573	}
1574
1575	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1576	tmp = R6XX_MAX_PIPES -
1577		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1578	if (tmp < rdev->config.r600.max_pipes) {
1579		rdev->config.r600.max_pipes = tmp;
1580	}
1581	tmp = R6XX_MAX_SIMDS -
1582		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1583	if (tmp < rdev->config.r600.max_simds) {
1584		rdev->config.r600.max_simds = tmp;
1585	}
1586
1587	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1588	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1589	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1590					R6XX_MAX_BACKENDS, disabled_rb_mask);
1591	tiling_config |= tmp << 16;
1592	rdev->config.r600.backend_map = tmp;
1593
1594	rdev->config.r600.tile_config = tiling_config;
1595	WREG32(GB_TILING_CONFIG, tiling_config);
1596	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1597	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1598
1599	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1600	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1601	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1602
1603	/* Setup some CP states */
1604	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1605	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1606
1607	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1608			     SYNC_WALKER | SYNC_ALIGNER));
1609	/* Setup various GPU states */
1610	if (rdev->family == CHIP_RV670)
1611		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1612
1613	tmp = RREG32(SX_DEBUG_1);
1614	tmp |= SMX_EVENT_RELEASE;
 1615	if (rdev->family > CHIP_R600)
1616		tmp |= ENABLE_NEW_SMX_ADDRESS;
1617	WREG32(SX_DEBUG_1, tmp);
1618
1619	if (((rdev->family) == CHIP_R600) ||
1620	    ((rdev->family) == CHIP_RV630) ||
1621	    ((rdev->family) == CHIP_RV610) ||
1622	    ((rdev->family) == CHIP_RV620) ||
1623	    ((rdev->family) == CHIP_RS780) ||
1624	    ((rdev->family) == CHIP_RS880)) {
1625		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1626	} else {
1627		WREG32(DB_DEBUG, 0);
1628	}
1629	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1630			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1631
1632	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1633	WREG32(VGT_NUM_INSTANCES, 0);
1634
1635	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1636	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1637
1638	tmp = RREG32(SQ_MS_FIFO_SIZES);
1639	if (((rdev->family) == CHIP_RV610) ||
1640	    ((rdev->family) == CHIP_RV620) ||
1641	    ((rdev->family) == CHIP_RS780) ||
1642	    ((rdev->family) == CHIP_RS880)) {
1643		tmp = (CACHE_FIFO_SIZE(0xa) |
1644		       FETCH_FIFO_HIWATER(0xa) |
1645		       DONE_FIFO_HIWATER(0xe0) |
1646		       ALU_UPDATE_FIFO_HIWATER(0x8));
1647	} else if (((rdev->family) == CHIP_R600) ||
1648		   ((rdev->family) == CHIP_RV630)) {
1649		tmp &= ~DONE_FIFO_HIWATER(0xff);
1650		tmp |= DONE_FIFO_HIWATER(0x4);
1651	}
1652	WREG32(SQ_MS_FIFO_SIZES, tmp);
1653
1654	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1655	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
1656	 */
1657	sq_config = RREG32(SQ_CONFIG);
1658	sq_config &= ~(PS_PRIO(3) |
1659		       VS_PRIO(3) |
1660		       GS_PRIO(3) |
1661		       ES_PRIO(3));
1662	sq_config |= (DX9_CONSTS |
1663		      VC_ENABLE |
1664		      PS_PRIO(0) |
1665		      VS_PRIO(1) |
1666		      GS_PRIO(2) |
1667		      ES_PRIO(3));
1668
1669	if ((rdev->family) == CHIP_R600) {
1670		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1671					  NUM_VS_GPRS(124) |
1672					  NUM_CLAUSE_TEMP_GPRS(4));
1673		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1674					  NUM_ES_GPRS(0));
1675		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1676					   NUM_VS_THREADS(48) |
1677					   NUM_GS_THREADS(4) |
1678					   NUM_ES_THREADS(4));
1679		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1680					    NUM_VS_STACK_ENTRIES(128));
1681		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1682					    NUM_ES_STACK_ENTRIES(0));
1683	} else if (((rdev->family) == CHIP_RV610) ||
1684		   ((rdev->family) == CHIP_RV620) ||
1685		   ((rdev->family) == CHIP_RS780) ||
1686		   ((rdev->family) == CHIP_RS880)) {
1687		/* no vertex cache */
1688		sq_config &= ~VC_ENABLE;
1689
1690		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1691					  NUM_VS_GPRS(44) |
1692					  NUM_CLAUSE_TEMP_GPRS(2));
1693		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1694					  NUM_ES_GPRS(17));
1695		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1696					   NUM_VS_THREADS(78) |
1697					   NUM_GS_THREADS(4) |
1698					   NUM_ES_THREADS(31));
1699		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1700					    NUM_VS_STACK_ENTRIES(40));
1701		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1702					    NUM_ES_STACK_ENTRIES(16));
1703	} else if (((rdev->family) == CHIP_RV630) ||
1704		   ((rdev->family) == CHIP_RV635)) {
1705		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1706					  NUM_VS_GPRS(44) |
1707					  NUM_CLAUSE_TEMP_GPRS(2));
1708		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1709					  NUM_ES_GPRS(18));
1710		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1711					   NUM_VS_THREADS(78) |
1712					   NUM_GS_THREADS(4) |
1713					   NUM_ES_THREADS(31));
1714		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1715					    NUM_VS_STACK_ENTRIES(40));
1716		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1717					    NUM_ES_STACK_ENTRIES(16));
1718	} else if ((rdev->family) == CHIP_RV670) {
1719		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1720					  NUM_VS_GPRS(44) |
1721					  NUM_CLAUSE_TEMP_GPRS(2));
1722		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1723					  NUM_ES_GPRS(17));
1724		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1725					   NUM_VS_THREADS(78) |
1726					   NUM_GS_THREADS(4) |
1727					   NUM_ES_THREADS(31));
1728		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1729					    NUM_VS_STACK_ENTRIES(64));
1730		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1731					    NUM_ES_STACK_ENTRIES(64));
1732	}
1733
1734	WREG32(SQ_CONFIG, sq_config);
1735	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
1736	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
1737	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1738	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1739	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1740
1741	if (((rdev->family) == CHIP_RV610) ||
1742	    ((rdev->family) == CHIP_RV620) ||
1743	    ((rdev->family) == CHIP_RS780) ||
1744	    ((rdev->family) == CHIP_RS880)) {
1745		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1746	} else {
1747		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1748	}
1749
1750	/* More default values. 2D/3D driver should adjust as needed */
1751	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1752					 S1_X(0x4) | S1_Y(0xc)));
1753	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1754					 S1_X(0x2) | S1_Y(0x2) |
1755					 S2_X(0xa) | S2_Y(0x6) |
1756					 S3_X(0x6) | S3_Y(0xa)));
1757	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1758					     S1_X(0x4) | S1_Y(0xc) |
1759					     S2_X(0x1) | S2_Y(0x6) |
1760					     S3_X(0xa) | S3_Y(0xe)));
1761	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1762					     S5_X(0x0) | S5_Y(0x0) |
1763					     S6_X(0xb) | S6_Y(0x4) |
1764					     S7_X(0x7) | S7_Y(0x8)));
1765
1766	WREG32(VGT_STRMOUT_EN, 0);
1767	tmp = rdev->config.r600.max_pipes * 16;
1768	switch (rdev->family) {
1769	case CHIP_RV610:
1770	case CHIP_RV620:
1771	case CHIP_RS780:
1772	case CHIP_RS880:
1773		tmp += 32;
1774		break;
1775	case CHIP_RV670:
1776		tmp += 128;
1777		break;
1778	default:
1779		break;
1780	}
1781	if (tmp > 256) {
1782		tmp = 256;
1783	}
1784	WREG32(VGT_ES_PER_GS, 128);
1785	WREG32(VGT_GS_PER_ES, tmp);
1786	WREG32(VGT_GS_PER_VS, 2);
1787	WREG32(VGT_GS_VERTEX_REUSE, 16);
1788
1789	/* more default values. 2D/3D driver should adjust as needed */
1790	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1791	WREG32(VGT_STRMOUT_EN, 0);
1792	WREG32(SX_MISC, 0);
1793	WREG32(PA_SC_MODE_CNTL, 0);
1794	WREG32(PA_SC_AA_CONFIG, 0);
1795	WREG32(PA_SC_LINE_STIPPLE, 0);
1796	WREG32(SPI_INPUT_Z, 0);
1797	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1798	WREG32(CB_COLOR7_FRAG, 0);
1799
1800	/* Clear render buffer base addresses */
1801	WREG32(CB_COLOR0_BASE, 0);
1802	WREG32(CB_COLOR1_BASE, 0);
1803	WREG32(CB_COLOR2_BASE, 0);
1804	WREG32(CB_COLOR3_BASE, 0);
1805	WREG32(CB_COLOR4_BASE, 0);
1806	WREG32(CB_COLOR5_BASE, 0);
1807	WREG32(CB_COLOR6_BASE, 0);
1808	WREG32(CB_COLOR7_BASE, 0);
1809	WREG32(CB_COLOR7_FRAG, 0);
1810
1811	switch (rdev->family) {
1812	case CHIP_RV610:
1813	case CHIP_RV620:
1814	case CHIP_RS780:
1815	case CHIP_RS880:
1816		tmp = TC_L2_SIZE(8);
1817		break;
1818	case CHIP_RV630:
1819	case CHIP_RV635:
1820		tmp = TC_L2_SIZE(4);
1821		break;
1822	case CHIP_R600:
1823		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1824		break;
1825	default:
1826		tmp = TC_L2_SIZE(0);
1827		break;
1828	}
1829	WREG32(TC_CNTL, tmp);
1830
1831	tmp = RREG32(HDP_HOST_PATH_CNTL);
1832	WREG32(HDP_HOST_PATH_CNTL, tmp);
1833
1834	tmp = RREG32(ARB_POP);
1835	tmp |= ENABLE_TC128;
1836	WREG32(ARB_POP, tmp);
1837
1838	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1839	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1840			       NUM_CLIP_SEQ(3)));
1841	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1842	WREG32(VC_ENHANCE, 0);
1843}
1844
1845
1846/*
1847 * Indirect registers accessor
1848 */
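/*
 * These registers sit behind an index/data pair: the register offset is
 * written to PCIE_PORT_INDEX and the value is then accessed through
 * PCIE_PORT_DATA.  The dummy reads are presumably there to flush the
 * posted writes before the following access.
 */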
1849u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1850{
1851	u32 r;
1852
1853	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1854	(void)RREG32(PCIE_PORT_INDEX);
1855	r = RREG32(PCIE_PORT_DATA);
1856	return r;
1857}
1858
1859void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1860{
1861	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1862	(void)RREG32(PCIE_PORT_INDEX);
1863	WREG32(PCIE_PORT_DATA, (v));
1864	(void)RREG32(PCIE_PORT_DATA);
1865}
1866
1867/*
1868 * CP & Ring
1869 */
1870void r600_cp_stop(struct radeon_device *rdev)
1871{
1872	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1873	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1874	WREG32(SCRATCH_UMSK, 0);
1875}
1876
1877int r600_init_microcode(struct radeon_device *rdev)
1878{
1879	struct platform_device *pdev;
1880	const char *chip_name;
1881	const char *rlc_chip_name;
1882	size_t pfp_req_size, me_req_size, rlc_req_size;
1883	char fw_name[30];
1884	int err;
1885
1886	DRM_DEBUG("\n");
1887
1888	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1889	err = IS_ERR(pdev);
1890	if (err) {
1891		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1892		return -EINVAL;
1893	}
1894
1895	switch (rdev->family) {
1896	case CHIP_R600:
1897		chip_name = "R600";
1898		rlc_chip_name = "R600";
1899		break;
1900	case CHIP_RV610:
1901		chip_name = "RV610";
1902		rlc_chip_name = "R600";
1903		break;
1904	case CHIP_RV630:
1905		chip_name = "RV630";
1906		rlc_chip_name = "R600";
1907		break;
1908	case CHIP_RV620:
1909		chip_name = "RV620";
1910		rlc_chip_name = "R600";
1911		break;
1912	case CHIP_RV635:
1913		chip_name = "RV635";
1914		rlc_chip_name = "R600";
1915		break;
1916	case CHIP_RV670:
1917		chip_name = "RV670";
1918		rlc_chip_name = "R600";
1919		break;
1920	case CHIP_RS780:
1921	case CHIP_RS880:
1922		chip_name = "RS780";
1923		rlc_chip_name = "R600";
1924		break;
1925	case CHIP_RV770:
1926		chip_name = "RV770";
1927		rlc_chip_name = "R700";
1928		break;
1929	case CHIP_RV730:
1930	case CHIP_RV740:
1931		chip_name = "RV730";
1932		rlc_chip_name = "R700";
1933		break;
1934	case CHIP_RV710:
1935		chip_name = "RV710";
1936		rlc_chip_name = "R700";
1937		break;
1938	case CHIP_CEDAR:
1939		chip_name = "CEDAR";
1940		rlc_chip_name = "CEDAR";
1941		break;
1942	case CHIP_REDWOOD:
1943		chip_name = "REDWOOD";
1944		rlc_chip_name = "REDWOOD";
1945		break;
1946	case CHIP_JUNIPER:
1947		chip_name = "JUNIPER";
1948		rlc_chip_name = "JUNIPER";
1949		break;
1950	case CHIP_CYPRESS:
1951	case CHIP_HEMLOCK:
1952		chip_name = "CYPRESS";
1953		rlc_chip_name = "CYPRESS";
1954		break;
1955	case CHIP_PALM:
1956		chip_name = "PALM";
1957		rlc_chip_name = "SUMO";
1958		break;
1959	case CHIP_SUMO:
1960		chip_name = "SUMO";
1961		rlc_chip_name = "SUMO";
1962		break;
1963	case CHIP_SUMO2:
1964		chip_name = "SUMO2";
1965		rlc_chip_name = "SUMO";
1966		break;
1967	default: BUG();
1968	}
1969
1970	if (rdev->family >= CHIP_CEDAR) {
1971		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1972		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1973		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1974	} else if (rdev->family >= CHIP_RV770) {
1975		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1976		me_req_size = R700_PM4_UCODE_SIZE * 4;
1977		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1978	} else {
1979		pfp_req_size = PFP_UCODE_SIZE * 4;
1980		me_req_size = PM4_UCODE_SIZE * 12;
1981		rlc_req_size = RLC_UCODE_SIZE * 4;
1982	}
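	/* ucode words are 32 bits, hence the * 4 above; the r6xx ME image
	 * is PM4_UCODE_SIZE * 3 dwords (see r600_cp_load_microcode),
	 * hence the * 12 */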
1983
1984	DRM_INFO("Loading %s Microcode\n", chip_name);
1985
1986	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1987	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1988	if (err)
1989		goto out;
1990	if (rdev->pfp_fw->size != pfp_req_size) {
1991		printk(KERN_ERR
1992		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1993		       rdev->pfp_fw->size, fw_name);
1994		err = -EINVAL;
1995		goto out;
1996	}
1997
1998	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1999	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2000	if (err)
2001		goto out;
2002	if (rdev->me_fw->size != me_req_size) {
2003		printk(KERN_ERR
2004		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2005		       rdev->me_fw->size, fw_name);
2006		err = -EINVAL;
2007	}
2008
2009	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2010	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2011	if (err)
2012		goto out;
2013	if (rdev->rlc_fw->size != rlc_req_size) {
2014		printk(KERN_ERR
2015		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2016		       rdev->rlc_fw->size, fw_name);
2017		err = -EINVAL;
2018	}
2019
2020out:
2021	platform_device_unregister(pdev);
2022
2023	if (err) {
2024		if (err != -EINVAL)
2025			printk(KERN_ERR
2026			       "r600_cp: Failed to load firmware \"%s\"\n",
2027			       fw_name);
2028		release_firmware(rdev->pfp_fw);
2029		rdev->pfp_fw = NULL;
2030		release_firmware(rdev->me_fw);
2031		rdev->me_fw = NULL;
2032		release_firmware(rdev->rlc_fw);
2033		rdev->rlc_fw = NULL;
2034	}
2035	return err;
2036}
2037
2038static int r600_cp_load_microcode(struct radeon_device *rdev)
2039{
2040	const __be32 *fw_data;
2041	int i;
2042
2043	if (!rdev->me_fw || !rdev->pfp_fw)
2044		return -EINVAL;
2045
2046	r600_cp_stop(rdev);
2047
2048	WREG32(CP_RB_CNTL,
2049#ifdef __BIG_ENDIAN
2050	       BUF_SWAP_32BIT |
2051#endif
2052	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2053
2054	/* Reset cp */
2055	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2056	RREG32(GRBM_SOFT_RESET);
2057	mdelay(15);
2058	WREG32(GRBM_SOFT_RESET, 0);
2059
2060	WREG32(CP_ME_RAM_WADDR, 0);
2061
2062	fw_data = (const __be32 *)rdev->me_fw->data;
2063	WREG32(CP_ME_RAM_WADDR, 0);
2064	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2065		WREG32(CP_ME_RAM_DATA,
2066		       be32_to_cpup(fw_data++));
2067
2068	fw_data = (const __be32 *)rdev->pfp_fw->data;
2069	WREG32(CP_PFP_UCODE_ADDR, 0);
2070	for (i = 0; i < PFP_UCODE_SIZE; i++)
2071		WREG32(CP_PFP_UCODE_DATA,
2072		       be32_to_cpup(fw_data++));
2073
2074	WREG32(CP_PFP_UCODE_ADDR, 0);
2075	WREG32(CP_ME_RAM_WADDR, 0);
2076	WREG32(CP_ME_RAM_RADDR, 0);
2077	return 0;
2078}
2079
2080int r600_cp_start(struct radeon_device *rdev)
2081{
2082	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2083	int r;
2084	uint32_t cp_me;
2085
2086	r = radeon_ring_lock(rdev, ring, 7);
2087	if (r) {
2088		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2089		return r;
2090	}
2091	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2092	radeon_ring_write(ring, 0x1);
2093	if (rdev->family >= CHIP_RV770) {
2094		radeon_ring_write(ring, 0x0);
2095		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2096	} else {
2097		radeon_ring_write(ring, 0x3);
2098		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2099	}
2100	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2101	radeon_ring_write(ring, 0);
2102	radeon_ring_write(ring, 0);
2103	radeon_ring_unlock_commit(rdev, ring);
2104
2105	cp_me = 0xff;
2106	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2107	return 0;
2108}
2109
2110int r600_cp_resume(struct radeon_device *rdev)
2111{
2112	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2113	u32 tmp;
2114	u32 rb_bufsz;
2115	int r;
2116
2117	/* Reset cp */
2118	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2119	RREG32(GRBM_SOFT_RESET);
2120	mdelay(15);
2121	WREG32(GRBM_SOFT_RESET, 0);
2122
2123	/* Set ring buffer size */
2124	rb_bufsz = drm_order(ring->ring_size / 8);
2125	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
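	/* rb_bufsz works out to log2(ring size in dwords) - 1, which
	 * appears to be how the RB_BUFSZ field encodes the buffer size */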
2126#ifdef __BIG_ENDIAN
2127	tmp |= BUF_SWAP_32BIT;
2128#endif
2129	WREG32(CP_RB_CNTL, tmp);
2130	WREG32(CP_SEM_WAIT_TIMER, 0x0);
2131
2132	/* Set the write pointer delay */
2133	WREG32(CP_RB_WPTR_DELAY, 0);
2134
2135	/* Initialize the ring buffer's read and write pointers */
2136	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2137	WREG32(CP_RB_RPTR_WR, 0);
2138	ring->wptr = 0;
2139	WREG32(CP_RB_WPTR, ring->wptr);
2140
2141	/* set the wb address whether it's enabled or not */
2142	WREG32(CP_RB_RPTR_ADDR,
2143	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2144	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2145	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2146
2147	if (rdev->wb.enabled)
2148		WREG32(SCRATCH_UMSK, 0xff);
2149	else {
2150		tmp |= RB_NO_UPDATE;
2151		WREG32(SCRATCH_UMSK, 0);
2152	}
2153
2154	mdelay(1);
2155	WREG32(CP_RB_CNTL, tmp);
2156
2157	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2158	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2159
2160	ring->rptr = RREG32(CP_RB_RPTR);
2161
2162	r600_cp_start(rdev);
2163	ring->ready = true;
2164	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2165	if (r) {
2166		ring->ready = false;
2167		return r;
2168	}
2169	return 0;
2170}
2171
2172void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2173{
2174	u32 rb_bufsz;
2175
2176	/* Align ring size */
2177	rb_bufsz = drm_order(ring_size / 8);
2178	ring_size = (1 << (rb_bufsz + 1)) * 4;
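	/* e.g. a requested 1MB ring: drm_order(1M / 8) = 17, so
	 * ring_size = (1 << 18) * 4 = 1MB, already a power of two */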
2179	ring->ring_size = ring_size;
2180	ring->align_mask = 16 - 1;
2181}
2182
2183void r600_cp_fini(struct radeon_device *rdev)
2184{
2185	r600_cp_stop(rdev);
2186	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
2187}
2188
2189
2190/*
2191 * GPU scratch registers helpers function.
2192 */
2193void r600_scratch_init(struct radeon_device *rdev)
2194{
2195	int i;
2196
2197	rdev->scratch.num_reg = 7;
2198	rdev->scratch.reg_base = SCRATCH_REG0;
2199	for (i = 0; i < rdev->scratch.num_reg; i++) {
2200		rdev->scratch.free[i] = true;
2201		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2202	}
2203}
2204
2205int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2206{
2207	uint32_t scratch;
2208	uint32_t tmp = 0;
2209	unsigned i, ridx = radeon_ring_index(rdev, ring);
2210	int r;
2211
2212	r = radeon_scratch_get(rdev, &scratch);
2213	if (r) {
2214		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2215		return r;
2216	}
2217	WREG32(scratch, 0xCAFEDEAD);
2218	r = radeon_ring_lock(rdev, ring, 3);
2219	if (r) {
2220		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
2221		radeon_scratch_free(rdev, scratch);
2222		return r;
2223	}
2224	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2225	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2226	radeon_ring_write(ring, 0xDEADBEEF);
2227	radeon_ring_unlock_commit(rdev, ring);
2228	for (i = 0; i < rdev->usec_timeout; i++) {
2229		tmp = RREG32(scratch);
2230		if (tmp == 0xDEADBEEF)
2231			break;
2232		DRM_UDELAY(1);
2233	}
2234	if (i < rdev->usec_timeout) {
2235		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
2236	} else {
2237		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2238			  ridx, scratch, tmp);
2239		r = -EINVAL;
2240	}
2241	radeon_scratch_free(rdev, scratch);
2242	return r;
2243}
2244
2245void r600_fence_ring_emit(struct radeon_device *rdev,
2246			  struct radeon_fence *fence)
2247{
2248	struct radeon_ring *ring = &rdev->ring[fence->ring];
2249
2250	if (rdev->wb.use_event) {
2251		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2252		/* flush read cache over gart */
2253		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2254		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2255					PACKET3_VC_ACTION_ENA |
2256					PACKET3_SH_ACTION_ENA);
2257		radeon_ring_write(ring, 0xFFFFFFFF);
2258		radeon_ring_write(ring, 0);
2259		radeon_ring_write(ring, 10); /* poll interval */
2260		/* EVENT_WRITE_EOP - flush caches, send int */
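		/* DATA_SEL(1) presumably selects a 32-bit write of the fence
		 * sequence number and INT_SEL(2) an interrupt once that
		 * write has landed */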
2261		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2262		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2263		radeon_ring_write(ring, addr & 0xffffffff);
2264		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2265		radeon_ring_write(ring, fence->seq);
2266		radeon_ring_write(ring, 0);
2267	} else {
2268		/* flush read cache over gart */
2269		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2270		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2271					PACKET3_VC_ACTION_ENA |
2272					PACKET3_SH_ACTION_ENA);
2273		radeon_ring_write(ring, 0xFFFFFFFF);
2274		radeon_ring_write(ring, 0);
2275		radeon_ring_write(ring, 10); /* poll interval */
2276		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2277		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2278		/* wait for 3D idle clean */
2279		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2280		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2281		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2282		/* Emit fence sequence & fire IRQ */
2283		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2284		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2285		radeon_ring_write(ring, fence->seq);
2286		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2287		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2288		radeon_ring_write(ring, RB_INT_STAT);
2289	}
2290}
2291
2292void r600_semaphore_ring_emit(struct radeon_device *rdev,
2293			      struct radeon_ring *ring,
2294			      struct radeon_semaphore *semaphore,
2295			      bool emit_wait)
2296{
2297	uint64_t addr = semaphore->gpu_addr;
2298	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2299
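	/* pre-cayman parts additionally qualify the select with WAIT_ON_SIGNAL */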
2300	if (rdev->family < CHIP_CAYMAN)
2301		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2302
2303	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2304	radeon_ring_write(ring, addr & 0xffffffff);
2305	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2306}
2307
2308int r600_copy_blit(struct radeon_device *rdev,
2309		   uint64_t src_offset,
2310		   uint64_t dst_offset,
2311		   unsigned num_gpu_pages,
2312		   struct radeon_fence *fence)
2313{
2314	struct radeon_sa_bo *vb = NULL;
2315	int r;
2316
2317	r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
2318	if (r) {
2319		return r;
2320	}
2321	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2322	r600_blit_done_copy(rdev, fence, vb);
2323	return 0;
2324}
2325
2326void r600_blit_suspend(struct radeon_device *rdev)
2327{
2328	int r;
2329
2330	/* unpin shaders bo */
2331	if (rdev->r600_blit.shader_obj) {
2332		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2333		if (!r) {
2334			radeon_bo_unpin(rdev->r600_blit.shader_obj);
2335			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2336		}
2337	}
2338}
2339
2340int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2341			 uint32_t tiling_flags, uint32_t pitch,
2342			 uint32_t offset, uint32_t obj_size)
2343{
2344	/* FIXME: implement */
2345	return 0;
2346}
2347
2348void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2349{
2350	/* FIXME: implement */
2351}
2352
2353int r600_startup(struct radeon_device *rdev)
2354{
2355	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2356	int r;
2357
2358	/* enable pcie gen2 link */
2359	r600_pcie_gen2_enable(rdev);
2360
2361	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2362		r = r600_init_microcode(rdev);
2363		if (r) {
2364			DRM_ERROR("Failed to load firmware!\n");
2365			return r;
2366		}
2367	}
2368
2369	r = r600_vram_scratch_init(rdev);
2370	if (r)
2371		return r;
2372
2373	r600_mc_program(rdev);
2374	if (rdev->flags & RADEON_IS_AGP) {
2375		r600_agp_enable(rdev);
2376	} else {
2377		r = r600_pcie_gart_enable(rdev);
2378		if (r)
2379			return r;
2380	}
2381	r600_gpu_init(rdev);
2382	r = r600_blit_init(rdev);
2383	if (r) {
2384		r600_blit_fini(rdev);
2385		rdev->asic->copy.copy = NULL;
2386		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2387	}
2388
2389	/* allocate wb buffer */
2390	r = radeon_wb_init(rdev);
2391	if (r)
2392		return r;
2393
2394	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2395	if (r) {
2396		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2397		return r;
2398	}
2399
2400	/* Enable IRQ */
2401	r = r600_irq_init(rdev);
2402	if (r) {
2403		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2404		radeon_irq_kms_fini(rdev);
2405		return r;
2406	}
2407	r600_irq_set(rdev);
2408
2409	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2410			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2411			     0, 0xfffff, RADEON_CP_PACKET2);
2412
2413	if (r)
2414		return r;
2415	r = r600_cp_load_microcode(rdev);
2416	if (r)
2417		return r;
2418	r = r600_cp_resume(rdev);
2419	if (r)
2420		return r;
2421
2422	r = radeon_ib_pool_start(rdev);
2423	if (r)
2424		return r;
2425
2426	r = radeon_ib_ring_tests(rdev);
2427	if (r)
2428		return r;
2429
2430	r = r600_audio_init(rdev);
2431	if (r) {
2432		DRM_ERROR("radeon: audio init failed\n");
2433		return r;
2434	}
2435
2436	return 0;
2437}
2438
2439void r600_vga_set_state(struct radeon_device *rdev, bool state)
2440{
2441	uint32_t temp;
2442
2443	temp = RREG32(CONFIG_CNTL);
2444	if (state == false) {
2445		temp &= ~(1<<0);
2446		temp |= (1<<1);
2447	} else {
2448		temp &= ~(1<<1);
2449	}
2450	WREG32(CONFIG_CNTL, temp);
2451}
2452
2453int r600_resume(struct radeon_device *rdev)
2454{
2455	int r;
2456
 2457	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
 2458	 * posting will perform the tasks necessary to bring the GPU back
 2459	 * into good shape.
 2460	 */
2461	/* post card */
2462	atom_asic_init(rdev->mode_info.atom_context);
2463
2464	rdev->accel_working = true;
2465	r = r600_startup(rdev);
2466	if (r) {
2467		DRM_ERROR("r600 startup failed on resume\n");
2468		rdev->accel_working = false;
2469		return r;
2470	}
2471
2472	return r;
2473}
2474
2475int r600_suspend(struct radeon_device *rdev)
2476{
2477	r600_audio_fini(rdev);
2478	radeon_ib_pool_suspend(rdev);
2479	r600_blit_suspend(rdev);
2480	/* FIXME: we should wait for ring to be empty */
2481	r600_cp_stop(rdev);
2482	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2483	r600_irq_suspend(rdev);
2484	radeon_wb_disable(rdev);
2485	r600_pcie_gart_disable(rdev);
2486
2487	return 0;
2488}
2489
 2490/* The plan is to move initialization into this function and use
 2491 * helper functions so that radeon_device_init does pretty much
 2492 * nothing more than call asic-specific functions. This should
 2493 * also allow us to remove a bunch of callback functions
 2494 * like vram_info.
 2495 */
2496int r600_init(struct radeon_device *rdev)
2497{
2498	int r;
2499
2500	if (r600_debugfs_mc_info_init(rdev)) {
2501		DRM_ERROR("Failed to register debugfs file for mc !\n");
2502	}
2503	/* Read BIOS */
2504	if (!radeon_get_bios(rdev)) {
2505		if (ASIC_IS_AVIVO(rdev))
2506			return -EINVAL;
2507	}
2508	/* Must be an ATOMBIOS */
2509	if (!rdev->is_atom_bios) {
2510		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2511		return -EINVAL;
2512	}
2513	r = radeon_atombios_init(rdev);
2514	if (r)
2515		return r;
2516	/* Post card if necessary */
2517	if (!radeon_card_posted(rdev)) {
2518		if (!rdev->bios) {
2519			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2520			return -EINVAL;
2521		}
2522		DRM_INFO("GPU not posted. posting now...\n");
2523		atom_asic_init(rdev->mode_info.atom_context);
2524	}
2525	/* Initialize scratch registers */
2526	r600_scratch_init(rdev);
2527	/* Initialize surface registers */
2528	radeon_surface_init(rdev);
2529	/* Initialize clocks */
2530	radeon_get_clock_info(rdev->ddev);
2531	/* Fence driver */
2532	r = radeon_fence_driver_init(rdev);
2533	if (r)
2534		return r;
2535	if (rdev->flags & RADEON_IS_AGP) {
2536		r = radeon_agp_init(rdev);
2537		if (r)
2538			radeon_agp_disable(rdev);
2539	}
2540	r = r600_mc_init(rdev);
2541	if (r)
2542		return r;
2543	/* Memory manager */
2544	r = radeon_bo_init(rdev);
2545	if (r)
2546		return r;
2547
2548	r = radeon_irq_kms_init(rdev);
2549	if (r)
2550		return r;
2551
2552	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2553	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2554
2555	rdev->ih.ring_obj = NULL;
2556	r600_ih_ring_init(rdev, 64 * 1024);
2557
2558	r = r600_pcie_gart_init(rdev);
2559	if (r)
2560		return r;
2561
2562	r = radeon_ib_pool_init(rdev);
2563	rdev->accel_working = true;
2564	if (r) {
2565		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2566		rdev->accel_working = false;
2567	}
2568
2569	r = r600_startup(rdev);
2570	if (r) {
2571		dev_err(rdev->dev, "disabling GPU acceleration\n");
2572		r600_cp_fini(rdev);
2573		r600_irq_fini(rdev);
2574		radeon_wb_fini(rdev);
2575		r100_ib_fini(rdev);
2576		radeon_irq_kms_fini(rdev);
2577		r600_pcie_gart_fini(rdev);
2578		rdev->accel_working = false;
2579	}
2580
2581	return 0;
2582}
2583
2584void r600_fini(struct radeon_device *rdev)
2585{
2586	r600_audio_fini(rdev);
2587	r600_blit_fini(rdev);
2588	r600_cp_fini(rdev);
2589	r600_irq_fini(rdev);
2590	radeon_wb_fini(rdev);
2591	r100_ib_fini(rdev);
2592	radeon_irq_kms_fini(rdev);
2593	r600_pcie_gart_fini(rdev);
2594	r600_vram_scratch_fini(rdev);
2595	radeon_agp_fini(rdev);
2596	radeon_gem_fini(rdev);
2597	radeon_fence_driver_fini(rdev);
2598	radeon_bo_fini(rdev);
2599	radeon_atombios_fini(rdev);
2600	kfree(rdev->bios);
2601	rdev->bios = NULL;
2602}
2603
2604
2605/*
2606 * CS stuff
2607 */
2608void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2609{
2610	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
2611
2612	/* FIXME: implement */
2613	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2614	radeon_ring_write(ring,
2615#ifdef __BIG_ENDIAN
2616			  (2 << 0) |
2617#endif
2618			  (ib->gpu_addr & 0xFFFFFFFC));
2619	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2620	radeon_ring_write(ring, ib->length_dw);
2621}
2622
2623int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2624{
2625	struct radeon_ib ib;
2626	uint32_t scratch;
2627	uint32_t tmp = 0;
2628	unsigned i;
2629	int r;
2630	int ring_index = radeon_ring_index(rdev, ring);
2631
2632	r = radeon_scratch_get(rdev, &scratch);
2633	if (r) {
2634		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2635		return r;
2636	}
2637	WREG32(scratch, 0xCAFEDEAD);
2638	r = radeon_ib_get(rdev, ring_index, &ib, 256);
2639	if (r) {
2640		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2641		return r;
2642	}
2643	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2644	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2645	ib.ptr[2] = 0xDEADBEEF;
2646	ib.length_dw = 3;
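	/* the IB is a single SET_CONFIG_REG write of 0xDEADBEEF to the
	 * scratch register; if the value lands, the CP executed the IB */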
2647	r = radeon_ib_schedule(rdev, &ib);
2648	if (r) {
2649		radeon_scratch_free(rdev, scratch);
2650		radeon_ib_free(rdev, &ib);
2651		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2652		return r;
2653	}
2654	r = radeon_fence_wait(ib.fence, false);
2655	if (r) {
2656		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2657		return r;
2658	}
2659	for (i = 0; i < rdev->usec_timeout; i++) {
2660		tmp = RREG32(scratch);
2661		if (tmp == 0xDEADBEEF)
2662			break;
2663		DRM_UDELAY(1);
2664	}
2665	if (i < rdev->usec_timeout) {
2666		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2667	} else {
2668		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2669			  scratch, tmp);
2670		r = -EINVAL;
2671	}
2672	radeon_scratch_free(rdev, scratch);
2673	radeon_ib_free(rdev, &ib);
2674	return r;
2675}
2676
2677/*
2678 * Interrupts
2679 *
2680 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2681 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2682 * writing to the ring and the GPU consuming, the GPU writes to the ring
2683 * and host consumes.  As the host irq handler processes interrupts, it
2684 * increments the rptr.  When the rptr catches up with the wptr, all the
2685 * current interrupts have been processed.
2686 */
2687
2688void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2689{
2690	u32 rb_bufsz;
2691
2692	/* Align ring size */
2693	rb_bufsz = drm_order(ring_size / 4);
2694	ring_size = (1 << rb_bufsz) * 4;
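	/* e.g. the default 64KB IH ring (see r600_init): drm_order(64K / 4)
	 * = 14, so ring_size = (1 << 14) * 4 = 64KB and ptr_mask = 0xffff */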
2695	rdev->ih.ring_size = ring_size;
2696	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2697	rdev->ih.rptr = 0;
2698}
2699
2700int r600_ih_ring_alloc(struct radeon_device *rdev)
2701{
2702	int r;
2703
2704	/* Allocate ring buffer */
2705	if (rdev->ih.ring_obj == NULL) {
2706		r = radeon_bo_create(rdev, rdev->ih.ring_size,
2707				     PAGE_SIZE, true,
2708				     RADEON_GEM_DOMAIN_GTT,
2709				     NULL, &rdev->ih.ring_obj);
2710		if (r) {
2711			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2712			return r;
2713		}
2714		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2715		if (unlikely(r != 0))
2716			return r;
2717		r = radeon_bo_pin(rdev->ih.ring_obj,
2718				  RADEON_GEM_DOMAIN_GTT,
2719				  &rdev->ih.gpu_addr);
2720		if (r) {
2721			radeon_bo_unreserve(rdev->ih.ring_obj);
2722			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2723			return r;
2724		}
2725		r = radeon_bo_kmap(rdev->ih.ring_obj,
2726				   (void **)&rdev->ih.ring);
2727		radeon_bo_unreserve(rdev->ih.ring_obj);
2728		if (r) {
2729			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2730			return r;
2731		}
2732	}
2733	return 0;
2734}
2735
2736void r600_ih_ring_fini(struct radeon_device *rdev)
2737{
2738	int r;
2739	if (rdev->ih.ring_obj) {
2740		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2741		if (likely(r == 0)) {
2742			radeon_bo_kunmap(rdev->ih.ring_obj);
2743			radeon_bo_unpin(rdev->ih.ring_obj);
2744			radeon_bo_unreserve(rdev->ih.ring_obj);
2745		}
2746		radeon_bo_unref(&rdev->ih.ring_obj);
2747		rdev->ih.ring = NULL;
2748		rdev->ih.ring_obj = NULL;
2749	}
2750}
2751
2752void r600_rlc_stop(struct radeon_device *rdev)
2753{
2754
2755	if ((rdev->family >= CHIP_RV770) &&
2756	    (rdev->family <= CHIP_RV740)) {
2757		/* r7xx asics need to soft reset RLC before halting */
2758		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2759		RREG32(SRBM_SOFT_RESET);
2760		mdelay(15);
2761		WREG32(SRBM_SOFT_RESET, 0);
2762		RREG32(SRBM_SOFT_RESET);
2763	}
2764
2765	WREG32(RLC_CNTL, 0);
2766}
2767
2768static void r600_rlc_start(struct radeon_device *rdev)
2769{
2770	WREG32(RLC_CNTL, RLC_ENABLE);
2771}
2772
2773static int r600_rlc_init(struct radeon_device *rdev)
2774{
2775	u32 i;
2776	const __be32 *fw_data;
2777
2778	if (!rdev->rlc_fw)
2779		return -EINVAL;
2780
2781	r600_rlc_stop(rdev);
2782
2783	WREG32(RLC_HB_CNTL, 0);
2784
2785	if (rdev->family == CHIP_ARUBA) {
2786		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
2787		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
2788	}
2789	if (rdev->family <= CHIP_CAYMAN) {
2790		WREG32(RLC_HB_BASE, 0);
2791		WREG32(RLC_HB_RPTR, 0);
2792		WREG32(RLC_HB_WPTR, 0);
2793	}
2794	if (rdev->family <= CHIP_CAICOS) {
2795		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2796		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2797	}
2798	WREG32(RLC_MC_CNTL, 0);
2799	WREG32(RLC_UCODE_CNTL, 0);
2800
2801	fw_data = (const __be32 *)rdev->rlc_fw->data;
2802	if (rdev->family >= CHIP_ARUBA) {
2803		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
2804			WREG32(RLC_UCODE_ADDR, i);
2805			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2806		}
2807	} else if (rdev->family >= CHIP_CAYMAN) {
2808		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2809			WREG32(RLC_UCODE_ADDR, i);
2810			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2811		}
2812	} else if (rdev->family >= CHIP_CEDAR) {
2813		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2814			WREG32(RLC_UCODE_ADDR, i);
2815			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2816		}
2817	} else if (rdev->family >= CHIP_RV770) {
2818		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2819			WREG32(RLC_UCODE_ADDR, i);
2820			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2821		}
2822	} else {
2823		for (i = 0; i < RLC_UCODE_SIZE; i++) {
2824			WREG32(RLC_UCODE_ADDR, i);
2825			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2826		}
2827	}
2828	WREG32(RLC_UCODE_ADDR, 0);
2829
2830	r600_rlc_start(rdev);
2831
2832	return 0;
2833}
2834
2835static void r600_enable_interrupts(struct radeon_device *rdev)
2836{
2837	u32 ih_cntl = RREG32(IH_CNTL);
2838	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2839
2840	ih_cntl |= ENABLE_INTR;
2841	ih_rb_cntl |= IH_RB_ENABLE;
2842	WREG32(IH_CNTL, ih_cntl);
2843	WREG32(IH_RB_CNTL, ih_rb_cntl);
2844	rdev->ih.enabled = true;
2845}
2846
2847void r600_disable_interrupts(struct radeon_device *rdev)
2848{
2849	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2850	u32 ih_cntl = RREG32(IH_CNTL);
2851
2852	ih_rb_cntl &= ~IH_RB_ENABLE;
2853	ih_cntl &= ~ENABLE_INTR;
2854	WREG32(IH_RB_CNTL, ih_rb_cntl);
2855	WREG32(IH_CNTL, ih_cntl);
2856	/* set rptr, wptr to 0 */
2857	WREG32(IH_RB_RPTR, 0);
2858	WREG32(IH_RB_WPTR, 0);
2859	rdev->ih.enabled = false;
2860	rdev->ih.wptr = 0;
2861	rdev->ih.rptr = 0;
2862}
2863
2864static void r600_disable_interrupt_state(struct radeon_device *rdev)
2865{
2866	u32 tmp;
2867
2868	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2869	WREG32(GRBM_INT_CNTL, 0);
2870	WREG32(DxMODE_INT_MASK, 0);
2871	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2872	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2873	if (ASIC_IS_DCE3(rdev)) {
2874		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2875		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2876		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2877		WREG32(DC_HPD1_INT_CONTROL, tmp);
2878		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2879		WREG32(DC_HPD2_INT_CONTROL, tmp);
2880		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2881		WREG32(DC_HPD3_INT_CONTROL, tmp);
2882		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2883		WREG32(DC_HPD4_INT_CONTROL, tmp);
2884		if (ASIC_IS_DCE32(rdev)) {
2885			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2886			WREG32(DC_HPD5_INT_CONTROL, tmp);
2887			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2888			WREG32(DC_HPD6_INT_CONTROL, tmp);
2889			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2890			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
2891			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2892			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
2893		} else {
2894			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2895			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2896			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2897			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
2898		}
2899	} else {
2900		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2901		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2902		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2903		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2904		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2905		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2906		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2907		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2908		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2909		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2910		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2911		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
2912	}
2913}
2914
2915int r600_irq_init(struct radeon_device *rdev)
2916{
2917	int ret = 0;
2918	int rb_bufsz;
2919	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2920
2921	/* allocate ring */
2922	ret = r600_ih_ring_alloc(rdev);
2923	if (ret)
2924		return ret;
2925
2926	/* disable irqs */
2927	r600_disable_interrupts(rdev);
2928
2929	/* init rlc */
2930	ret = r600_rlc_init(rdev);
2931	if (ret) {
2932		r600_ih_ring_fini(rdev);
2933		return ret;
2934	}
2935
2936	/* setup interrupt control */
2937	/* set dummy read address to ring address */
2938	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2939	interrupt_cntl = RREG32(INTERRUPT_CNTL);
2940	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2941	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2942	 */
2943	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2944	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2945	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2946	WREG32(INTERRUPT_CNTL, interrupt_cntl);
2947
2948	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2949	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2950
2951	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2952		      IH_WPTR_OVERFLOW_CLEAR |
2953		      (rb_bufsz << 1));
2954
2955	if (rdev->wb.enabled)
2956		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2957
2958	/* set the writeback address whether it's enabled or not */
2959	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2960	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
2961
2962	WREG32(IH_RB_CNTL, ih_rb_cntl);
2963
2964	/* set rptr, wptr to 0 */
2965	WREG32(IH_RB_RPTR, 0);
2966	WREG32(IH_RB_WPTR, 0);
2967
2968	/* Default settings for IH_CNTL (disabled at first) */
2969	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2970	/* RPTR_REARM only works if msi's are enabled */
2971	if (rdev->msi_enabled)
2972		ih_cntl |= RPTR_REARM;
2973	WREG32(IH_CNTL, ih_cntl);
2974
2975	/* force the active interrupt state to all disabled */
2976	if (rdev->family >= CHIP_CEDAR)
2977		evergreen_disable_interrupt_state(rdev);
2978	else
2979		r600_disable_interrupt_state(rdev);
2980
2981	/* at this point everything should be setup correctly to enable master */
2982	pci_set_master(rdev->pdev);
2983
2984	/* enable irqs */
2985	r600_enable_interrupts(rdev);
2986
2987	return ret;
2988}
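
/*
 * Illustrative numbers for the programming above, assuming the 64 KB IH
 * ring normally passed to r600_ih_ring_init(): rb_bufsz =
 * drm_order(65536 / 4) = 14, so IH_RB_CNTL ends up as
 * IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | (14 << 1), with
 * IH_WPTR_WRITEBACK_ENABLE OR'ed in when writeback is active.
 */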
2989
2990void r600_irq_suspend(struct radeon_device *rdev)
2991{
2992	r600_irq_disable(rdev);
2993	r600_rlc_stop(rdev);
2994}
2995
2996void r600_irq_fini(struct radeon_device *rdev)
2997{
2998	r600_irq_suspend(rdev);
2999	r600_ih_ring_fini(rdev);
3000}
3001
3002int r600_irq_set(struct radeon_device *rdev)
3003{
3004	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3005	u32 mode_int = 0;
3006	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3007	u32 grbm_int_cntl = 0;
3008	u32 hdmi0, hdmi1;
3009	u32 d1grph = 0, d2grph = 0;
3010
3011	if (!rdev->irq.installed) {
3012		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3013		return -EINVAL;
3014	}
3015	/* don't enable anything if the ih is disabled */
3016	if (!rdev->ih.enabled) {
3017		r600_disable_interrupts(rdev);
3018		/* force the active interrupt state to all disabled */
3019		r600_disable_interrupt_state(rdev);
3020		return 0;
3021	}
3022
3023	if (ASIC_IS_DCE3(rdev)) {
3024		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3025		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3026		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3027		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3028		if (ASIC_IS_DCE32(rdev)) {
3029			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3030			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3031			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3032			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3033		} else {
3034			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3035			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3036		}
3037	} else {
3038		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3039		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3040		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3041		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3042		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3043	}
3044
3045	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
3046		DRM_DEBUG("r600_irq_set: sw int\n");
3047		cp_int_cntl |= RB_INT_ENABLE;
3048		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3049	}
3050	if (rdev->irq.crtc_vblank_int[0] ||
3051	    rdev->irq.pflip[0]) {
3052		DRM_DEBUG("r600_irq_set: vblank 0\n");
3053		mode_int |= D1MODE_VBLANK_INT_MASK;
3054	}
3055	if (rdev->irq.crtc_vblank_int[1] ||
3056	    rdev->irq.pflip[1]) {
3057		DRM_DEBUG("r600_irq_set: vblank 1\n");
3058		mode_int |= D2MODE_VBLANK_INT_MASK;
3059	}
3060	if (rdev->irq.hpd[0]) {
3061		DRM_DEBUG("r600_irq_set: hpd 1\n");
3062		hpd1 |= DC_HPDx_INT_EN;
3063	}
3064	if (rdev->irq.hpd[1]) {
3065		DRM_DEBUG("r600_irq_set: hpd 2\n");
3066		hpd2 |= DC_HPDx_INT_EN;
3067	}
3068	if (rdev->irq.hpd[2]) {
3069		DRM_DEBUG("r600_irq_set: hpd 3\n");
3070		hpd3 |= DC_HPDx_INT_EN;
3071	}
3072	if (rdev->irq.hpd[3]) {
3073		DRM_DEBUG("r600_irq_set: hpd 4\n");
3074		hpd4 |= DC_HPDx_INT_EN;
3075	}
3076	if (rdev->irq.hpd[4]) {
3077		DRM_DEBUG("r600_irq_set: hpd 5\n");
3078		hpd5 |= DC_HPDx_INT_EN;
3079	}
3080	if (rdev->irq.hpd[5]) {
3081		DRM_DEBUG("r600_irq_set: hpd 6\n");
3082		hpd6 |= DC_HPDx_INT_EN;
3083	}
3084	if (rdev->irq.afmt[0]) {
3085		DRM_DEBUG("r600_irq_set: hdmi 0\n");
3086		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3087	}
3088	if (rdev->irq.afmt[1]) {
3089		DRM_DEBUG("r600_irq_set: hdmi 1\n");
3090		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3091	}
3092	if (rdev->irq.gui_idle) {
3093		DRM_DEBUG("gui idle\n");
3094		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3095	}
3096
3097	WREG32(CP_INT_CNTL, cp_int_cntl);
3098	WREG32(DxMODE_INT_MASK, mode_int);
3099	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3100	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3101	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3102	if (ASIC_IS_DCE3(rdev)) {
3103		WREG32(DC_HPD1_INT_CONTROL, hpd1);
3104		WREG32(DC_HPD2_INT_CONTROL, hpd2);
3105		WREG32(DC_HPD3_INT_CONTROL, hpd3);
3106		WREG32(DC_HPD4_INT_CONTROL, hpd4);
3107		if (ASIC_IS_DCE32(rdev)) {
3108			WREG32(DC_HPD5_INT_CONTROL, hpd5);
3109			WREG32(DC_HPD6_INT_CONTROL, hpd6);
3110			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3111			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3112		} else {
3113			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3114			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3115		}
3116	} else {
3117		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3118		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3119		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3120		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3121		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3122	}
3123
3124	return 0;
3125}
3126
3127static void r600_irq_ack(struct radeon_device *rdev)
3128{
3129	u32 tmp;
3130
3131	if (ASIC_IS_DCE3(rdev)) {
3132		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3133		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3134		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3135		if (ASIC_IS_DCE32(rdev)) {
3136			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3137			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3138		} else {
3139			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3140			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3141		}
3142	} else {
3143		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3144		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3145		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3146		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3147		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3148	}
3149	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3150	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3151
3152	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3153		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3154	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3155		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3156	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3157		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3158	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3159		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3160	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3161		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3162	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3163		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3164	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3165		if (ASIC_IS_DCE3(rdev)) {
3166			tmp = RREG32(DC_HPD1_INT_CONTROL);
3167			tmp |= DC_HPDx_INT_ACK;
3168			WREG32(DC_HPD1_INT_CONTROL, tmp);
3169		} else {
3170			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3171			tmp |= DC_HPDx_INT_ACK;
3172			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3173		}
3174	}
3175	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3176		if (ASIC_IS_DCE3(rdev)) {
3177			tmp = RREG32(DC_HPD2_INT_CONTROL);
3178			tmp |= DC_HPDx_INT_ACK;
3179			WREG32(DC_HPD2_INT_CONTROL, tmp);
3180		} else {
3181			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3182			tmp |= DC_HPDx_INT_ACK;
3183			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3184		}
3185	}
3186	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3187		if (ASIC_IS_DCE3(rdev)) {
3188			tmp = RREG32(DC_HPD3_INT_CONTROL);
3189			tmp |= DC_HPDx_INT_ACK;
3190			WREG32(DC_HPD3_INT_CONTROL, tmp);
3191		} else {
3192			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3193			tmp |= DC_HPDx_INT_ACK;
3194			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3195		}
3196	}
3197	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3198		tmp = RREG32(DC_HPD4_INT_CONTROL);
3199		tmp |= DC_HPDx_INT_ACK;
3200		WREG32(DC_HPD4_INT_CONTROL, tmp);
3201	}
3202	if (ASIC_IS_DCE32(rdev)) {
3203		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3204			tmp = RREG32(DC_HPD5_INT_CONTROL);
3205			tmp |= DC_HPDx_INT_ACK;
3206			WREG32(DC_HPD5_INT_CONTROL, tmp);
3207		}
3208		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3209			tmp = RREG32(DC_HPD6_INT_CONTROL);
3210			tmp |= DC_HPDx_INT_ACK;
3211			WREG32(DC_HPD6_INT_CONTROL, tmp);
3212		}
3213		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3214			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3215			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3216			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3217		}
3218		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3219			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3220			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3221			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3222		}
3223	} else {
3224		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3225			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3226			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3227			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3228		}
3229		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3230			if (ASIC_IS_DCE3(rdev)) {
3231				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3232				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3233				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3234			} else {
3235				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3236				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3237				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3238			}
3239		}
3240	}
3241}
3242
3243void r600_irq_disable(struct radeon_device *rdev)
3244{
3245	r600_disable_interrupts(rdev);
3246	/* Wait and acknowledge irq */
3247	mdelay(1);
3248	r600_irq_ack(rdev);
3249	r600_disable_interrupt_state(rdev);
3250}
3251
3252static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3253{
3254	u32 wptr, tmp;
3255
3256	if (rdev->wb.enabled)
3257		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3258	else
3259		wptr = RREG32(IH_RB_WPTR);
3260
3261	if (wptr & RB_OVERFLOW) {
3262		/* When a ring buffer overflow happens, start parsing interrupts
3263		 * from the last not-overwritten vector (wptr + 16). Hopefully
3264		 * this should allow us to catch up.
3265		 */
3266		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3267			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3268		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3269		tmp = RREG32(IH_RB_CNTL);
3270		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3271		WREG32(IH_RB_CNTL, tmp);
3272	}
3273	return (wptr & rdev->ih.ptr_mask);
3274}
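
/*
 * Worked example (illustrative): with a 64 KB ring (ptr_mask 0xFFFF),
 * suppose an overflow is flagged and the masked write offset is 0x0020.
 * The GPU has lapped the CPU, so everything up to and including the slot
 * at 0x0020 is stale; processing restarts at rptr = (0x0020 + 16) &
 * 0xFFFF = 0x0030, the oldest 16-byte vector not yet overwritten, and
 * IH_WPTR_OVERFLOW_CLEAR acknowledges the overflow condition.
 */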
3275
3276/*        r600 IV Ring
3277 * Each IV ring entry is 128 bits:
3278 * [7:0]    - interrupt source id
3279 * [31:8]   - reserved
3280 * [59:32]  - interrupt source data
3281 * [127:60]  - reserved
3282 *
3283 * The basic interrupt vector entries
3284 * are decoded as follows:
3285 * src_id  src_data  description
3286 *      1         0  D1 Vblank
3287 *      1         1  D1 Vline
3288 *      5         0  D2 Vblank
3289 *      5         1  D2 Vline
3290 *     19         0  FP Hot plug detection A
3291 *     19         1  FP Hot plug detection B
3292 *     19         2  DAC A auto-detection
3293 *     19         3  DAC B auto-detection
3294 *     21         4  HDMI block A
3295 *     21         5  HDMI block B
3296 *    176         -  CP_INT RB
3297 *    177         -  CP_INT IB1
3298 *    178         -  CP_INT IB2
3299 *    181         -  EOP Interrupt
3300 *    233         -  GUI Idle
3301 *
3302 * Note, these are based on r600 and may need to be
3303 * adjusted or added to on newer asics
3304 */
3305
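/*
 * Illustrative decode of a single vector (not driver code): given the
 * byte offset rptr, the fields described above come out of the first
 * two dwords exactly as the loop in r600_irq_process() extracts them;
 * dwords 2 and 3 of each vector are reserved on r6xx.
 *
 *	u32 ring_index = rptr / 4;
 *	u32 src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
 *	u32 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 */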
3306int r600_irq_process(struct radeon_device *rdev)
3307{
3308	u32 wptr;
3309	u32 rptr;
3310	u32 src_id, src_data;
3311	u32 ring_index;
3312	unsigned long flags;
3313	bool queue_hotplug = false;
3314	bool queue_hdmi = false;
3315
3316	if (!rdev->ih.enabled || rdev->shutdown)
3317		return IRQ_NONE;
3318
3319	/* No MSIs, need a dummy read to flush PCI DMAs */
3320	if (!rdev->msi_enabled)
3321		RREG32(IH_RB_WPTR);
3322
3323	wptr = r600_get_ih_wptr(rdev);
3324	rptr = rdev->ih.rptr;
3325	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3326
3327	spin_lock_irqsave(&rdev->ih.lock, flags);
3328
3329	if (rptr == wptr) {
3330		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3331		return IRQ_NONE;
3332	}
3333
3334restart_ih:
3335	/* Order reading of wptr vs. reading of IH ring data */
3336	rmb();
3337
3338	/* display interrupts */
3339	r600_irq_ack(rdev);
3340
3341	rdev->ih.wptr = wptr;
3342	while (rptr != wptr) {
3343		/* wptr/rptr are in bytes! */
3344		ring_index = rptr / 4;
3345		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3346		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3347
3348		switch (src_id) {
3349		case 1: /* D1 vblank/vline */
3350			switch (src_data) {
3351			case 0: /* D1 vblank */
3352				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3353					if (rdev->irq.crtc_vblank_int[0]) {
3354						drm_handle_vblank(rdev->ddev, 0);
3355						rdev->pm.vblank_sync = true;
3356						wake_up(&rdev->irq.vblank_queue);
3357					}
3358					if (rdev->irq.pflip[0])
3359						radeon_crtc_handle_flip(rdev, 0);
3360					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3361					DRM_DEBUG("IH: D1 vblank\n");
3362				}
3363				break;
3364			case 1: /* D1 vline */
3365				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3366					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3367					DRM_DEBUG("IH: D1 vline\n");
3368				}
3369				break;
3370			default:
3371				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3372				break;
3373			}
3374			break;
3375		case 5: /* D2 vblank/vline */
3376			switch (src_data) {
3377			case 0: /* D2 vblank */
3378				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3379					if (rdev->irq.crtc_vblank_int[1]) {
3380						drm_handle_vblank(rdev->ddev, 1);
3381						rdev->pm.vblank_sync = true;
3382						wake_up(&rdev->irq.vblank_queue);
3383					}
3384					if (rdev->irq.pflip[1])
3385						radeon_crtc_handle_flip(rdev, 1);
3386					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3387					DRM_DEBUG("IH: D2 vblank\n");
3388				}
3389				break;
3390			case 1: /* D2 vline */
3391				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3392					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3393					DRM_DEBUG("IH: D2 vline\n");
3394				}
3395				break;
3396			default:
3397				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3398				break;
3399			}
3400			break;
3401		case 19: /* HPD/DAC hotplug */
3402			switch (src_data) {
3403			case 0:
3404				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3405					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3406					queue_hotplug = true;
3407					DRM_DEBUG("IH: HPD1\n");
3408				}
3409				break;
3410			case 1:
3411				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3412					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3413					queue_hotplug = true;
3414					DRM_DEBUG("IH: HPD2\n");
3415				}
3416				break;
3417			case 4:
3418				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3419					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3420					queue_hotplug = true;
3421					DRM_DEBUG("IH: HPD3\n");
3422				}
3423				break;
3424			case 5:
3425				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3426					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3427					queue_hotplug = true;
3428					DRM_DEBUG("IH: HPD4\n");
3429				}
3430				break;
3431			case 10:
3432				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3433					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3434					queue_hotplug = true;
3435					DRM_DEBUG("IH: HPD5\n");
3436				}
3437				break;
3438			case 12:
3439				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3440					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3441					queue_hotplug = true;
3442					DRM_DEBUG("IH: HPD6\n");
3443				}
3444				break;
3445			default:
3446				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3447				break;
3448			}
3449			break;
3450		case 21: /* hdmi */
3451			switch (src_data) {
3452			case 4:
3453				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3454					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3455					queue_hdmi = true;
3456					DRM_DEBUG("IH: HDMI0\n");
3457				}
3458				break;
3459			case 5:
3460				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3461					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3462					queue_hdmi = true;
3463					DRM_DEBUG("IH: HDMI1\n");
3464				}
3465				break;
3466			default:
3467				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3468				break;
3469			}
3470			break;
3471		case 176: /* CP_INT in ring buffer */
3472		case 177: /* CP_INT in IB1 */
3473		case 178: /* CP_INT in IB2 */
3474			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3475			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3476			break;
3477		case 181: /* CP EOP event */
3478			DRM_DEBUG("IH: CP EOP\n");
3479			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3480			break;
3481		case 233: /* GUI IDLE */
3482			DRM_DEBUG("IH: GUI idle\n");
3483			rdev->pm.gui_idle = true;
3484			wake_up(&rdev->irq.idle_queue);
3485			break;
3486		default:
3487			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3488			break;
3489		}
3490
3491		/* wptr/rptr are in bytes! */
3492		rptr += 16;
3493		rptr &= rdev->ih.ptr_mask;
3494	}
3495	/* make sure wptr hasn't changed while processing */
3496	wptr = r600_get_ih_wptr(rdev);
3497	if (wptr != rdev->ih.wptr)
3498		goto restart_ih;
3499	if (queue_hotplug)
3500		schedule_work(&rdev->hotplug_work);
3501	if (queue_hdmi)
3502		schedule_work(&rdev->audio_work);
3503	rdev->ih.rptr = rptr;
3504	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3505	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3506	return IRQ_HANDLED;
3507}
3508
3509/*
3510 * Debugfs info
3511 */
3512#if defined(CONFIG_DEBUG_FS)
3513
3514static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3515{
3516	struct drm_info_node *node = (struct drm_info_node *) m->private;
3517	struct drm_device *dev = node->minor->dev;
3518	struct radeon_device *rdev = dev->dev_private;
3519
3520	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3521	DREG32_SYS(m, rdev, VM_L2_STATUS);
3522	return 0;
3523}
3524
3525static struct drm_info_list r600_mc_info_list[] = {
3526	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3527};
3528#endif
3529
3530int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3531{
3532#if defined(CONFIG_DEBUG_FS)
3533	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3534#else
3535	return 0;
3536#endif
3537}
3538
3539/**
3540 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3541 * @rdev: radeon device structure
3542 * @bo: buffer object struct which userspace is waiting for idle
3543 *
3544 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
3545 * performed through the ring buffer; this leads to corruption in
3546 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
3547 * To avoid this we perform the HDP flush directly via an MMIO register write.
3548 */
3549void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3550{
3551	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
3552	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
3553	 * This seems to cause problems on some AGP cards. Just use the old
3554	 * method for them.
3555	 */
3556	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3557	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3558		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3559		u32 tmp;
3560
3561		WREG32(HDP_DEBUG1, 0);
3562		tmp = readl(ptr);
3563	} else
3564		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3565}
3566
3567void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3568{
3569	u32 link_width_cntl, mask, target_reg;
3570
3571	if (rdev->flags & RADEON_IS_IGP)
3572		return;
3573
3574	if (!(rdev->flags & RADEON_IS_PCIE))
3575		return;
3576
3577	/* x2 cards have a special sequence */
3578	if (ASIC_IS_X2(rdev))
3579		return;
3580
3581	/* FIXME wait for idle */
3582
3583	switch (lanes) {
3584	case 0:
3585		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3586		break;
3587	case 1:
3588		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3589		break;
3590	case 2:
3591		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3592		break;
3593	case 4:
3594		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3595		break;
3596	case 8:
3597		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3598		break;
3599	case 12:
3600		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3601		break;
3602	case 16:
3603	default:
3604		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3605		break;
3606	}
3607
3608	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3609
3610	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3611	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3612		return;
3613
3614	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3615		return;
3616
3617	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3618			     RADEON_PCIE_LC_RECONFIG_NOW |
3619			     R600_PCIE_LC_RENEGOTIATE_EN |
3620			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3621	link_width_cntl |= mask;
3622
3623	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3624
3625	/* some northbridges can renegotiate the link rather than requiring
3626	 * a complete re-config.
3627	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3628	 */
3629	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3630		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3631	else
3632		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3633
3634	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3635						       RADEON_PCIE_LC_RECONFIG_NOW));
3636
3637	if (rdev->family >= CHIP_RV770)
3638		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3639	else
3640		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3641
3642	/* wait for lane set to complete */
3643	link_width_cntl = RREG32(target_reg);
3644	while (link_width_cntl == 0xffffffff)
3645		link_width_cntl = RREG32(target_reg);
3646
3647}
3648
3649int r600_get_pcie_lanes(struct radeon_device *rdev)
3650{
3651	u32 link_width_cntl;
3652
3653	if (rdev->flags & RADEON_IS_IGP)
3654		return 0;
3655
3656	if (!(rdev->flags & RADEON_IS_PCIE))
3657		return 0;
3658
3659	/* x2 cards have a special sequence */
3660	if (ASIC_IS_X2(rdev))
3661		return 0;
3662
3663	/* FIXME wait for idle */
3664
3665	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3666
3667	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3668	case RADEON_PCIE_LC_LINK_WIDTH_X0:
3669		return 0;
3670	case RADEON_PCIE_LC_LINK_WIDTH_X1:
3671		return 1;
3672	case RADEON_PCIE_LC_LINK_WIDTH_X2:
3673		return 2;
3674	case RADEON_PCIE_LC_LINK_WIDTH_X4:
3675		return 4;
3676	case RADEON_PCIE_LC_LINK_WIDTH_X8:
3677		return 8;
3678	case RADEON_PCIE_LC_LINK_WIDTH_X16:
3679	default:
3680		return 16;
3681	}
3682}
3683
3684static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3685{
3686	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3687	u16 link_cntl2;
3688
3689	if (radeon_pcie_gen2 == 0)
3690		return;
3691
3692	if (rdev->flags & RADEON_IS_IGP)
3693		return;
3694
3695	if (!(rdev->flags & RADEON_IS_PCIE))
3696		return;
3697
3698	/* x2 cards have a special sequence */
3699	if (ASIC_IS_X2(rdev))
3700		return;
3701
3702	/* only RV6xx+ chips are supported */
3703	if (rdev->family <= CHIP_R600)
3704		return;
3705
3706	/* 55 nm r6xx asics */
3707	if ((rdev->family == CHIP_RV670) ||
3708	    (rdev->family == CHIP_RV620) ||
3709	    (rdev->family == CHIP_RV635)) {
3710		/* advertise upconfig capability */
3711		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3712		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3713		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3714		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3715		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3716			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3717			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3718					     LC_RECONFIG_ARC_MISSING_ESCAPE);
3719			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3720			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3721		} else {
3722			link_width_cntl |= LC_UPCONFIGURE_DIS;
3723			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3724		}
3725	}
3726
3727	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3728	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3729	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3730
3731		/* 55 nm r6xx asics */
3732		if ((rdev->family == CHIP_RV670) ||
3733		    (rdev->family == CHIP_RV620) ||
3734		    (rdev->family == CHIP_RV635)) {
3735			WREG32(MM_CFGREGS_CNTL, 0x8);
3736			link_cntl2 = RREG32(0x4088);
3737			WREG32(MM_CFGREGS_CNTL, 0);
3738			/* not supported yet */
3739			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3740				return;
3741		}
3742
3743		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3744		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3745		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3746		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3747		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3748		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3749
3750		tmp = RREG32(0x541c);
3751		WREG32(0x541c, tmp | 0x8);
3752		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3753		link_cntl2 = RREG16(0x4088);
3754		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3755		link_cntl2 |= 0x2;
3756		WREG16(0x4088, link_cntl2);
3757		WREG32(MM_CFGREGS_CNTL, 0);
3758
3759		if ((rdev->family == CHIP_RV670) ||
3760		    (rdev->family == CHIP_RV620) ||
3761		    (rdev->family == CHIP_RV635)) {
3762			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3763			training_cntl &= ~LC_POINT_7_PLUS_EN;
3764			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3765		} else {
3766			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3767			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3768			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3769		}
3770
3771		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3772		speed_cntl |= LC_GEN2_EN_STRAP;
3773		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3774
3775	} else {
3776		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3777		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3778		if (1)
3779			link_width_cntl |= LC_UPCONFIGURE_DIS;
3780		else
3781			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3782		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3783	}
3784}