1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/radeon_drm.h>
32#include "radeon_reg.h"
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "r100d.h"
36#include "rs100d.h"
37#include "rv200d.h"
38#include "rv250d.h"
39#include "atom.h"
40
41#include <linux/firmware.h>
42#include <linux/module.h>
43
44#include "r100_reg_safe.h"
45#include "rn50_reg_safe.h"
46
47/* Firmware Names */
48#define FIRMWARE_R100 "radeon/R100_cp.bin"
49#define FIRMWARE_R200 "radeon/R200_cp.bin"
50#define FIRMWARE_R300 "radeon/R300_cp.bin"
51#define FIRMWARE_R420 "radeon/R420_cp.bin"
52#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
53#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
54#define FIRMWARE_R520 "radeon/R520_cp.bin"
55
56MODULE_FIRMWARE(FIRMWARE_R100);
57MODULE_FIRMWARE(FIRMWARE_R200);
58MODULE_FIRMWARE(FIRMWARE_R300);
59MODULE_FIRMWARE(FIRMWARE_R420);
60MODULE_FIRMWARE(FIRMWARE_RS690);
61MODULE_FIRMWARE(FIRMWARE_RS600);
62MODULE_FIRMWARE(FIRMWARE_R520);
63
64#include "r100_track.h"
65
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */
70
71static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
72{
73 if (crtc == 0) {
74 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
75 return true;
76 else
77 return false;
78 } else {
79 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
80 return true;
81 else
82 return false;
83 }
84}
85
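/* Sample the current scanline twice; if the value changes between the two
 * reads, the CRTC timing generator is running and the counter is advancing.
 */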
86static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
87{
88 u32 vline1, vline2;
89
90 if (crtc == 0) {
91 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
92 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
93 } else {
94 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
95 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
96 }
97 if (vline1 != vline2)
98 return true;
99 else
100 return false;
101}
102
103/**
104 * r100_wait_for_vblank - vblank wait asic callback.
105 *
106 * @rdev: radeon_device pointer
107 * @crtc: crtc to wait for vblank on
108 *
109 * Wait for vblank on the requested crtc (r1xx-r4xx).
110 */
111void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
112{
113 unsigned i = 0;
114
115 if (crtc >= rdev->num_crtc)
116 return;
117
118 if (crtc == 0) {
119 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
120 return;
121 } else {
122 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
123 return;
124 }
125
126 /* depending on when we hit vblank, we may be close to active; if so,
127 * wait for another frame.
128 */
129 while (r100_is_in_vblank(rdev, crtc)) {
130 if (i++ % 100 == 0) {
131 if (!r100_is_counter_moving(rdev, crtc))
132 break;
133 }
134 }
135
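	/* We are now outside vblank; wait for the next vblank period to start.
	 * Bail out if the line counter stops advancing (e.g. the CRTC timing
	 * was shut off), so we never spin here forever.
	 */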
136 while (!r100_is_in_vblank(rdev, crtc)) {
137 if (i++ % 100 == 0) {
138 if (!r100_is_counter_moving(rdev, crtc))
139 break;
140 }
141 }
142}
143
144/**
145 * r100_page_flip - pageflip callback.
146 *
147 * @rdev: radeon_device pointer
148 * @crtc_id: crtc to cleanup pageflip on
149 * @crtc_base: new address of the crtc (GPU MC address)
150 *
151 * Does the actual pageflip (r1xx-r4xx).
152 * During vblank we take the crtc lock and wait for the update_pending
153 * bit to go high, when it does, we release the lock, and allow the
154 * double buffered update to take place.
155 */
156void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
157{
158 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
159 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
160 int i;
161
162 /* Lock the graphics update lock */
163 /* update the scanout addresses */
164 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
165
166 /* Wait for update_pending to go high. */
167 for (i = 0; i < rdev->usec_timeout; i++) {
168 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
169 break;
170 udelay(1);
171 }
172 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
173
174 /* Unlock the lock, so double-buffering can take place inside vblank */
175 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
176 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
177
178}
179
180/**
181 * r100_page_flip_pending - check if page flip is still pending
182 *
183 * @rdev: radeon_device pointer
184 * @crtc_id: crtc to check
185 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
187 * Returns the current update pending status.
188 */
189bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
190{
191 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
192
193 /* Return current update_pending status: */
194 return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
195 RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
196}
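
/*
 * Illustrative usage only (not taken from this file): a caller that needs to
 * block until the flip has actually latched could poll this helper, e.g.
 *
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		if (!r100_page_flip_pending(rdev, crtc_id))
 *			break;
 *		udelay(1);
 *	}
 */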
197
198/**
199 * r100_pm_get_dynpm_state - look up dynpm power state callback.
200 *
201 * @rdev: radeon_device pointer
202 *
203 * Look up the optimal power state based on the
204 * current state of the GPU (r1xx-r5xx).
205 * Used for dynpm only.
206 */
207void r100_pm_get_dynpm_state(struct radeon_device *rdev)
208{
209 int i;
210 rdev->pm.dynpm_can_upclock = true;
211 rdev->pm.dynpm_can_downclock = true;
212
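	/* Map the planned dynpm action onto a power state index:
	 *  MINIMUM   - lowest state (index 0)
	 *  DOWNCLOCK - next lower state (skipping single-display-only states
	 *              when more than one crtc is active)
	 *  UPCLOCK   - next higher state (same skipping rule)
	 *  DEFAULT   - the default power state
	 */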
213 switch (rdev->pm.dynpm_planned_action) {
214 case DYNPM_ACTION_MINIMUM:
215 rdev->pm.requested_power_state_index = 0;
216 rdev->pm.dynpm_can_downclock = false;
217 break;
218 case DYNPM_ACTION_DOWNCLOCK:
219 if (rdev->pm.current_power_state_index == 0) {
220 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
221 rdev->pm.dynpm_can_downclock = false;
222 } else {
223 if (rdev->pm.active_crtc_count > 1) {
224 for (i = 0; i < rdev->pm.num_power_states; i++) {
225 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
226 continue;
227 else if (i >= rdev->pm.current_power_state_index) {
228 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
229 break;
230 } else {
231 rdev->pm.requested_power_state_index = i;
232 break;
233 }
234 }
235 } else
236 rdev->pm.requested_power_state_index =
237 rdev->pm.current_power_state_index - 1;
238 }
239 /* don't use the power state if crtcs are active and no display flag is set */
240 if ((rdev->pm.active_crtc_count > 0) &&
241 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
242 RADEON_PM_MODE_NO_DISPLAY)) {
243 rdev->pm.requested_power_state_index++;
244 }
245 break;
246 case DYNPM_ACTION_UPCLOCK:
247 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
248 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
249 rdev->pm.dynpm_can_upclock = false;
250 } else {
251 if (rdev->pm.active_crtc_count > 1) {
252 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
253 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
254 continue;
255 else if (i <= rdev->pm.current_power_state_index) {
256 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
257 break;
258 } else {
259 rdev->pm.requested_power_state_index = i;
260 break;
261 }
262 }
263 } else
264 rdev->pm.requested_power_state_index =
265 rdev->pm.current_power_state_index + 1;
266 }
267 break;
268 case DYNPM_ACTION_DEFAULT:
269 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
270 rdev->pm.dynpm_can_upclock = false;
271 break;
272 case DYNPM_ACTION_NONE:
273 default:
		DRM_ERROR("Requested mode for undefined action\n");
275 return;
276 }
277 /* only one clock mode per power state */
278 rdev->pm.requested_clock_mode_index = 0;
279
280 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
281 rdev->pm.power_state[rdev->pm.requested_power_state_index].
282 clock_info[rdev->pm.requested_clock_mode_index].sclk,
283 rdev->pm.power_state[rdev->pm.requested_power_state_index].
284 clock_info[rdev->pm.requested_clock_mode_index].mclk,
285 rdev->pm.power_state[rdev->pm.requested_power_state_index].
286 pcie_lanes);
287}
288
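/*
 * The profile table below is indexed by PM_PROFILE_*_IDX.  Each entry records
 * which power state (ps_idx) and clock mode (cm_idx) to use for the dpms-on
 * and dpms-off cases.  These chips only have one clock mode per power state,
 * so every cm_idx is 0.
 */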
289/**
290 * r100_pm_init_profile - Initialize power profiles callback.
291 *
292 * @rdev: radeon_device pointer
293 *
294 * Initialize the power states used in profile mode
295 * (r1xx-r3xx).
296 * Used for profile mode only.
297 */
298void r100_pm_init_profile(struct radeon_device *rdev)
299{
300 /* default */
301 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
302 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
303 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
304 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
305 /* low sh */
306 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
307 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
308 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
309 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
310 /* mid sh */
311 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
312 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
313 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
314 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
315 /* high sh */
316 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
317 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
318 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
319 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
320 /* low mh */
321 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
322 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
323 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
324 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
325 /* mid mh */
326 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
327 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
328 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
329 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
330 /* high mh */
331 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
332 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
333 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
334 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
335}
336
337/**
338 * r100_pm_misc - set additional pm hw parameters callback.
339 *
340 * @rdev: radeon_device pointer
341 *
342 * Set non-clock parameters associated with a power state
343 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
344 */
345void r100_pm_misc(struct radeon_device *rdev)
346{
347 int requested_index = rdev->pm.requested_power_state_index;
348 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
349 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
350 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
351
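	/* Drive the voltage-control GPIO for the new state: assert it when the
	 * state supports voltage drop, de-assert it otherwise, honouring the
	 * pin's active-high/low polarity and any required settle delay.
	 */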
352 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
353 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
354 tmp = RREG32(voltage->gpio.reg);
355 if (voltage->active_high)
356 tmp |= voltage->gpio.mask;
357 else
358 tmp &= ~(voltage->gpio.mask);
359 WREG32(voltage->gpio.reg, tmp);
360 if (voltage->delay)
361 udelay(voltage->delay);
362 } else {
363 tmp = RREG32(voltage->gpio.reg);
364 if (voltage->active_high)
365 tmp &= ~voltage->gpio.mask;
366 else
367 tmp |= voltage->gpio.mask;
368 WREG32(voltage->gpio.reg, tmp);
369 if (voltage->delay)
370 udelay(voltage->delay);
371 }
372 }
373
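	/* Program the PLL-side clock controls: the reduced-speed SCLK divider,
	 * the voltage-drop synchronisation delay and dynamic HDP clocking are
	 * all derived from the state's misc flags below.
	 */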
374 sclk_cntl = RREG32_PLL(SCLK_CNTL);
375 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
376 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
377 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
378 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
379 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
380 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
381 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
382 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
383 else
384 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
385 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
386 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
387 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
388 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
389 } else
390 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
391
392 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
393 sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
394 if (voltage->delay) {
395 sclk_more_cntl |= VOLTAGE_DROP_SYNC;
396 switch (voltage->delay) {
397 case 33:
398 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
399 break;
400 case 66:
401 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
402 break;
403 case 99:
404 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
405 break;
406 case 132:
407 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
408 break;
409 }
410 } else
411 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
412 } else
413 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
414
415 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
416 sclk_cntl &= ~FORCE_HDP;
417 else
418 sclk_cntl |= FORCE_HDP;
419
420 WREG32_PLL(SCLK_CNTL, sclk_cntl);
421 WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
422 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
423
424 /* set pcie lanes */
425 if ((rdev->flags & RADEON_IS_PCIE) &&
426 !(rdev->flags & RADEON_IS_IGP) &&
427 rdev->asic->pm.set_pcie_lanes &&
428 (ps->pcie_lanes !=
429 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
430 radeon_set_pcie_lanes(rdev,
431 ps->pcie_lanes);
432 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
433 }
434}
435
436/**
437 * r100_pm_prepare - pre-power state change callback.
438 *
439 * @rdev: radeon_device pointer
440 *
441 * Prepare for a power state change (r1xx-r4xx).
442 */
443void r100_pm_prepare(struct radeon_device *rdev)
444{
445 struct drm_device *ddev = rdev->ddev;
446 struct drm_crtc *crtc;
447 struct radeon_crtc *radeon_crtc;
448 u32 tmp;
449
450 /* disable any active CRTCs */
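	/* Setting the DISP_REQ_EN_B bit stops a CRTC from issuing display
	 * memory requests while the power state changes; r100_pm_finish()
	 * clears it again once the transition is done.
	 */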
451 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
452 radeon_crtc = to_radeon_crtc(crtc);
453 if (radeon_crtc->enabled) {
454 if (radeon_crtc->crtc_id) {
455 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
456 tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
457 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
458 } else {
459 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
460 tmp |= RADEON_CRTC_DISP_REQ_EN_B;
461 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
462 }
463 }
464 }
465}
466
467/**
468 * r100_pm_finish - post-power state change callback.
469 *
470 * @rdev: radeon_device pointer
471 *
472 * Clean up after a power state change (r1xx-r4xx).
473 */
474void r100_pm_finish(struct radeon_device *rdev)
475{
476 struct drm_device *ddev = rdev->ddev;
477 struct drm_crtc *crtc;
478 struct radeon_crtc *radeon_crtc;
479 u32 tmp;
480
481 /* enable any active CRTCs */
482 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
483 radeon_crtc = to_radeon_crtc(crtc);
484 if (radeon_crtc->enabled) {
485 if (radeon_crtc->crtc_id) {
486 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
487 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
488 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
489 } else {
490 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
491 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
492 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
493 }
494 }
495 }
496}
497
498/**
499 * r100_gui_idle - gui idle callback.
500 *
501 * @rdev: radeon_device pointer
502 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
504 * Returns true if idle, false if not.
505 */
506bool r100_gui_idle(struct radeon_device *rdev)
507{
508 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
509 return false;
510 else
511 return true;
512}
513
514/* hpd for digital panel detect/disconnect */
515/**
516 * r100_hpd_sense - hpd sense callback.
517 *
518 * @rdev: radeon_device pointer
519 * @hpd: hpd (hotplug detect) pin
520 *
521 * Checks if a digital monitor is connected (r1xx-r4xx).
522 * Returns true if connected, false if not connected.
523 */
524bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
525{
526 bool connected = false;
527
528 switch (hpd) {
529 case RADEON_HPD_1:
530 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
531 connected = true;
532 break;
533 case RADEON_HPD_2:
534 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
535 connected = true;
536 break;
537 default:
538 break;
539 }
540 return connected;
541}
542
543/**
544 * r100_hpd_set_polarity - hpd set polarity callback.
545 *
546 * @rdev: radeon_device pointer
547 * @hpd: hpd (hotplug detect) pin
548 *
549 * Set the polarity of the hpd pin (r1xx-r4xx).
550 */
551void r100_hpd_set_polarity(struct radeon_device *rdev,
552 enum radeon_hpd_id hpd)
553{
554 u32 tmp;
555 bool connected = r100_hpd_sense(rdev, hpd);
556
557 switch (hpd) {
558 case RADEON_HPD_1:
559 tmp = RREG32(RADEON_FP_GEN_CNTL);
560 if (connected)
561 tmp &= ~RADEON_FP_DETECT_INT_POL;
562 else
563 tmp |= RADEON_FP_DETECT_INT_POL;
564 WREG32(RADEON_FP_GEN_CNTL, tmp);
565 break;
566 case RADEON_HPD_2:
567 tmp = RREG32(RADEON_FP2_GEN_CNTL);
568 if (connected)
569 tmp &= ~RADEON_FP2_DETECT_INT_POL;
570 else
571 tmp |= RADEON_FP2_DETECT_INT_POL;
572 WREG32(RADEON_FP2_GEN_CNTL, tmp);
573 break;
574 default:
575 break;
576 }
577}
578
579/**
580 * r100_hpd_init - hpd setup callback.
581 *
582 * @rdev: radeon_device pointer
583 *
584 * Setup the hpd pins used by the card (r1xx-r4xx).
585 * Set the polarity, and enable the hpd interrupts.
586 */
587void r100_hpd_init(struct radeon_device *rdev)
588{
589 struct drm_device *dev = rdev->ddev;
590 struct drm_connector *connector;
591 unsigned enable = 0;
592
593 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
594 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
595 enable |= 1 << radeon_connector->hpd.hpd;
596 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
597 }
598 radeon_irq_kms_enable_hpd(rdev, enable);
599}
600
601/**
602 * r100_hpd_fini - hpd tear down callback.
603 *
604 * @rdev: radeon_device pointer
605 *
606 * Tear down the hpd pins used by the card (r1xx-r4xx).
607 * Disable the hpd interrupts.
608 */
609void r100_hpd_fini(struct radeon_device *rdev)
610{
611 struct drm_device *dev = rdev->ddev;
612 struct drm_connector *connector;
613 unsigned disable = 0;
614
615 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
616 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
617 disable |= 1 << radeon_connector->hpd.hpd;
618 }
619 radeon_irq_kms_disable_hpd(rdev, disable);
620}
621
622/*
623 * PCI GART
624 */
625void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
626{
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits it, it could
	 * end up at the wrong address. */
631}
632
633int r100_pci_gart_init(struct radeon_device *rdev)
634{
635 int r;
636
637 if (rdev->gart.ptr) {
638 WARN(1, "R100 PCI GART already initialized\n");
639 return 0;
640 }
641 /* Initialize common gart structure */
642 r = radeon_gart_init(rdev);
643 if (r)
644 return r;
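	/* each PCI GART entry is a single 32-bit dword per GPU page */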
645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
647 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
648 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
649 return radeon_gart_table_ram_alloc(rdev);
650}
651
652int r100_pci_gart_enable(struct radeon_device *rdev)
653{
654 uint32_t tmp;
655
656 /* discard memory request outside of configured range */
657 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
658 WREG32(RADEON_AIC_CNTL, tmp);
659 /* set address range for PCI address translate */
660 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
661 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
662 /* set PCI GART page-table base address */
663 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
664 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
665 WREG32(RADEON_AIC_CNTL, tmp);
666 r100_pci_gart_tlb_flush(rdev);
667 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
668 (unsigned)(rdev->mc.gtt_size >> 20),
669 (unsigned long long)rdev->gart.table_addr);
670 rdev->gart.ready = true;
671 return 0;
672}
673
674void r100_pci_gart_disable(struct radeon_device *rdev)
675{
676 uint32_t tmp;
677
678 /* discard memory request outside of configured range */
679 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
680 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
681 WREG32(RADEON_AIC_LO_ADDR, 0);
682 WREG32(RADEON_AIC_HI_ADDR, 0);
683}
684
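/* PCI GART entries carry no flag bits; the entry is simply the DMA address
 * of the page (only the lower 32 bits are ever written to the table).
 */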
685uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
686{
687 return addr;
688}
689
690void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
691 uint64_t entry)
692{
693 u32 *gtt = rdev->gart.ptr;
694 gtt[i] = cpu_to_le32(lower_32_bits(entry));
695}
696
697void r100_pci_gart_fini(struct radeon_device *rdev)
698{
699 radeon_gart_fini(rdev);
700 r100_pci_gart_disable(rdev);
701 radeon_gart_table_ram_free(rdev);
702}
703
704int r100_irq_set(struct radeon_device *rdev)
705{
706 uint32_t tmp = 0;
707
708 if (!rdev->irq.installed) {
709 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
710 WREG32(R_000040_GEN_INT_CNTL, 0);
711 return -EINVAL;
712 }
713 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
714 tmp |= RADEON_SW_INT_ENABLE;
715 }
716 if (rdev->irq.crtc_vblank_int[0] ||
717 atomic_read(&rdev->irq.pflip[0])) {
718 tmp |= RADEON_CRTC_VBLANK_MASK;
719 }
720 if (rdev->irq.crtc_vblank_int[1] ||
721 atomic_read(&rdev->irq.pflip[1])) {
722 tmp |= RADEON_CRTC2_VBLANK_MASK;
723 }
724 if (rdev->irq.hpd[0]) {
725 tmp |= RADEON_FP_DETECT_MASK;
726 }
727 if (rdev->irq.hpd[1]) {
728 tmp |= RADEON_FP2_DETECT_MASK;
729 }
730 WREG32(RADEON_GEN_INT_CNTL, tmp);
731
732 /* read back to post the write */
733 RREG32(RADEON_GEN_INT_CNTL);
734
735 return 0;
736}
737
738void r100_irq_disable(struct radeon_device *rdev)
739{
740 u32 tmp;
741
742 WREG32(R_000040_GEN_INT_CNTL, 0);
743 /* Wait and acknowledge irq */
744 mdelay(1);
745 tmp = RREG32(R_000044_GEN_INT_STATUS);
746 WREG32(R_000044_GEN_INT_STATUS, tmp);
747}
748
749static uint32_t r100_irq_ack(struct radeon_device *rdev)
750{
751 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
752 uint32_t irq_mask = RADEON_SW_INT_TEST |
753 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
754 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
755
756 if (irqs) {
757 WREG32(RADEON_GEN_INT_STATUS, irqs);
758 }
759 return irqs & irq_mask;
760}
761
762int r100_irq_process(struct radeon_device *rdev)
763{
764 uint32_t status, msi_rearm;
765 bool queue_hotplug = false;
766
767 status = r100_irq_ack(rdev);
768 if (!status) {
769 return IRQ_NONE;
770 }
771 if (rdev->shutdown) {
772 return IRQ_NONE;
773 }
774 while (status) {
775 /* SW interrupt */
776 if (status & RADEON_SW_INT_TEST) {
777 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
778 }
779 /* Vertical blank interrupts */
780 if (status & RADEON_CRTC_VBLANK_STAT) {
781 if (rdev->irq.crtc_vblank_int[0]) {
782 drm_handle_vblank(rdev->ddev, 0);
783 rdev->pm.vblank_sync = true;
784 wake_up(&rdev->irq.vblank_queue);
785 }
786 if (atomic_read(&rdev->irq.pflip[0]))
787 radeon_crtc_handle_vblank(rdev, 0);
788 }
789 if (status & RADEON_CRTC2_VBLANK_STAT) {
790 if (rdev->irq.crtc_vblank_int[1]) {
791 drm_handle_vblank(rdev->ddev, 1);
792 rdev->pm.vblank_sync = true;
793 wake_up(&rdev->irq.vblank_queue);
794 }
795 if (atomic_read(&rdev->irq.pflip[1]))
796 radeon_crtc_handle_vblank(rdev, 1);
797 }
798 if (status & RADEON_FP_DETECT_STAT) {
799 queue_hotplug = true;
800 DRM_DEBUG("HPD1\n");
801 }
802 if (status & RADEON_FP2_DETECT_STAT) {
803 queue_hotplug = true;
804 DRM_DEBUG("HPD2\n");
805 }
806 status = r100_irq_ack(rdev);
807 }
808 if (queue_hotplug)
809 schedule_delayed_work(&rdev->hotplug_work, 0);
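	/* Re-arm MSI delivery: RS400/RS480 toggle the rearm bit in AIC_CNTL,
	 * while later parts use the dedicated MSI_REARM_EN register.
	 */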
810 if (rdev->msi_enabled) {
811 switch (rdev->family) {
812 case CHIP_RS400:
813 case CHIP_RS480:
814 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
815 WREG32(RADEON_AIC_CNTL, msi_rearm);
816 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
817 break;
818 default:
819 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
820 break;
821 }
822 }
823 return IRQ_HANDLED;
824}
825
826u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
827{
828 if (crtc == 0)
829 return RREG32(RADEON_CRTC_CRNT_FRAME);
830 else
831 return RREG32(RADEON_CRTC2_CRNT_FRAME);
832}
833
/**
 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 * @rdev: radeon device structure
 * @ring: ring buffer struct for emitting packets
 */
839static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
840{
841 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
842 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
843 RADEON_HDP_READ_BUFFER_INVALIDATE);
844 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
845 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
846}
847
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
850void r100_fence_ring_emit(struct radeon_device *rdev,
851 struct radeon_fence *fence)
852{
853 struct radeon_ring *ring = &rdev->ring[fence->ring];
854
	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
857 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
858 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
859 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
860 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
861 /* Wait until IDLE & CLEAN */
862 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
863 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
864 r100_ring_hdp_flush(rdev, ring);
865 /* Emit fence sequence & fire IRQ */
866 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
867 radeon_ring_write(ring, fence->seq);
868 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
869 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
870}
871
872bool r100_semaphore_ring_emit(struct radeon_device *rdev,
873 struct radeon_ring *ring,
874 struct radeon_semaphore *semaphore,
875 bool emit_wait)
876{
877 /* Unused on older asics, since we don't have semaphores or multiple rings */
878 BUG();
879 return false;
880}
881
882struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
883 uint64_t src_offset,
884 uint64_t dst_offset,
885 unsigned num_gpu_pages,
886 struct reservation_object *resv)
887{
888 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
889 struct radeon_fence *fence;
890 uint32_t cur_pages;
891 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
892 uint32_t pitch;
893 uint32_t stride_pixels;
894 unsigned ndw;
895 int num_loops;
896 int r = 0;
897
898 /* radeon limited to 16k stride */
899 stride_bytes &= 0x3fff;
900 /* radeon pitch is /64 */
901 pitch = stride_bytes / 64;
902 stride_pixels = stride_bytes / 4;
903 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
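	/* a single BITBLT_MULTI packet can copy at most 8191 GPU pages,
	 * so split larger copies into that many passes */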
904
905 /* Ask for enough room for blit + flush + fence */
906 ndw = 64 + (10 * num_loops);
907 r = radeon_ring_lock(rdev, ring, ndw);
908 if (r) {
909 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
910 return ERR_PTR(-EINVAL);
911 }
912 while (num_gpu_pages > 0) {
913 cur_pages = num_gpu_pages;
914 if (cur_pages > 8191) {
915 cur_pages = 8191;
916 }
917 num_gpu_pages -= cur_pages;
918
919 /* pages are in Y direction - height
920 page width in X direction - width */
921 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
922 radeon_ring_write(ring,
923 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
924 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
925 RADEON_GMC_SRC_CLIPPING |
926 RADEON_GMC_DST_CLIPPING |
927 RADEON_GMC_BRUSH_NONE |
928 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
929 RADEON_GMC_SRC_DATATYPE_COLOR |
930 RADEON_ROP3_S |
931 RADEON_DP_SRC_SOURCE_MEMORY |
932 RADEON_GMC_CLR_CMP_CNTL_DIS |
933 RADEON_GMC_WR_MSK_DIS);
934 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
935 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
936 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
937 radeon_ring_write(ring, 0);
938 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
939 radeon_ring_write(ring, num_gpu_pages);
940 radeon_ring_write(ring, num_gpu_pages);
941 radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
942 }
943 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
944 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
945 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
946 radeon_ring_write(ring,
947 RADEON_WAIT_2D_IDLECLEAN |
948 RADEON_WAIT_HOST_IDLECLEAN |
949 RADEON_WAIT_DMA_GUI_IDLE);
950 r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
951 if (r) {
952 radeon_ring_unlock_undo(rdev, ring);
953 return ERR_PTR(r);
954 }
955 radeon_ring_unlock_commit(rdev, ring, false);
956 return fence;
957}
958
959static int r100_cp_wait_for_idle(struct radeon_device *rdev)
960{
961 unsigned i;
962 u32 tmp;
963
964 for (i = 0; i < rdev->usec_timeout; i++) {
965 tmp = RREG32(R_000E40_RBBM_STATUS);
966 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
967 return 0;
968 }
969 udelay(1);
970 }
971 return -1;
972}
973
974void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
975{
976 int r;
977
978 r = radeon_ring_lock(rdev, ring, 2);
979 if (r) {
980 return;
981 }
982 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
983 radeon_ring_write(ring,
984 RADEON_ISYNC_ANY2D_IDLE3D |
985 RADEON_ISYNC_ANY3D_IDLE2D |
986 RADEON_ISYNC_WAIT_IDLEGUI |
987 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
988 radeon_ring_unlock_commit(rdev, ring, false);
989}
990
991
992/* Load the microcode for the CP */
993static int r100_cp_init_microcode(struct radeon_device *rdev)
994{
995 const char *fw_name = NULL;
996 int err;
997
998 DRM_DEBUG_KMS("\n");
999
1000 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
1001 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
1002 (rdev->family == CHIP_RS200)) {
1003 DRM_INFO("Loading R100 Microcode\n");
1004 fw_name = FIRMWARE_R100;
1005 } else if ((rdev->family == CHIP_R200) ||
1006 (rdev->family == CHIP_RV250) ||
1007 (rdev->family == CHIP_RV280) ||
1008 (rdev->family == CHIP_RS300)) {
1009 DRM_INFO("Loading R200 Microcode\n");
1010 fw_name = FIRMWARE_R200;
1011 } else if ((rdev->family == CHIP_R300) ||
1012 (rdev->family == CHIP_R350) ||
1013 (rdev->family == CHIP_RV350) ||
1014 (rdev->family == CHIP_RV380) ||
1015 (rdev->family == CHIP_RS400) ||
1016 (rdev->family == CHIP_RS480)) {
1017 DRM_INFO("Loading R300 Microcode\n");
1018 fw_name = FIRMWARE_R300;
1019 } else if ((rdev->family == CHIP_R420) ||
1020 (rdev->family == CHIP_R423) ||
1021 (rdev->family == CHIP_RV410)) {
1022 DRM_INFO("Loading R400 Microcode\n");
1023 fw_name = FIRMWARE_R420;
1024 } else if ((rdev->family == CHIP_RS690) ||
1025 (rdev->family == CHIP_RS740)) {
1026 DRM_INFO("Loading RS690/RS740 Microcode\n");
1027 fw_name = FIRMWARE_RS690;
1028 } else if (rdev->family == CHIP_RS600) {
1029 DRM_INFO("Loading RS600 Microcode\n");
1030 fw_name = FIRMWARE_RS600;
1031 } else if ((rdev->family == CHIP_RV515) ||
1032 (rdev->family == CHIP_R520) ||
1033 (rdev->family == CHIP_RV530) ||
1034 (rdev->family == CHIP_R580) ||
1035 (rdev->family == CHIP_RV560) ||
1036 (rdev->family == CHIP_RV570)) {
1037 DRM_INFO("Loading R500 Microcode\n");
1038 fw_name = FIRMWARE_R520;
1039 }
1040
1041 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1042 if (err) {
1043 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
1044 fw_name);
1045 } else if (rdev->me_fw->size % 8) {
1046 printk(KERN_ERR
1047 "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
1048 rdev->me_fw->size, fw_name);
1049 err = -EINVAL;
1050 release_firmware(rdev->me_fw);
1051 rdev->me_fw = NULL;
1052 }
1053 return err;
1054}
1055
1056u32 r100_gfx_get_rptr(struct radeon_device *rdev,
1057 struct radeon_ring *ring)
1058{
1059 u32 rptr;
1060
1061 if (rdev->wb.enabled)
1062 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
1063 else
1064 rptr = RREG32(RADEON_CP_RB_RPTR);
1065
1066 return rptr;
1067}
1068
1069u32 r100_gfx_get_wptr(struct radeon_device *rdev,
1070 struct radeon_ring *ring)
1071{
1072 u32 wptr;
1073
1074 wptr = RREG32(RADEON_CP_RB_WPTR);
1075
1076 return wptr;
1077}
1078
1079void r100_gfx_set_wptr(struct radeon_device *rdev,
1080 struct radeon_ring *ring)
1081{
1082 WREG32(RADEON_CP_RB_WPTR, ring->wptr);
1083 (void)RREG32(RADEON_CP_RB_WPTR);
1084}
1085
1086static void r100_cp_load_microcode(struct radeon_device *rdev)
1087{
1088 const __be32 *fw_data;
1089 int i, size;
1090
1091 if (r100_gui_wait_for_idle(rdev)) {
1092 printk(KERN_WARNING "Failed to wait GUI idle while "
1093 "programming pipes. Bad things might happen.\n");
1094 }
1095
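	/* The microcode image is a sequence of big-endian dword pairs; each
	 * pair is written to the CP microengine RAM via the high/low data
	 * registers after resetting the RAM address to 0.
	 */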
1096 if (rdev->me_fw) {
1097 size = rdev->me_fw->size / 4;
1098 fw_data = (const __be32 *)&rdev->me_fw->data[0];
1099 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
1100 for (i = 0; i < size; i += 2) {
1101 WREG32(RADEON_CP_ME_RAM_DATAH,
1102 be32_to_cpup(&fw_data[i]));
1103 WREG32(RADEON_CP_ME_RAM_DATAL,
1104 be32_to_cpup(&fw_data[i + 1]));
1105 }
1106 }
1107}
1108
1109int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1110{
1111 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1112 unsigned rb_bufsz;
1113 unsigned rb_blksz;
1114 unsigned max_fetch;
1115 unsigned pre_write_timer;
1116 unsigned pre_write_limit;
1117 unsigned indirect2_start;
1118 unsigned indirect1_start;
1119 uint32_t tmp;
1120 int r;
1121
1122 if (r100_debugfs_cp_init(rdev)) {
1123 DRM_ERROR("Failed to register debugfs file for CP !\n");
1124 }
1125 if (!rdev->me_fw) {
1126 r = r100_cp_init_microcode(rdev);
1127 if (r) {
1128 DRM_ERROR("Failed to load firmware!\n");
1129 return r;
1130 }
1131 }
1132
1133 /* Align ring size */
1134 rb_bufsz = order_base_2(ring_size / 8);
1135 ring_size = (1 << (rb_bufsz + 1)) * 4;
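	/* rb_bufsz is log2 of the ring size in 8-byte units; ring_size is
	 * rounded back up to the matching power of two */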
1136 r100_cp_load_microcode(rdev);
1137 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
1138 RADEON_CP_PACKET2);
1139 if (r) {
1140 return r;
1141 }
	/* Each time the cp reads 1024 bytes (16 dword/quadword) it updates
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
1147 ring->align_mask = 16 - 1;
1148 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
1149 pre_write_timer = 64;
1150 /* Force CP_RB_WPTR write if written more than one time before the
1151 * delay expire
1152 */
1153 pre_write_limit = 0;
1154 /* Setup the cp cache like this (cache size is 96 dwords) :
1155 * RING 0 to 15
1156 * INDIRECT1 16 to 79
1157 * INDIRECT2 80 to 95
1158 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1159 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
1160 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1161 * Idea being that most of the gpu cmd will be through indirect1 buffer
1162 * so it gets the bigger cache.
1163 */
1164 indirect2_start = 80;
1165 indirect1_start = 16;
1166 /* cp setup */
1167 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
1168 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
1169 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
1170 REG_SET(RADEON_MAX_FETCH, max_fetch));
1171#ifdef __BIG_ENDIAN
1172 tmp |= RADEON_BUF_SWAP_32BIT;
1173#endif
1174 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
1175
1176 /* Set ring address */
1177 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
1178 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
1179 /* Force read & write ptr to 0 */
1180 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
1181 WREG32(RADEON_CP_RB_RPTR_WR, 0);
1182 ring->wptr = 0;
1183 WREG32(RADEON_CP_RB_WPTR, ring->wptr);
1184
1185 /* set the wb address whether it's enabled or not */
1186 WREG32(R_00070C_CP_RB_RPTR_ADDR,
1187 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
1188 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
1189
1190 if (rdev->wb.enabled)
1191 WREG32(R_000770_SCRATCH_UMSK, 0xff);
1192 else {
1193 tmp |= RADEON_RB_NO_UPDATE;
1194 WREG32(R_000770_SCRATCH_UMSK, 0);
1195 }
1196
1197 WREG32(RADEON_CP_RB_CNTL, tmp);
1198 udelay(10);
1199 /* Set cp mode to bus mastering & enable cp*/
1200 WREG32(RADEON_CP_CSQ_MODE,
1201 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1202 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1203 WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
1204 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1205 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1206
1207 /* at this point everything should be setup correctly to enable master */
1208 pci_set_master(rdev->pdev);
1209
1210 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1211 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1212 if (r) {
1213 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
1214 return r;
1215 }
1216 ring->ready = true;
1217 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1218
1219 if (!ring->rptr_save_reg /* not resuming from suspend */
1220 && radeon_ring_supports_scratch_reg(rdev, ring)) {
1221 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
1222 if (r) {
1223 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
1224 ring->rptr_save_reg = 0;
1225 }
1226 }
1227 return 0;
1228}
1229
1230void r100_cp_fini(struct radeon_device *rdev)
1231{
1232 if (r100_cp_wait_for_idle(rdev)) {
1233 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
1234 }
1235 /* Disable ring */
1236 r100_cp_disable(rdev);
1237 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
1238 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1239 DRM_INFO("radeon: cp finalized\n");
1240}
1241
1242void r100_cp_disable(struct radeon_device *rdev)
1243{
1244 /* Disable ring */
1245 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1246 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1247 WREG32(RADEON_CP_CSQ_MODE, 0);
1248 WREG32(RADEON_CP_CSQ_CNTL, 0);
1249 WREG32(R_000770_SCRATCH_UMSK, 0);
1250 if (r100_gui_wait_for_idle(rdev)) {
1251 printk(KERN_WARNING "Failed to wait GUI idle while "
1252 "programming pipes. Bad things might happen.\n");
1253 }
1254}
1255
1256/*
1257 * CS functions
1258 */
1259int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1260 struct radeon_cs_packet *pkt,
1261 unsigned idx,
1262 unsigned reg)
1263{
1264 int r;
1265 u32 tile_flags = 0;
1266 u32 tmp;
1267 struct radeon_bo_list *reloc;
1268 u32 value;
1269
1270 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1271 if (r) {
1272 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1273 idx, reg);
1274 radeon_cs_dump_packet(p, pkt);
1275 return r;
1276 }
1277
1278 value = radeon_get_ib_value(p, idx);
1279 tmp = value & 0x003fffff;
1280 tmp += (((u32)reloc->gpu_offset) >> 10);
1281
1282 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1283 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1284 tile_flags |= RADEON_DST_TILE_MACRO;
1285 if (reloc->tiling_flags & RADEON_TILING_MICRO) {
1286 if (reg == RADEON_SRC_PITCH_OFFSET) {
1287 DRM_ERROR("Cannot src blit from microtiled surface\n");
1288 radeon_cs_dump_packet(p, pkt);
1289 return -EINVAL;
1290 }
1291 tile_flags |= RADEON_DST_TILE_MICRO;
1292 }
1293
1294 tmp |= tile_flags;
1295 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
1296 } else
1297 p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
1298 return 0;
1299}
1300
1301int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1302 struct radeon_cs_packet *pkt,
1303 int idx)
1304{
1305 unsigned c, i;
1306 struct radeon_bo_list *reloc;
1307 struct r100_cs_track *track;
1308 int r = 0;
1309 volatile uint32_t *ib;
1310 u32 idx_value;
1311
1312 ib = p->ib.ptr;
1313 track = (struct r100_cs_track *)p->track;
1314 c = radeon_get_ib_value(p, idx++) & 0x1F;
1315 if (c > 16) {
1316 DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
1317 pkt->opcode);
1318 radeon_cs_dump_packet(p, pkt);
1319 return -EINVAL;
1320 }
1321 track->num_arrays = c;
1322 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1323 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1324 if (r) {
1325 DRM_ERROR("No reloc for packet3 %d\n",
1326 pkt->opcode);
1327 radeon_cs_dump_packet(p, pkt);
1328 return r;
1329 }
1330 idx_value = radeon_get_ib_value(p, idx);
1331 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1332
1333 track->arrays[i + 0].esize = idx_value >> 8;
1334 track->arrays[i + 0].robj = reloc->robj;
1335 track->arrays[i + 0].esize &= 0x7F;
1336 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1337 if (r) {
1338 DRM_ERROR("No reloc for packet3 %d\n",
1339 pkt->opcode);
1340 radeon_cs_dump_packet(p, pkt);
1341 return r;
1342 }
1343 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
1344 track->arrays[i + 1].robj = reloc->robj;
1345 track->arrays[i + 1].esize = idx_value >> 24;
1346 track->arrays[i + 1].esize &= 0x7F;
1347 }
1348 if (c & 1) {
1349 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1350 if (r) {
1351 DRM_ERROR("No reloc for packet3 %d\n",
1352 pkt->opcode);
1353 radeon_cs_dump_packet(p, pkt);
1354 return r;
1355 }
1356 idx_value = radeon_get_ib_value(p, idx);
1357 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1358 track->arrays[i + 0].robj = reloc->robj;
1359 track->arrays[i + 0].esize = idx_value >> 8;
1360 track->arrays[i + 0].esize &= 0x7F;
1361 }
1362 return r;
1363}
1364
1365int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1366 struct radeon_cs_packet *pkt,
1367 const unsigned *auth, unsigned n,
1368 radeon_packet0_check_t check)
1369{
1370 unsigned reg;
1371 unsigned i, j, m;
1372 unsigned idx;
1373 int r;
1374
1375 idx = pkt->idx + 1;
1376 reg = pkt->reg;
	/* Check that the register falls into the register range covered by the
	 * safe-register bitmap, which has n entries.  Each 32-bit entry of the
	 * bitmap covers 32 registers (128 bytes of register space), hence the
	 * reg >> 7 index and the (reg >> 2) & 31 bit position used below.
	 */
1381 if (pkt->one_reg_wr) {
1382 if ((reg >> 7) > n) {
1383 return -EINVAL;
1384 }
1385 } else {
1386 if (((reg + (pkt->count << 2)) >> 7) > n) {
1387 return -EINVAL;
1388 }
1389 }
1390 for (i = 0; i <= pkt->count; i++, idx++) {
1391 j = (reg >> 7);
1392 m = 1 << ((reg >> 2) & 31);
1393 if (auth[j] & m) {
1394 r = check(p, pkt, idx, reg);
1395 if (r) {
1396 return r;
1397 }
1398 }
1399 if (pkt->one_reg_wr) {
1400 if (!(auth[j] & m)) {
1401 break;
1402 }
1403 } else {
1404 reg += 4;
1405 }
1406 }
1407 return 0;
1408}
1409
1410/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
1413 *
1414 * Userspace sends a special sequence for VLINE waits.
1415 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
1417 * RELOC (P3) - crtc_id in reloc.
1418 *
1419 * This function parses this and relocates the VLINE START END
1420 * and WAIT UNTIL packets to the correct crtc.
1421 * It also detects a switched off crtc and nulls out the
1422 * wait in that case.
1423 */
1424int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1425{
1426 struct drm_crtc *crtc;
1427 struct radeon_crtc *radeon_crtc;
1428 struct radeon_cs_packet p3reloc, waitreloc;
1429 int crtc_id;
1430 int r;
1431 uint32_t header, h_idx, reg;
1432 volatile uint32_t *ib;
1433
1434 ib = p->ib.ptr;
1435
1436 /* parse the wait until */
1437 r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
1438 if (r)
1439 return r;
1440
	/* check it's a wait until and only 1 count */
1442 if (waitreloc.reg != RADEON_WAIT_UNTIL ||
1443 waitreloc.count != 0) {
1444 DRM_ERROR("vline wait had illegal wait until segment\n");
1445 return -EINVAL;
1446 }
1447
1448 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
1449 DRM_ERROR("vline wait had illegal wait until\n");
1450 return -EINVAL;
1451 }
1452
1453 /* jump over the NOP */
1454 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1455 if (r)
1456 return r;
1457
1458 h_idx = p->idx - 2;
1459 p->idx += waitreloc.count + 2;
1460 p->idx += p3reloc.count + 2;
1461
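	/* h_idx points back at the PACKET0 header of the VLINE_START_END write;
	 * userspace stashes the drm crtc id five dwords after it, inside the
	 * reloc packet we just skipped over.
	 */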
1462 header = radeon_get_ib_value(p, h_idx);
1463 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1464 reg = R100_CP_PACKET0_GET_REG(header);
1465 crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
1466 if (!crtc) {
1467 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1468 return -ENOENT;
1469 }
1470 radeon_crtc = to_radeon_crtc(crtc);
1471 crtc_id = radeon_crtc->crtc_id;
1472
1473 if (!crtc->enabled) {
1474 /* if the CRTC isn't enabled - we need to nop out the wait until */
1475 ib[h_idx + 2] = PACKET2(0);
1476 ib[h_idx + 3] = PACKET2(0);
1477 } else if (crtc_id == 1) {
1478 switch (reg) {
1479 case AVIVO_D1MODE_VLINE_START_END:
1480 header &= ~R300_CP_PACKET0_REG_MASK;
1481 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1482 break;
1483 case RADEON_CRTC_GUI_TRIG_VLINE:
1484 header &= ~R300_CP_PACKET0_REG_MASK;
1485 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1486 break;
1487 default:
1488 DRM_ERROR("unknown crtc reloc\n");
1489 return -EINVAL;
1490 }
1491 ib[h_idx] = header;
1492 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1493 }
1494
1495 return 0;
1496}
1497
1498static int r100_get_vtx_size(uint32_t vtx_fmt)
1499{
1500 int vtx_size;
1501 vtx_size = 2;
1502 /* ordered according to bits in spec */
1503 if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
1504 vtx_size++;
1505 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
1506 vtx_size += 3;
1507 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
1508 vtx_size++;
1509 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
1510 vtx_size++;
1511 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
1512 vtx_size += 3;
1513 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
1514 vtx_size++;
1515 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
1516 vtx_size++;
1517 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
1518 vtx_size += 2;
1519 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
1520 vtx_size += 2;
1521 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
1522 vtx_size++;
1523 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
1524 vtx_size += 2;
1525 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
1526 vtx_size++;
1527 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
1528 vtx_size += 2;
1529 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
1530 vtx_size++;
1531 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
1532 vtx_size++;
1533 /* blend weight */
1534 if (vtx_fmt & (0x7 << 15))
1535 vtx_size += (vtx_fmt >> 15) & 0x7;
1536 if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
1537 vtx_size += 3;
1538 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
1539 vtx_size += 2;
1540 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
1541 vtx_size++;
1542 if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
1543 vtx_size++;
1544 if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
1545 vtx_size++;
1546 if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
1547 vtx_size++;
1548 return vtx_size;
1549}
1550
1551static int r100_packet0_check(struct radeon_cs_parser *p,
1552 struct radeon_cs_packet *pkt,
1553 unsigned idx, unsigned reg)
1554{
1555 struct radeon_bo_list *reloc;
1556 struct r100_cs_track *track;
1557 volatile uint32_t *ib;
1558 uint32_t tmp;
1559 int r;
1560 int i, face;
1561 u32 tile_flags = 0;
1562 u32 idx_value;
1563
1564 ib = p->ib.ptr;
1565 track = (struct r100_cs_track *)p->track;
1566
1567 idx_value = radeon_get_ib_value(p, idx);
1568
1569 switch (reg) {
1570 case RADEON_CRTC_GUI_TRIG_VLINE:
1571 r = r100_cs_packet_parse_vline(p);
1572 if (r) {
1573 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1574 idx, reg);
1575 radeon_cs_dump_packet(p, pkt);
1576 return r;
1577 }
1578 break;
1579 /* FIXME: only allow PACKET3 blit? easier to check for out of
1580 * range access */
1581 case RADEON_DST_PITCH_OFFSET:
1582 case RADEON_SRC_PITCH_OFFSET:
1583 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1584 if (r)
1585 return r;
1586 break;
1587 case RADEON_RB3D_DEPTHOFFSET:
1588 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1589 if (r) {
1590 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1591 idx, reg);
1592 radeon_cs_dump_packet(p, pkt);
1593 return r;
1594 }
1595 track->zb.robj = reloc->robj;
1596 track->zb.offset = idx_value;
1597 track->zb_dirty = true;
1598 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1599 break;
1600 case RADEON_RB3D_COLOROFFSET:
1601 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1602 if (r) {
1603 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1604 idx, reg);
1605 radeon_cs_dump_packet(p, pkt);
1606 return r;
1607 }
1608 track->cb[0].robj = reloc->robj;
1609 track->cb[0].offset = idx_value;
1610 track->cb_dirty = true;
1611 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1612 break;
1613 case RADEON_PP_TXOFFSET_0:
1614 case RADEON_PP_TXOFFSET_1:
1615 case RADEON_PP_TXOFFSET_2:
1616 i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1617 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1618 if (r) {
1619 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1620 idx, reg);
1621 radeon_cs_dump_packet(p, pkt);
1622 return r;
1623 }
1624 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1625 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1626 tile_flags |= RADEON_TXO_MACRO_TILE;
1627 if (reloc->tiling_flags & RADEON_TILING_MICRO)
1628 tile_flags |= RADEON_TXO_MICRO_TILE_X2;
1629
1630 tmp = idx_value & ~(0x7 << 2);
1631 tmp |= tile_flags;
1632 ib[idx] = tmp + ((u32)reloc->gpu_offset);
1633 } else
1634 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1635 track->textures[i].robj = reloc->robj;
1636 track->tex_dirty = true;
1637 break;
1638 case RADEON_PP_CUBIC_OFFSET_T0_0:
1639 case RADEON_PP_CUBIC_OFFSET_T0_1:
1640 case RADEON_PP_CUBIC_OFFSET_T0_2:
1641 case RADEON_PP_CUBIC_OFFSET_T0_3:
1642 case RADEON_PP_CUBIC_OFFSET_T0_4:
1643 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1644 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1645 if (r) {
1646 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1647 idx, reg);
1648 radeon_cs_dump_packet(p, pkt);
1649 return r;
1650 }
1651 track->textures[0].cube_info[i].offset = idx_value;
1652 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1653 track->textures[0].cube_info[i].robj = reloc->robj;
1654 track->tex_dirty = true;
1655 break;
1656 case RADEON_PP_CUBIC_OFFSET_T1_0:
1657 case RADEON_PP_CUBIC_OFFSET_T1_1:
1658 case RADEON_PP_CUBIC_OFFSET_T1_2:
1659 case RADEON_PP_CUBIC_OFFSET_T1_3:
1660 case RADEON_PP_CUBIC_OFFSET_T1_4:
1661 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1662 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1663 if (r) {
1664 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1665 idx, reg);
1666 radeon_cs_dump_packet(p, pkt);
1667 return r;
1668 }
1669 track->textures[1].cube_info[i].offset = idx_value;
1670 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1671 track->textures[1].cube_info[i].robj = reloc->robj;
1672 track->tex_dirty = true;
1673 break;
1674 case RADEON_PP_CUBIC_OFFSET_T2_0:
1675 case RADEON_PP_CUBIC_OFFSET_T2_1:
1676 case RADEON_PP_CUBIC_OFFSET_T2_2:
1677 case RADEON_PP_CUBIC_OFFSET_T2_3:
1678 case RADEON_PP_CUBIC_OFFSET_T2_4:
1679 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1680 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1681 if (r) {
1682 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1683 idx, reg);
1684 radeon_cs_dump_packet(p, pkt);
1685 return r;
1686 }
1687 track->textures[2].cube_info[i].offset = idx_value;
1688 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1689 track->textures[2].cube_info[i].robj = reloc->robj;
1690 track->tex_dirty = true;
1691 break;
1692 case RADEON_RE_WIDTH_HEIGHT:
1693 track->maxy = ((idx_value >> 16) & 0x7FF);
1694 track->cb_dirty = true;
1695 track->zb_dirty = true;
1696 break;
1697 case RADEON_RB3D_COLORPITCH:
1698 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1699 if (r) {
1700 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1701 idx, reg);
1702 radeon_cs_dump_packet(p, pkt);
1703 return r;
1704 }
1705 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1706 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1707 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1708 if (reloc->tiling_flags & RADEON_TILING_MICRO)
1709 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1710
1711 tmp = idx_value & ~(0x7 << 16);
1712 tmp |= tile_flags;
1713 ib[idx] = tmp;
1714 } else
1715 ib[idx] = idx_value;
1716
1717 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1718 track->cb_dirty = true;
1719 break;
1720 case RADEON_RB3D_DEPTHPITCH:
1721 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1722 track->zb_dirty = true;
1723 break;
1724 case RADEON_RB3D_CNTL:
1725 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1726 case 7:
1727 case 8:
1728 case 9:
1729 case 11:
1730 case 12:
1731 track->cb[0].cpp = 1;
1732 break;
1733 case 3:
1734 case 4:
1735 case 15:
1736 track->cb[0].cpp = 2;
1737 break;
1738 case 6:
1739 track->cb[0].cpp = 4;
1740 break;
1741 default:
1742 DRM_ERROR("Invalid color buffer format (%d) !\n",
1743 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1744 return -EINVAL;
1745 }
1746 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1747 track->cb_dirty = true;
1748 track->zb_dirty = true;
1749 break;
1750 case RADEON_RB3D_ZSTENCILCNTL:
1751 switch (idx_value & 0xf) {
1752 case 0:
1753 track->zb.cpp = 2;
1754 break;
1755 case 2:
1756 case 3:
1757 case 4:
1758 case 5:
1759 case 9:
1760 case 11:
1761 track->zb.cpp = 4;
1762 break;
1763 default:
1764 break;
1765 }
1766 track->zb_dirty = true;
1767 break;
1768 case RADEON_RB3D_ZPASS_ADDR:
1769 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1770 if (r) {
1771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1772 idx, reg);
1773 radeon_cs_dump_packet(p, pkt);
1774 return r;
1775 }
1776 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1777 break;
1778 case RADEON_PP_CNTL:
1779 {
1780 uint32_t temp = idx_value >> 4;
1781 for (i = 0; i < track->num_texture; i++)
1782 track->textures[i].enabled = !!(temp & (1 << i));
1783 track->tex_dirty = true;
1784 }
1785 break;
1786 case RADEON_SE_VF_CNTL:
1787 track->vap_vf_cntl = idx_value;
1788 break;
1789 case RADEON_SE_VTX_FMT:
1790 track->vtx_size = r100_get_vtx_size(idx_value);
1791 break;
1792 case RADEON_PP_TEX_SIZE_0:
1793 case RADEON_PP_TEX_SIZE_1:
1794 case RADEON_PP_TEX_SIZE_2:
1795 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1796 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1797 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1798 track->tex_dirty = true;
1799 break;
1800 case RADEON_PP_TEX_PITCH_0:
1801 case RADEON_PP_TEX_PITCH_1:
1802 case RADEON_PP_TEX_PITCH_2:
1803 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1804 track->textures[i].pitch = idx_value + 32;
1805 track->tex_dirty = true;
1806 break;
1807 case RADEON_PP_TXFILTER_0:
1808 case RADEON_PP_TXFILTER_1:
1809 case RADEON_PP_TXFILTER_2:
1810 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1811 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1812 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1813 tmp = (idx_value >> 23) & 0x7;
1814 if (tmp == 2 || tmp == 6)
1815 track->textures[i].roundup_w = false;
1816 tmp = (idx_value >> 27) & 0x7;
1817 if (tmp == 2 || tmp == 6)
1818 track->textures[i].roundup_h = false;
1819 track->tex_dirty = true;
1820 break;
1821 case RADEON_PP_TXFORMAT_0:
1822 case RADEON_PP_TXFORMAT_1:
1823 case RADEON_PP_TXFORMAT_2:
1824 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1825 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1826 track->textures[i].use_pitch = 1;
1827 } else {
1828 track->textures[i].use_pitch = 0;
1829 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1830 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1831 }
1832 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1833 track->textures[i].tex_coord_type = 2;
1834 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1835 case RADEON_TXFORMAT_I8:
1836 case RADEON_TXFORMAT_RGB332:
1837 case RADEON_TXFORMAT_Y8:
1838 track->textures[i].cpp = 1;
1839 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1840 break;
1841 case RADEON_TXFORMAT_AI88:
1842 case RADEON_TXFORMAT_ARGB1555:
1843 case RADEON_TXFORMAT_RGB565:
1844 case RADEON_TXFORMAT_ARGB4444:
1845 case RADEON_TXFORMAT_VYUY422:
1846 case RADEON_TXFORMAT_YVYU422:
1847 case RADEON_TXFORMAT_SHADOW16:
1848 case RADEON_TXFORMAT_LDUDV655:
1849 case RADEON_TXFORMAT_DUDV88:
1850 track->textures[i].cpp = 2;
1851 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1852 break;
1853 case RADEON_TXFORMAT_ARGB8888:
1854 case RADEON_TXFORMAT_RGBA8888:
1855 case RADEON_TXFORMAT_SHADOW32:
1856 case RADEON_TXFORMAT_LDUDUV8888:
1857 track->textures[i].cpp = 4;
1858 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1859 break;
1860 case RADEON_TXFORMAT_DXT1:
1861 track->textures[i].cpp = 1;
1862 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1863 break;
1864 case RADEON_TXFORMAT_DXT23:
1865 case RADEON_TXFORMAT_DXT45:
1866 track->textures[i].cpp = 1;
1867 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1868 break;
1869 }
1870 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1871 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1872 track->tex_dirty = true;
1873 break;
1874 case RADEON_PP_CUBIC_FACES_0:
1875 case RADEON_PP_CUBIC_FACES_1:
1876 case RADEON_PP_CUBIC_FACES_2:
1877 tmp = idx_value;
1878 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1879 for (face = 0; face < 4; face++) {
1880 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1881 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1882 }
1883 track->tex_dirty = true;
1884 break;
1885 default:
1886 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1887 reg, idx);
1888 return -EINVAL;
1889 }
1890 return 0;
1891}
1892
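/**
 * r100_cs_track_check_pkt3_indx_buffer - validate a PACKET3 INDX_BUFFER reloc
 *
 * @p: parser structure holding parsing context
 * @pkt: packet to check
 * @robj: buffer object the index data was relocated into
 *
 * Verifies that the end offset encoded in the packet still fits inside the
 * relocated buffer object, returning -EINVAL otherwise.
 */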
1893int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1894 struct radeon_cs_packet *pkt,
1895 struct radeon_bo *robj)
1896{
1897 unsigned idx;
1898 u32 value;
1899 idx = pkt->idx + 1;
1900 value = radeon_get_ib_value(p, idx + 2);
1901 if ((value + 1) > radeon_bo_size(robj)) {
1902 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1903 "(need %u have %lu) !\n",
1904 value + 1,
1905 radeon_bo_size(robj));
1906 return -EINVAL;
1907 }
1908 return 0;
1909}
1910
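/**
 * r100_packet3_check - validate a type 3 packet (r1xx/r2xx CS checker)
 *
 * @p: parser structure holding parsing context
 * @pkt: packet to check
 *
 * Patches relocations into the indirect buffer and records draw state
 * (vertex arrays, VAP_VF_CNTL, immediate dword counts) in the CS tracker
 * for the supported PACKET3 opcodes; anything else is rejected with -EINVAL.
 */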
1911static int r100_packet3_check(struct radeon_cs_parser *p,
1912 struct radeon_cs_packet *pkt)
1913{
1914 struct radeon_bo_list *reloc;
1915 struct r100_cs_track *track;
1916 unsigned idx;
1917 volatile uint32_t *ib;
1918 int r;
1919
1920 ib = p->ib.ptr;
1921 idx = pkt->idx + 1;
1922 track = (struct r100_cs_track *)p->track;
1923 switch (pkt->opcode) {
1924 case PACKET3_3D_LOAD_VBPNTR:
1925 r = r100_packet3_load_vbpntr(p, pkt, idx);
1926 if (r)
1927 return r;
1928 break;
1929 case PACKET3_INDX_BUFFER:
1930 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1931 if (r) {
1932 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1933 radeon_cs_dump_packet(p, pkt);
1934 return r;
1935 }
1936 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
1937 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1938 if (r) {
1939 return r;
1940 }
1941 break;
1942 case 0x23:
1943 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1944 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1945 if (r) {
1946 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1947 radeon_cs_dump_packet(p, pkt);
1948 return r;
1949 }
1950 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
1951 track->num_arrays = 1;
1952 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1953
1954 track->arrays[0].robj = reloc->robj;
1955 track->arrays[0].esize = track->vtx_size;
1956
1957 track->max_indx = radeon_get_ib_value(p, idx+1);
1958
1959 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1960 track->immd_dwords = pkt->count - 1;
1961 r = r100_cs_track_check(p->rdev, track);
1962 if (r)
1963 return r;
1964 break;
1965 case PACKET3_3D_DRAW_IMMD:
1966 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1967 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1968 return -EINVAL;
1969 }
1970 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1971 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1972 track->immd_dwords = pkt->count - 1;
1973 r = r100_cs_track_check(p->rdev, track);
1974 if (r)
1975 return r;
1976 break;
1977 /* triggers drawing using in-packet vertex data */
1978 case PACKET3_3D_DRAW_IMMD_2:
1979 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1980 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1981 return -EINVAL;
1982 }
1983 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1984 track->immd_dwords = pkt->count;
1985 r = r100_cs_track_check(p->rdev, track);
1986 if (r)
1987 return r;
1988 break;
1989 /* triggers drawing using in-packet vertex data */
1990 case PACKET3_3D_DRAW_VBUF_2:
1991 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1992 r = r100_cs_track_check(p->rdev, track);
1993 if (r)
1994 return r;
1995 break;
1996 /* triggers drawing of vertex buffers setup elsewhere */
1997 case PACKET3_3D_DRAW_INDX_2:
1998 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1999 r = r100_cs_track_check(p->rdev, track);
2000 if (r)
2001 return r;
2002 break;
2003 /* triggers drawing using indices to vertex buffer */
2004 case PACKET3_3D_DRAW_VBUF:
2005 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2006 r = r100_cs_track_check(p->rdev, track);
2007 if (r)
2008 return r;
2009 break;
2010 /* triggers drawing of vertex buffers setup elsewhere */
2011 case PACKET3_3D_DRAW_INDX:
2012 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2013 r = r100_cs_track_check(p->rdev, track);
2014 if (r)
2015 return r;
2016 break;
2017 /* triggers drawing using indices to vertex buffer */
2018 case PACKET3_3D_CLEAR_HIZ:
2019 case PACKET3_3D_CLEAR_ZMASK:
2020 if (p->rdev->hyperz_filp != p->filp)
2021 return -EINVAL;
2022 break;
2023 case PACKET3_NOP:
2024 break;
2025 default:
2026 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2027 return -EINVAL;
2028 }
2029 return 0;
2030}
2031
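/**
 * r100_cs_parse - top level command stream parser for r1xx/r2xx
 *
 * @p: parser structure holding parsing context
 *
 * Walks the indirect buffer packet by packet, dispatching type 0 packets to
 * the register checker (r100 or r200 variant depending on the family) and
 * type 3 packets to r100_packet3_check() until the whole IB is consumed.
 */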
2032int r100_cs_parse(struct radeon_cs_parser *p)
2033{
2034 struct radeon_cs_packet pkt;
2035 struct r100_cs_track *track;
2036 int r;
2037
2038 track = kzalloc(sizeof(*track), GFP_KERNEL);
2039 if (!track)
2040 return -ENOMEM;
2041 r100_cs_track_clear(p->rdev, track);
2042 p->track = track;
2043 do {
2044 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2045 if (r) {
2046 return r;
2047 }
2048 p->idx += pkt.count + 2;
2049 switch (pkt.type) {
2050 case RADEON_PACKET_TYPE0:
2051 if (p->rdev->family >= CHIP_R200)
2052 r = r100_cs_parse_packet0(p, &pkt,
2053 p->rdev->config.r100.reg_safe_bm,
2054 p->rdev->config.r100.reg_safe_bm_size,
2055 &r200_packet0_check);
2056 else
2057 r = r100_cs_parse_packet0(p, &pkt,
2058 p->rdev->config.r100.reg_safe_bm,
2059 p->rdev->config.r100.reg_safe_bm_size,
2060 &r100_packet0_check);
2061 break;
2062 case RADEON_PACKET_TYPE2:
2063 break;
2064 case RADEON_PACKET_TYPE3:
2065 r = r100_packet3_check(p, &pkt);
2066 break;
2067 default:
2068 DRM_ERROR("Unknown packet type %d !\n",
2069 pkt.type);
2070 return -EINVAL;
2071 }
2072 if (r)
2073 return r;
2074 } while (p->idx < p->chunk_ib->length_dw);
2075 return 0;
2076}
2077
2078static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2079{
2080 DRM_ERROR("pitch %d\n", t->pitch);
2081 DRM_ERROR("use_pitch %d\n", t->use_pitch);
2082 DRM_ERROR("width %d\n", t->width);
2083 DRM_ERROR("width_11 %d\n", t->width_11);
2084 DRM_ERROR("height %d\n", t->height);
2085 DRM_ERROR("height_11 %d\n", t->height_11);
2086 DRM_ERROR("num levels %d\n", t->num_levels);
2087 DRM_ERROR("depth %d\n", t->txdepth);
2088 DRM_ERROR("bpp %d\n", t->cpp);
2089 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2090 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2091 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2092 DRM_ERROR("compress format %d\n", t->compress_format);
2093}
2094
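/* Size in bytes of a compressed (DXT) mip level: 4x4 texel blocks, 8 bytes
 * per block for DXT1 and 16 bytes for DXT3/5, with the width rounded up to
 * the minimum number of blocks per row the checker assumes the hardware
 * addresses (4 for DXT1, 2 for DXT3/5).
 */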
2095static int r100_track_compress_size(int compress_format, int w, int h)
2096{
2097 int block_width, block_height, block_bytes;
2098 int wblocks, hblocks;
2099 int min_wblocks;
2100 int sz;
2101
2102 block_width = 4;
2103 block_height = 4;
2104
2105 switch (compress_format) {
2106 case R100_TRACK_COMP_DXT1:
2107 block_bytes = 8;
2108 min_wblocks = 4;
2109 break;
2110 default:
2111 case R100_TRACK_COMP_DXT35:
2112 block_bytes = 16;
2113 min_wblocks = 2;
2114 break;
2115 }
2116
2117 hblocks = (h + block_height - 1) / block_height;
2118 wblocks = (w + block_width - 1) / block_width;
2119 if (wblocks < min_wblocks)
2120 wblocks = min_wblocks;
2121 sz = wblocks * hblocks * block_bytes;
2122 return sz;
2123}
2124
2125static int r100_cs_track_cube(struct radeon_device *rdev,
2126 struct r100_cs_track *track, unsigned idx)
2127{
2128 unsigned face, w, h;
2129 struct radeon_bo *cube_robj;
2130 unsigned long size;
2131 unsigned compress_format = track->textures[idx].compress_format;
2132
2133 for (face = 0; face < 5; face++) {
2134 cube_robj = track->textures[idx].cube_info[face].robj;
2135 w = track->textures[idx].cube_info[face].width;
2136 h = track->textures[idx].cube_info[face].height;
2137
2138 if (compress_format) {
2139 size = r100_track_compress_size(compress_format, w, h);
2140 } else
2141 size = w * h;
2142 size *= track->textures[idx].cpp;
2143
2144 size += track->textures[idx].cube_info[face].offset;
2145
2146 if (size > radeon_bo_size(cube_robj)) {
2147 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2148 size, radeon_bo_size(cube_robj));
2149 r100_cs_track_texture_print(&track->textures[idx]);
2150 return -1;
2151 }
2152 }
2153 return 0;
2154}
2155
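/* Walk every enabled texture unit and check that the summed footprint of all
 * mip levels (and cube faces, when tracked separately) fits in the bound BO.
 */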
2156static int r100_cs_track_texture_check(struct radeon_device *rdev,
2157 struct r100_cs_track *track)
2158{
2159 struct radeon_bo *robj;
2160 unsigned long size;
2161 unsigned u, i, w, h, d;
2162 int ret;
2163
2164 for (u = 0; u < track->num_texture; u++) {
2165 if (!track->textures[u].enabled)
2166 continue;
2167 if (track->textures[u].lookup_disable)
2168 continue;
2169 robj = track->textures[u].robj;
2170 if (robj == NULL) {
2171 DRM_ERROR("No texture bound to unit %u\n", u);
2172 return -EINVAL;
2173 }
2174 size = 0;
2175 for (i = 0; i <= track->textures[u].num_levels; i++) {
2176 if (track->textures[u].use_pitch) {
2177 if (rdev->family < CHIP_R300)
2178 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2179 else
2180 w = track->textures[u].pitch / (1 << i);
2181 } else {
2182 w = track->textures[u].width;
2183 if (rdev->family >= CHIP_RV515)
2184 w |= track->textures[u].width_11;
2185 w = w / (1 << i);
2186 if (track->textures[u].roundup_w)
2187 w = roundup_pow_of_two(w);
2188 }
2189 h = track->textures[u].height;
2190 if (rdev->family >= CHIP_RV515)
2191 h |= track->textures[u].height_11;
2192 h = h / (1 << i);
2193 if (track->textures[u].roundup_h)
2194 h = roundup_pow_of_two(h);
2195 if (track->textures[u].tex_coord_type == 1) {
2196 d = (1 << track->textures[u].txdepth) / (1 << i);
2197 if (!d)
2198 d = 1;
2199 } else {
2200 d = 1;
2201 }
2202 if (track->textures[u].compress_format) {
2203 				/* compressed textures are block based */
2204 				size += r100_track_compress_size(track->textures[u].compress_format,
2205 								 w, h) * d;
2206 } else
2207 size += w * h * d;
2208 }
2209 size *= track->textures[u].cpp;
2210
2211 switch (track->textures[u].tex_coord_type) {
2212 case 0:
2213 case 1:
2214 break;
2215 case 2:
2216 if (track->separate_cube) {
2217 ret = r100_cs_track_cube(rdev, track, u);
2218 if (ret)
2219 return ret;
2220 } else
2221 size *= 6;
2222 break;
2223 default:
2224 DRM_ERROR("Invalid texture coordinate type %u for unit "
2225 "%u\n", track->textures[u].tex_coord_type, u);
2226 return -EINVAL;
2227 }
2228 if (size > radeon_bo_size(robj)) {
2229 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2230 "%lu\n", u, size, radeon_bo_size(robj));
2231 r100_cs_track_texture_print(&track->textures[u]);
2232 return -EINVAL;
2233 }
2234 }
2235 return 0;
2236}
2237
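/* Final consistency check run before a draw: verify that the color, depth
 * and AA resolve buffers, as well as the vertex arrays referenced through
 * VAP_VF_CNTL, are large enough for the state the command stream programmed.
 */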
2238int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2239{
2240 unsigned i;
2241 unsigned long size;
2242 unsigned prim_walk;
2243 unsigned nverts;
2244 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2245
2246 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2247 !track->blend_read_enable)
2248 num_cb = 0;
2249
2250 for (i = 0; i < num_cb; i++) {
2251 if (track->cb[i].robj == NULL) {
2252 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2253 return -EINVAL;
2254 }
2255 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2256 size += track->cb[i].offset;
2257 if (size > radeon_bo_size(track->cb[i].robj)) {
2258 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2259 "(need %lu have %lu) !\n", i, size,
2260 radeon_bo_size(track->cb[i].robj));
2261 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2262 i, track->cb[i].pitch, track->cb[i].cpp,
2263 track->cb[i].offset, track->maxy);
2264 return -EINVAL;
2265 }
2266 }
2267 track->cb_dirty = false;
2268
2269 if (track->zb_dirty && track->z_enabled) {
2270 if (track->zb.robj == NULL) {
2271 DRM_ERROR("[drm] No buffer for z buffer !\n");
2272 return -EINVAL;
2273 }
2274 size = track->zb.pitch * track->zb.cpp * track->maxy;
2275 size += track->zb.offset;
2276 if (size > radeon_bo_size(track->zb.robj)) {
2277 DRM_ERROR("[drm] Buffer too small for z buffer "
2278 "(need %lu have %lu) !\n", size,
2279 radeon_bo_size(track->zb.robj));
2280 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2281 track->zb.pitch, track->zb.cpp,
2282 track->zb.offset, track->maxy);
2283 return -EINVAL;
2284 }
2285 }
2286 track->zb_dirty = false;
2287
2288 if (track->aa_dirty && track->aaresolve) {
2289 if (track->aa.robj == NULL) {
2290 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2291 return -EINVAL;
2292 }
2293 /* I believe the format comes from colorbuffer0. */
2294 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2295 size += track->aa.offset;
2296 if (size > radeon_bo_size(track->aa.robj)) {
2297 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2298 "(need %lu have %lu) !\n", i, size,
2299 radeon_bo_size(track->aa.robj));
2300 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2301 i, track->aa.pitch, track->cb[0].cpp,
2302 track->aa.offset, track->maxy);
2303 return -EINVAL;
2304 }
2305 }
2306 track->aa_dirty = false;
2307
2308 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2309 if (track->vap_vf_cntl & (1 << 14)) {
2310 nverts = track->vap_alt_nverts;
2311 } else {
2312 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2313 }
2314 switch (prim_walk) {
2315 case 1:
2316 for (i = 0; i < track->num_arrays; i++) {
2317 size = track->arrays[i].esize * track->max_indx * 4;
2318 if (track->arrays[i].robj == NULL) {
2319 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2320 "bound\n", prim_walk, i);
2321 return -EINVAL;
2322 }
2323 if (size > radeon_bo_size(track->arrays[i].robj)) {
2324 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2325 "need %lu dwords have %lu dwords\n",
2326 prim_walk, i, size >> 2,
2327 radeon_bo_size(track->arrays[i].robj)
2328 >> 2);
2329 DRM_ERROR("Max indices %u\n", track->max_indx);
2330 return -EINVAL;
2331 }
2332 }
2333 break;
2334 case 2:
2335 for (i = 0; i < track->num_arrays; i++) {
2336 size = track->arrays[i].esize * (nverts - 1) * 4;
2337 if (track->arrays[i].robj == NULL) {
2338 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2339 "bound\n", prim_walk, i);
2340 return -EINVAL;
2341 }
2342 if (size > radeon_bo_size(track->arrays[i].robj)) {
2343 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2344 "need %lu dwords have %lu dwords\n",
2345 prim_walk, i, size >> 2,
2346 radeon_bo_size(track->arrays[i].robj)
2347 >> 2);
2348 return -EINVAL;
2349 }
2350 }
2351 break;
2352 case 3:
2353 size = track->vtx_size * nverts;
2354 if (size != track->immd_dwords) {
2355 			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
2356 track->immd_dwords, size);
2357 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2358 nverts, track->vtx_size);
2359 return -EINVAL;
2360 }
2361 break;
2362 default:
2363 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2364 prim_walk);
2365 return -EINVAL;
2366 }
2367
2368 if (track->tex_dirty) {
2369 track->tex_dirty = false;
2370 return r100_cs_track_texture_check(rdev, track);
2371 }
2372 return 0;
2373}
2374
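/* Reset the CS tracker to worst-case defaults (maximum pitch, cpp and sizes)
 * so that any state the command stream fails to program is caught by the
 * size checks above rather than silently trusted.
 */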
2375void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2376{
2377 unsigned i, face;
2378
2379 track->cb_dirty = true;
2380 track->zb_dirty = true;
2381 track->tex_dirty = true;
2382 track->aa_dirty = true;
2383
2384 if (rdev->family < CHIP_R300) {
2385 track->num_cb = 1;
2386 if (rdev->family <= CHIP_RS200)
2387 track->num_texture = 3;
2388 else
2389 track->num_texture = 6;
2390 track->maxy = 2048;
2391 track->separate_cube = 1;
2392 } else {
2393 track->num_cb = 4;
2394 track->num_texture = 16;
2395 track->maxy = 4096;
2396 track->separate_cube = 0;
2397 track->aaresolve = false;
2398 track->aa.robj = NULL;
2399 }
2400
2401 for (i = 0; i < track->num_cb; i++) {
2402 track->cb[i].robj = NULL;
2403 track->cb[i].pitch = 8192;
2404 track->cb[i].cpp = 16;
2405 track->cb[i].offset = 0;
2406 }
2407 track->z_enabled = true;
2408 track->zb.robj = NULL;
2409 track->zb.pitch = 8192;
2410 track->zb.cpp = 4;
2411 track->zb.offset = 0;
2412 track->vtx_size = 0x7F;
2413 track->immd_dwords = 0xFFFFFFFFUL;
2414 track->num_arrays = 11;
2415 track->max_indx = 0x00FFFFFFUL;
2416 for (i = 0; i < track->num_arrays; i++) {
2417 track->arrays[i].robj = NULL;
2418 track->arrays[i].esize = 0x7F;
2419 }
2420 for (i = 0; i < track->num_texture; i++) {
2421 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2422 track->textures[i].pitch = 16536;
2423 track->textures[i].width = 16536;
2424 track->textures[i].height = 16536;
2425 track->textures[i].width_11 = 1 << 11;
2426 track->textures[i].height_11 = 1 << 11;
2427 track->textures[i].num_levels = 12;
2428 if (rdev->family <= CHIP_RS200) {
2429 track->textures[i].tex_coord_type = 0;
2430 track->textures[i].txdepth = 0;
2431 } else {
2432 track->textures[i].txdepth = 16;
2433 track->textures[i].tex_coord_type = 1;
2434 }
2435 track->textures[i].cpp = 64;
2436 track->textures[i].robj = NULL;
2437 		/* CS IB emission code makes sure texture units are disabled */
2438 track->textures[i].enabled = false;
2439 track->textures[i].lookup_disable = false;
2440 track->textures[i].roundup_w = true;
2441 track->textures[i].roundup_h = true;
2442 if (track->separate_cube)
2443 for (face = 0; face < 5; face++) {
2444 track->textures[i].cube_info[face].robj = NULL;
2445 track->textures[i].cube_info[face].width = 16536;
2446 track->textures[i].cube_info[face].height = 16536;
2447 track->textures[i].cube_info[face].offset = 0;
2448 }
2449 }
2450}
2451
2452/*
2453 * Global GPU functions
2454 */
2455static void r100_errata(struct radeon_device *rdev)
2456{
2457 rdev->pll_errata = 0;
2458
2459 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2460 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2461 }
2462
2463 if (rdev->family == CHIP_RV100 ||
2464 rdev->family == CHIP_RS100 ||
2465 rdev->family == CHIP_RS200) {
2466 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2467 }
2468}
2469
2470static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2471{
2472 unsigned i;
2473 uint32_t tmp;
2474
2475 for (i = 0; i < rdev->usec_timeout; i++) {
2476 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2477 if (tmp >= n) {
2478 return 0;
2479 }
2480 DRM_UDELAY(1);
2481 }
2482 return -1;
2483}
2484
2485int r100_gui_wait_for_idle(struct radeon_device *rdev)
2486{
2487 unsigned i;
2488 uint32_t tmp;
2489
2490 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2491 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
2492 " Bad things might happen.\n");
2493 }
2494 for (i = 0; i < rdev->usec_timeout; i++) {
2495 tmp = RREG32(RADEON_RBBM_STATUS);
2496 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2497 return 0;
2498 }
2499 DRM_UDELAY(1);
2500 }
2501 return -1;
2502}
2503
2504int r100_mc_wait_for_idle(struct radeon_device *rdev)
2505{
2506 unsigned i;
2507 uint32_t tmp;
2508
2509 for (i = 0; i < rdev->usec_timeout; i++) {
2510 /* read MC_STATUS */
2511 tmp = RREG32(RADEON_MC_STATUS);
2512 if (tmp & RADEON_MC_IDLE) {
2513 return 0;
2514 }
2515 DRM_UDELAY(1);
2516 }
2517 return -1;
2518}
2519
2520bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2521{
2522 u32 rbbm_status;
2523
2524 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2525 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2526 radeon_ring_lockup_update(rdev, ring);
2527 return false;
2528 }
2529 return radeon_ring_test_lockup(rdev, ring);
2530}
2531
2532/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2533void r100_enable_bm(struct radeon_device *rdev)
2534{
2535 uint32_t tmp;
2536 /* Enable bus mastering */
2537 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2538 WREG32(RADEON_BUS_CNTL, tmp);
2539}
2540
2541void r100_bm_disable(struct radeon_device *rdev)
2542{
2543 u32 tmp;
2544
2545 /* disable bus mastering */
2546 tmp = RREG32(R_000030_BUS_CNTL);
2547 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2548 mdelay(1);
2549 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2550 mdelay(1);
2551 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2552 tmp = RREG32(RADEON_BUS_CNTL);
2553 mdelay(1);
2554 pci_clear_master(rdev->pdev);
2555 mdelay(1);
2556}
2557
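/* Full engine reset: stop the MC and CP, disable bus mastering, pulse
 * RBBM_SOFT_RESET for the 3D blocks and then for the CP, and finally restore
 * PCI state, bus mastering and the memory controller setup.
 */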
2558int r100_asic_reset(struct radeon_device *rdev)
2559{
2560 struct r100_mc_save save;
2561 u32 status, tmp;
2562 int ret = 0;
2563
2564 status = RREG32(R_000E40_RBBM_STATUS);
2565 if (!G_000E40_GUI_ACTIVE(status)) {
2566 return 0;
2567 }
2568 r100_mc_stop(rdev, &save);
2569 status = RREG32(R_000E40_RBBM_STATUS);
2570 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2571 /* stop CP */
2572 WREG32(RADEON_CP_CSQ_CNTL, 0);
2573 tmp = RREG32(RADEON_CP_RB_CNTL);
2574 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2575 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2576 WREG32(RADEON_CP_RB_WPTR, 0);
2577 WREG32(RADEON_CP_RB_CNTL, tmp);
2578 /* save PCI state */
2579 pci_save_state(rdev->pdev);
2580 /* disable bus mastering */
2581 r100_bm_disable(rdev);
2582 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2583 S_0000F0_SOFT_RESET_RE(1) |
2584 S_0000F0_SOFT_RESET_PP(1) |
2585 S_0000F0_SOFT_RESET_RB(1));
2586 RREG32(R_0000F0_RBBM_SOFT_RESET);
2587 mdelay(500);
2588 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2589 mdelay(1);
2590 status = RREG32(R_000E40_RBBM_STATUS);
2591 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2592 /* reset CP */
2593 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2594 RREG32(R_0000F0_RBBM_SOFT_RESET);
2595 mdelay(500);
2596 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2597 mdelay(1);
2598 status = RREG32(R_000E40_RBBM_STATUS);
2599 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2600 /* restore PCI & busmastering */
2601 pci_restore_state(rdev->pdev);
2602 r100_enable_bm(rdev);
2603 /* Check if GPU is idle */
2604 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2605 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2606 dev_err(rdev->dev, "failed to reset GPU\n");
2607 ret = -1;
2608 } else
2609 		dev_info(rdev->dev, "GPU reset succeeded\n");
2610 r100_mc_resume(rdev, &save);
2611 return ret;
2612}
2613
2614void r100_set_common_regs(struct radeon_device *rdev)
2615{
2616 struct drm_device *dev = rdev->ddev;
2617 bool force_dac2 = false;
2618 u32 tmp;
2619
2620 /* set these so they don't interfere with anything */
2621 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2622 WREG32(RADEON_SUBPIC_CNTL, 0);
2623 WREG32(RADEON_VIPH_CONTROL, 0);
2624 WREG32(RADEON_I2C_CNTL_1, 0);
2625 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2626 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2627 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2628
2629 /* always set up dac2 on rn50 and some rv100 as lots
2630 * of servers seem to wire it up to a VGA port but
2631 * don't report it in the bios connector
2632 * table.
2633 */
2634 switch (dev->pdev->device) {
2635 /* RN50 */
2636 case 0x515e:
2637 case 0x5969:
2638 force_dac2 = true;
2639 break;
2640 /* RV100*/
2641 case 0x5159:
2642 case 0x515a:
2643 /* DELL triple head servers */
2644 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2645 ((dev->pdev->subsystem_device == 0x016c) ||
2646 (dev->pdev->subsystem_device == 0x016d) ||
2647 (dev->pdev->subsystem_device == 0x016e) ||
2648 (dev->pdev->subsystem_device == 0x016f) ||
2649 (dev->pdev->subsystem_device == 0x0170) ||
2650 (dev->pdev->subsystem_device == 0x017d) ||
2651 (dev->pdev->subsystem_device == 0x017e) ||
2652 (dev->pdev->subsystem_device == 0x0183) ||
2653 (dev->pdev->subsystem_device == 0x018a) ||
2654 (dev->pdev->subsystem_device == 0x019a)))
2655 force_dac2 = true;
2656 break;
2657 }
2658
2659 if (force_dac2) {
2660 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2661 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2662 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2663
2664 /* For CRT on DAC2, don't turn it on if BIOS didn't
2665 		   enable it, even if it's detected.
2666 */
2667
2668 /* force it to crtc0 */
2669 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2670 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2671 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2672
2673 /* set up the TV DAC */
2674 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2675 RADEON_TV_DAC_STD_MASK |
2676 RADEON_TV_DAC_RDACPD |
2677 RADEON_TV_DAC_GDACPD |
2678 RADEON_TV_DAC_BDACPD |
2679 RADEON_TV_DAC_BGADJ_MASK |
2680 RADEON_TV_DAC_DACADJ_MASK);
2681 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2682 RADEON_TV_DAC_NHOLD |
2683 RADEON_TV_DAC_STD_PS2 |
2684 (0x58 << 16));
2685
2686 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2687 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2688 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2689 }
2690
2691 /* switch PM block to ACPI mode */
2692 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2693 tmp &= ~RADEON_PM_MODE_SEL;
2694 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2695
2696}
2697
2698/*
2699 * VRAM info
2700 */
2701static void r100_vram_get_type(struct radeon_device *rdev)
2702{
2703 uint32_t tmp;
2704
2705 rdev->mc.vram_is_ddr = false;
2706 if (rdev->flags & RADEON_IS_IGP)
2707 rdev->mc.vram_is_ddr = true;
2708 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2709 rdev->mc.vram_is_ddr = true;
2710 if ((rdev->family == CHIP_RV100) ||
2711 (rdev->family == CHIP_RS100) ||
2712 (rdev->family == CHIP_RS200)) {
2713 tmp = RREG32(RADEON_MEM_CNTL);
2714 if (tmp & RV100_HALF_MODE) {
2715 rdev->mc.vram_width = 32;
2716 } else {
2717 rdev->mc.vram_width = 64;
2718 }
2719 if (rdev->flags & RADEON_SINGLE_CRTC) {
2720 rdev->mc.vram_width /= 4;
2721 rdev->mc.vram_is_ddr = true;
2722 }
2723 } else if (rdev->family <= CHIP_RV280) {
2724 tmp = RREG32(RADEON_MEM_CNTL);
2725 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2726 rdev->mc.vram_width = 128;
2727 } else {
2728 rdev->mc.vram_width = 64;
2729 }
2730 } else {
2731 /* newer IGPs */
2732 rdev->mc.vram_width = 128;
2733 }
2734}
2735
2736static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2737{
2738 u32 aper_size;
2739 u8 byte;
2740
2741 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2742
2743 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2744 	 * that is, those that have the 2nd generation multifunction PCI interface
2745 */
2746 if (rdev->family == CHIP_RV280 ||
2747 rdev->family >= CHIP_RV350) {
2748 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2749 ~RADEON_HDP_APER_CNTL);
2750 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2751 return aper_size * 2;
2752 }
2753
2754 /* Older cards have all sorts of funny issues to deal with. First
2755 * check if it's a multifunction card by reading the PCI config
2756 * header type... Limit those to one aperture size
2757 */
2758 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2759 if (byte & 0x80) {
2760 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2761 DRM_INFO("Limiting VRAM to one aperture\n");
2762 return aper_size;
2763 }
2764
2765 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2766 	 * has set it up. We don't write this as it's broken on some ASICs but
2767 * we expect the BIOS to have done the right thing (might be too optimistic...)
2768 */
2769 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2770 return aper_size * 2;
2771 return aper_size;
2772}
2773
2774void r100_vram_init_sizes(struct radeon_device *rdev)
2775{
2776 u64 config_aper_size;
2777
2778 /* work out accessible VRAM */
2779 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2780 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2781 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2782 /* FIXME we don't use the second aperture yet when we could use it */
2783 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2784 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2785 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2786 if (rdev->flags & RADEON_IS_IGP) {
2787 uint32_t tom;
2788 /* read NB_TOM to get the amount of ram stolen for the GPU */
2789 tom = RREG32(RADEON_NB_TOM);
2790 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2791 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2792 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2793 } else {
2794 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2795 /* Some production boards of m6 will report 0
2796 * if it's 8 MB
2797 */
2798 if (rdev->mc.real_vram_size == 0) {
2799 rdev->mc.real_vram_size = 8192 * 1024;
2800 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2801 }
2802 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2803 * Novell bug 204882 + along with lots of ubuntu ones
2804 */
2805 if (rdev->mc.aper_size > config_aper_size)
2806 config_aper_size = rdev->mc.aper_size;
2807
2808 if (config_aper_size > rdev->mc.real_vram_size)
2809 rdev->mc.mc_vram_size = config_aper_size;
2810 else
2811 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2812 }
2813}
2814
2815void r100_vga_set_state(struct radeon_device *rdev, bool state)
2816{
2817 uint32_t temp;
2818
2819 temp = RREG32(RADEON_CONFIG_CNTL);
2820 if (state == false) {
2821 temp &= ~RADEON_CFG_VGA_RAM_EN;
2822 temp |= RADEON_CFG_VGA_IO_DIS;
2823 } else {
2824 temp &= ~RADEON_CFG_VGA_IO_DIS;
2825 }
2826 WREG32(RADEON_CONFIG_CNTL, temp);
2827}
2828
2829static void r100_mc_init(struct radeon_device *rdev)
2830{
2831 u64 base;
2832
2833 r100_vram_get_type(rdev);
2834 r100_vram_init_sizes(rdev);
2835 base = rdev->mc.aper_base;
2836 if (rdev->flags & RADEON_IS_IGP)
2837 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2838 radeon_vram_location(rdev, &rdev->mc, base);
2839 rdev->mc.gtt_base_align = 0;
2840 if (!(rdev->flags & RADEON_IS_AGP))
2841 radeon_gtt_location(rdev, &rdev->mc);
2842 radeon_update_bandwidth_info(rdev);
2843}
2844
2845
2846/*
2847 * Indirect registers accessor
2848 */
2849void r100_pll_errata_after_index(struct radeon_device *rdev)
2850{
2851 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2852 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2853 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2854 }
2855}
2856
2857static void r100_pll_errata_after_data(struct radeon_device *rdev)
2858{
2859 	/* This workaround is necessary on RV100, RS100 and RS200 chips
2860 * or the chip could hang on a subsequent access
2861 */
2862 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2863 mdelay(5);
2864 }
2865
2866 	/* This function is required to work around a hardware bug in some (all?)
2867 * revisions of the R300. This workaround should be called after every
2868 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2869 * may not be correct.
2870 */
2871 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2872 uint32_t save, tmp;
2873
2874 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2875 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2876 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2877 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2878 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2879 }
2880}
2881
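/* Indirect PLL register accessors: select the register through
 * CLOCK_CNTL_INDEX and transfer the data through CLOCK_CNTL_DATA, applying
 * the per-chip errata workarounds around both halves of the access.
 */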
2882uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2883{
2884 unsigned long flags;
2885 uint32_t data;
2886
2887 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2888 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2889 r100_pll_errata_after_index(rdev);
2890 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2891 r100_pll_errata_after_data(rdev);
2892 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2893 return data;
2894}
2895
2896void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2897{
2898 unsigned long flags;
2899
2900 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2901 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2902 r100_pll_errata_after_index(rdev);
2903 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2904 r100_pll_errata_after_data(rdev);
2905 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2906}
2907
2908static void r100_set_safe_registers(struct radeon_device *rdev)
2909{
2910 if (ASIC_IS_RN50(rdev)) {
2911 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2912 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2913 } else if (rdev->family < CHIP_R200) {
2914 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2915 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2916 } else {
2917 r200_set_safe_registers(rdev);
2918 }
2919}
2920
2921/*
2922 * Debugfs info
2923 */
2924#if defined(CONFIG_DEBUG_FS)
2925static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2926{
2927 struct drm_info_node *node = (struct drm_info_node *) m->private;
2928 struct drm_device *dev = node->minor->dev;
2929 struct radeon_device *rdev = dev->dev_private;
2930 uint32_t reg, value;
2931 unsigned i;
2932
2933 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2934 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2935 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2936 for (i = 0; i < 64; i++) {
2937 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2938 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2939 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2940 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2941 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2942 }
2943 return 0;
2944}
2945
2946static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2947{
2948 struct drm_info_node *node = (struct drm_info_node *) m->private;
2949 struct drm_device *dev = node->minor->dev;
2950 struct radeon_device *rdev = dev->dev_private;
2951 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2952 uint32_t rdp, wdp;
2953 unsigned count, i, j;
2954
2955 radeon_ring_free_size(rdev, ring);
2956 rdp = RREG32(RADEON_CP_RB_RPTR);
2957 wdp = RREG32(RADEON_CP_RB_WPTR);
2958 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2959 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2960 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2961 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2962 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2963 seq_printf(m, "%u dwords in ring\n", count);
2964 if (ring->ready) {
2965 for (j = 0; j <= count; j++) {
2966 i = (rdp + j) & ring->ptr_mask;
2967 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2968 }
2969 }
2970 return 0;
2971}
2972
2973
2974static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2975{
2976 struct drm_info_node *node = (struct drm_info_node *) m->private;
2977 struct drm_device *dev = node->minor->dev;
2978 struct radeon_device *rdev = dev->dev_private;
2979 uint32_t csq_stat, csq2_stat, tmp;
2980 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2981 unsigned i;
2982
2983 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2984 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2985 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2986 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2987 r_rptr = (csq_stat >> 0) & 0x3ff;
2988 r_wptr = (csq_stat >> 10) & 0x3ff;
2989 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2990 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2991 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2992 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2993 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2994 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2995 seq_printf(m, "Ring rptr %u\n", r_rptr);
2996 seq_printf(m, "Ring wptr %u\n", r_wptr);
2997 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2998 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2999 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
3000 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
3001 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
3002 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
3003 seq_printf(m, "Ring fifo:\n");
3004 for (i = 0; i < 256; i++) {
3005 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3006 tmp = RREG32(RADEON_CP_CSQ_DATA);
3007 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3008 }
3009 seq_printf(m, "Indirect1 fifo:\n");
3010 for (i = 256; i <= 512; i++) {
3011 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3012 tmp = RREG32(RADEON_CP_CSQ_DATA);
3013 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3014 }
3015 seq_printf(m, "Indirect2 fifo:\n");
3016 for (i = 640; i < ib1_wptr; i++) {
3017 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3018 tmp = RREG32(RADEON_CP_CSQ_DATA);
3019 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3020 }
3021 return 0;
3022}
3023
3024static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3025{
3026 struct drm_info_node *node = (struct drm_info_node *) m->private;
3027 struct drm_device *dev = node->minor->dev;
3028 struct radeon_device *rdev = dev->dev_private;
3029 uint32_t tmp;
3030
3031 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3032 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3033 tmp = RREG32(RADEON_MC_FB_LOCATION);
3034 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3035 tmp = RREG32(RADEON_BUS_CNTL);
3036 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3037 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3038 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3039 tmp = RREG32(RADEON_AGP_BASE);
3040 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3041 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3042 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3043 tmp = RREG32(0x01D0);
3044 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3045 tmp = RREG32(RADEON_AIC_LO_ADDR);
3046 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3047 tmp = RREG32(RADEON_AIC_HI_ADDR);
3048 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3049 tmp = RREG32(0x01E4);
3050 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3051 return 0;
3052}
3053
3054static struct drm_info_list r100_debugfs_rbbm_list[] = {
3055 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3056};
3057
3058static struct drm_info_list r100_debugfs_cp_list[] = {
3059 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3060 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3061};
3062
3063static struct drm_info_list r100_debugfs_mc_info_list[] = {
3064 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3065};
3066#endif
3067
3068int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3069{
3070#if defined(CONFIG_DEBUG_FS)
3071 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3072#else
3073 return 0;
3074#endif
3075}
3076
3077int r100_debugfs_cp_init(struct radeon_device *rdev)
3078{
3079#if defined(CONFIG_DEBUG_FS)
3080 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3081#else
3082 return 0;
3083#endif
3084}
3085
3086int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3087{
3088#if defined(CONFIG_DEBUG_FS)
3089 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
3090#else
3091 return 0;
3092#endif
3093}
3094
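/* Program one of the surface registers that control tiling and byte swapping
 * for CPU access: tiling flags are translated into the per-family SURF_TILE
 * bits and the pitch is stored divided by 16 on r100/r200 and by 8 on r300
 * and newer parts.
 */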
3095int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3096 uint32_t tiling_flags, uint32_t pitch,
3097 uint32_t offset, uint32_t obj_size)
3098{
3099 int surf_index = reg * 16;
3100 int flags = 0;
3101
3102 if (rdev->family <= CHIP_RS200) {
3103 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3104 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3105 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3106 if (tiling_flags & RADEON_TILING_MACRO)
3107 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3108 /* setting pitch to 0 disables tiling */
3109 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3110 == 0)
3111 pitch = 0;
3112 } else if (rdev->family <= CHIP_RV280) {
3113 if (tiling_flags & (RADEON_TILING_MACRO))
3114 flags |= R200_SURF_TILE_COLOR_MACRO;
3115 if (tiling_flags & RADEON_TILING_MICRO)
3116 flags |= R200_SURF_TILE_COLOR_MICRO;
3117 } else {
3118 if (tiling_flags & RADEON_TILING_MACRO)
3119 flags |= R300_SURF_TILE_MACRO;
3120 if (tiling_flags & RADEON_TILING_MICRO)
3121 flags |= R300_SURF_TILE_MICRO;
3122 }
3123
3124 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3125 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3126 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3127 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3128
3129 /* r100/r200 divide by 16 */
3130 if (rdev->family < CHIP_R300)
3131 flags |= pitch / 16;
3132 else
3133 flags |= pitch / 8;
3134
3135
3136 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3137 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3138 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3139 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3140 return 0;
3141}
3142
3143void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3144{
3145 int surf_index = reg * 16;
3146 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
3147}
3148
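/* Display watermark programming: derive memory and engine latencies from the
 * memory timing registers, compute the drain rate of each enabled CRTC's
 * display FIFO and program the GRPH_BUFFER_CNTL / GRPH2_BUFFER_CNTL stop
 * requests and critical points so that scanout requests gain priority before
 * the FIFO can underflow.
 */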
3149void r100_bandwidth_update(struct radeon_device *rdev)
3150{
3151 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3152 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3153 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
3154 fixed20_12 crit_point_ff = {0};
3155 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3156 fixed20_12 memtcas_ff[8] = {
3157 dfixed_init(1),
3158 dfixed_init(2),
3159 dfixed_init(3),
3160 dfixed_init(0),
3161 dfixed_init_half(1),
3162 dfixed_init_half(2),
3163 dfixed_init(0),
3164 };
3165 fixed20_12 memtcas_rs480_ff[8] = {
3166 dfixed_init(0),
3167 dfixed_init(1),
3168 dfixed_init(2),
3169 dfixed_init(3),
3170 dfixed_init(0),
3171 dfixed_init_half(1),
3172 dfixed_init_half(2),
3173 dfixed_init_half(3),
3174 };
3175 fixed20_12 memtcas2_ff[8] = {
3176 dfixed_init(0),
3177 dfixed_init(1),
3178 dfixed_init(2),
3179 dfixed_init(3),
3180 dfixed_init(4),
3181 dfixed_init(5),
3182 dfixed_init(6),
3183 dfixed_init(7),
3184 };
3185 fixed20_12 memtrbs[8] = {
3186 dfixed_init(1),
3187 dfixed_init_half(1),
3188 dfixed_init(2),
3189 dfixed_init_half(2),
3190 dfixed_init(3),
3191 dfixed_init_half(3),
3192 dfixed_init(4),
3193 dfixed_init_half(4)
3194 };
3195 fixed20_12 memtrbs_r4xx[8] = {
3196 dfixed_init(4),
3197 dfixed_init(5),
3198 dfixed_init(6),
3199 dfixed_init(7),
3200 dfixed_init(8),
3201 dfixed_init(9),
3202 dfixed_init(10),
3203 dfixed_init(11)
3204 };
3205 fixed20_12 min_mem_eff;
3206 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3207 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3208 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0},
3209 disp_drain_rate2, read_return_rate;
3210 fixed20_12 time_disp1_drop_priority;
3211 int c;
3212 int cur_size = 16; /* in octawords */
3213 int critical_point = 0, critical_point2;
3214/* uint32_t read_return_rate, time_disp1_drop_priority; */
3215 int stop_req, max_stop_req;
3216 struct drm_display_mode *mode1 = NULL;
3217 struct drm_display_mode *mode2 = NULL;
3218 uint32_t pixel_bytes1 = 0;
3219 uint32_t pixel_bytes2 = 0;
3220
3221 /* Guess line buffer size to be 8192 pixels */
3222 u32 lb_size = 8192;
3223
3224 if (!rdev->mode_info.mode_config_initialized)
3225 return;
3226
3227 radeon_update_display_priority(rdev);
3228
3229 if (rdev->mode_info.crtcs[0]->base.enabled) {
3230 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3231 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
3232 }
3233 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3234 if (rdev->mode_info.crtcs[1]->base.enabled) {
3235 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3236 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
3237 }
3238 }
3239
3240 min_mem_eff.full = dfixed_const_8(0);
3241 /* get modes */
3242 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3243 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3244 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3245 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3246 /* check crtc enables */
3247 if (mode2)
3248 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3249 if (mode1)
3250 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3251 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3252 }
3253
3254 /*
3255 	 * determine if there is enough bandwidth for the current mode
3256 */
3257 sclk_ff = rdev->pm.sclk;
3258 mclk_ff = rdev->pm.mclk;
3259
3260 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3261 temp_ff.full = dfixed_const(temp);
3262 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3263
3264 pix_clk.full = 0;
3265 pix_clk2.full = 0;
3266 peak_disp_bw.full = 0;
3267 if (mode1) {
3268 temp_ff.full = dfixed_const(1000);
3269 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3270 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3271 temp_ff.full = dfixed_const(pixel_bytes1);
3272 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3273 }
3274 if (mode2) {
3275 temp_ff.full = dfixed_const(1000);
3276 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3277 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3278 temp_ff.full = dfixed_const(pixel_bytes2);
3279 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3280 }
3281
3282 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3283 if (peak_disp_bw.full >= mem_bw.full) {
3284 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3285 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3286 }
3287
3288 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3289 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3290 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3291 mem_trcd = ((temp >> 2) & 0x3) + 1;
3292 mem_trp = ((temp & 0x3)) + 1;
3293 mem_tras = ((temp & 0x70) >> 4) + 1;
3294 } else if (rdev->family == CHIP_R300 ||
3295 rdev->family == CHIP_R350) { /* r300, r350 */
3296 mem_trcd = (temp & 0x7) + 1;
3297 mem_trp = ((temp >> 8) & 0x7) + 1;
3298 mem_tras = ((temp >> 11) & 0xf) + 4;
3299 } else if (rdev->family == CHIP_RV350 ||
3300 rdev->family <= CHIP_RV380) {
3301 /* rv3x0 */
3302 mem_trcd = (temp & 0x7) + 3;
3303 mem_trp = ((temp >> 8) & 0x7) + 3;
3304 mem_tras = ((temp >> 11) & 0xf) + 6;
3305 } else if (rdev->family == CHIP_R420 ||
3306 rdev->family == CHIP_R423 ||
3307 rdev->family == CHIP_RV410) {
3308 /* r4xx */
3309 mem_trcd = (temp & 0xf) + 3;
3310 if (mem_trcd > 15)
3311 mem_trcd = 15;
3312 mem_trp = ((temp >> 8) & 0xf) + 3;
3313 if (mem_trp > 15)
3314 mem_trp = 15;
3315 mem_tras = ((temp >> 12) & 0x1f) + 6;
3316 if (mem_tras > 31)
3317 mem_tras = 31;
3318 } else { /* RV200, R200 */
3319 mem_trcd = (temp & 0x7) + 1;
3320 mem_trp = ((temp >> 8) & 0x7) + 1;
3321 mem_tras = ((temp >> 12) & 0xf) + 4;
3322 }
3323 /* convert to FF */
3324 trcd_ff.full = dfixed_const(mem_trcd);
3325 trp_ff.full = dfixed_const(mem_trp);
3326 tras_ff.full = dfixed_const(mem_tras);
3327
3328 	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3329 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3330 data = (temp & (7 << 20)) >> 20;
3331 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3332 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3333 tcas_ff = memtcas_rs480_ff[data];
3334 else
3335 tcas_ff = memtcas_ff[data];
3336 } else
3337 tcas_ff = memtcas2_ff[data];
3338
3339 if (rdev->family == CHIP_RS400 ||
3340 rdev->family == CHIP_RS480) {
3341 /* extra cas latency stored in bits 23-25 0-4 clocks */
3342 data = (temp >> 23) & 0x7;
3343 if (data < 5)
3344 tcas_ff.full += dfixed_const(data);
3345 }
3346
3347 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3348 /* on the R300, Tcas is included in Trbs.
3349 */
3350 temp = RREG32(RADEON_MEM_CNTL);
3351 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3352 if (data == 1) {
3353 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3354 temp = RREG32(R300_MC_IND_INDEX);
3355 temp &= ~R300_MC_IND_ADDR_MASK;
3356 temp |= R300_MC_READ_CNTL_CD_mcind;
3357 WREG32(R300_MC_IND_INDEX, temp);
3358 temp = RREG32(R300_MC_IND_DATA);
3359 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3360 } else {
3361 temp = RREG32(R300_MC_READ_CNTL_AB);
3362 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3363 }
3364 } else {
3365 temp = RREG32(R300_MC_READ_CNTL_AB);
3366 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3367 }
3368 if (rdev->family == CHIP_RV410 ||
3369 rdev->family == CHIP_R420 ||
3370 rdev->family == CHIP_R423)
3371 trbs_ff = memtrbs_r4xx[data];
3372 else
3373 trbs_ff = memtrbs[data];
3374 tcas_ff.full += trbs_ff.full;
3375 }
3376
3377 sclk_eff_ff.full = sclk_ff.full;
3378
3379 if (rdev->flags & RADEON_IS_AGP) {
3380 fixed20_12 agpmode_ff;
3381 agpmode_ff.full = dfixed_const(radeon_agpmode);
3382 temp_ff.full = dfixed_const_666(16);
3383 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3384 }
3385 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3386
3387 if (ASIC_IS_R300(rdev)) {
3388 sclk_delay_ff.full = dfixed_const(250);
3389 } else {
3390 if ((rdev->family == CHIP_RV100) ||
3391 rdev->flags & RADEON_IS_IGP) {
3392 if (rdev->mc.vram_is_ddr)
3393 sclk_delay_ff.full = dfixed_const(41);
3394 else
3395 sclk_delay_ff.full = dfixed_const(33);
3396 } else {
3397 if (rdev->mc.vram_width == 128)
3398 sclk_delay_ff.full = dfixed_const(57);
3399 else
3400 sclk_delay_ff.full = dfixed_const(41);
3401 }
3402 }
3403
3404 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3405
3406 if (rdev->mc.vram_is_ddr) {
3407 if (rdev->mc.vram_width == 32) {
3408 k1.full = dfixed_const(40);
3409 c = 3;
3410 } else {
3411 k1.full = dfixed_const(20);
3412 c = 1;
3413 }
3414 } else {
3415 k1.full = dfixed_const(40);
3416 c = 3;
3417 }
3418
3419 temp_ff.full = dfixed_const(2);
3420 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3421 temp_ff.full = dfixed_const(c);
3422 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3423 temp_ff.full = dfixed_const(4);
3424 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3425 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3426 mc_latency_mclk.full += k1.full;
3427
3428 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3429 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3430
3431 /*
3432 HW cursor time assuming worst case of full size colour cursor.
3433 */
3434 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3435 temp_ff.full += trcd_ff.full;
3436 if (temp_ff.full < tras_ff.full)
3437 temp_ff.full = tras_ff.full;
3438 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3439
3440 temp_ff.full = dfixed_const(cur_size);
3441 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3442 /*
3443 Find the total latency for the display data.
3444 */
3445 disp_latency_overhead.full = dfixed_const(8);
3446 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3447 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3448 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3449
3450 if (mc_latency_mclk.full > mc_latency_sclk.full)
3451 disp_latency.full = mc_latency_mclk.full;
3452 else
3453 disp_latency.full = mc_latency_sclk.full;
3454
3455 /* setup Max GRPH_STOP_REQ default value */
3456 if (ASIC_IS_RV100(rdev))
3457 max_stop_req = 0x5c;
3458 else
3459 max_stop_req = 0x7c;
3460
3461 if (mode1) {
3462 /* CRTC1
3463 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3464 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3465 */
3466 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3467
3468 if (stop_req > max_stop_req)
3469 stop_req = max_stop_req;
3470
3471 /*
3472 Find the drain rate of the display buffer.
3473 */
3474 temp_ff.full = dfixed_const((16/pixel_bytes1));
3475 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3476
3477 /*
3478 Find the critical point of the display buffer.
3479 */
3480 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3481 crit_point_ff.full += dfixed_const_half(0);
3482
3483 critical_point = dfixed_trunc(crit_point_ff);
3484
3485 if (rdev->disp_priority == 2) {
3486 critical_point = 0;
3487 }
3488
3489 /*
3490 The critical point should never be above max_stop_req-4. Setting
3491 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3492 */
3493 if (max_stop_req - critical_point < 4)
3494 critical_point = 0;
3495
3496 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3497 			/* some R300 cards have a problem with this set to 0, when CRTC2 is enabled. */
3498 critical_point = 0x10;
3499 }
3500
3501 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3502 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3503 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3504 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3505 if ((rdev->family == CHIP_R350) &&
3506 (stop_req > 0x15)) {
3507 stop_req -= 0x10;
3508 }
3509 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3510 temp |= RADEON_GRPH_BUFFER_SIZE;
3511 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3512 RADEON_GRPH_CRITICAL_AT_SOF |
3513 RADEON_GRPH_STOP_CNTL);
3514 /*
3515 Write the result into the register.
3516 */
3517 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3518 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3519
3520#if 0
3521 if ((rdev->family == CHIP_RS400) ||
3522 (rdev->family == CHIP_RS480)) {
3523 /* attempt to program RS400 disp regs correctly ??? */
3524 temp = RREG32(RS400_DISP1_REG_CNTL);
3525 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3526 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3527 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3528 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3529 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3530 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3531 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3532 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3533 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3534 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3535 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3536 }
3537#endif
3538
3539 		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL now 0x%x\n",
3540 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3541 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3542 }
3543
3544 if (mode2) {
3545 u32 grph2_cntl;
3546 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3547
3548 if (stop_req > max_stop_req)
3549 stop_req = max_stop_req;
3550
3551 /*
3552 Find the drain rate of the display buffer.
3553 */
3554 temp_ff.full = dfixed_const((16/pixel_bytes2));
3555 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3556
3557 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3558 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3559 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3560 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3561 if ((rdev->family == CHIP_R350) &&
3562 (stop_req > 0x15)) {
3563 stop_req -= 0x10;
3564 }
3565 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3566 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3567 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3568 RADEON_GRPH_CRITICAL_AT_SOF |
3569 RADEON_GRPH_STOP_CNTL);
3570
3571 if ((rdev->family == CHIP_RS100) ||
3572 (rdev->family == CHIP_RS200))
3573 critical_point2 = 0;
3574 else {
3575 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3576 temp_ff.full = dfixed_const(temp);
3577 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3578 if (sclk_ff.full < temp_ff.full)
3579 temp_ff.full = sclk_ff.full;
3580
3581 read_return_rate.full = temp_ff.full;
3582
3583 if (mode1) {
3584 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3585 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3586 } else {
3587 time_disp1_drop_priority.full = 0;
3588 }
3589 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3590 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3591 crit_point_ff.full += dfixed_const_half(0);
3592
3593 critical_point2 = dfixed_trunc(crit_point_ff);
3594
3595 if (rdev->disp_priority == 2) {
3596 critical_point2 = 0;
3597 }
3598
3599 if (max_stop_req - critical_point2 < 4)
3600 critical_point2 = 0;
3601
3602 }
3603
3604 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3605 /* some R300 cards have problem with this set to 0 */
3606 critical_point2 = 0x10;
3607 }
3608
3609 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3610 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3611
3612 if ((rdev->family == CHIP_RS400) ||
3613 (rdev->family == CHIP_RS480)) {
3614#if 0
3615 /* attempt to program RS400 disp2 regs correctly ??? */
3616 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3617 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3618 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3619 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3620 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3621 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3622 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3623 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3624 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3625 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3626 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3627 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3628#endif
3629 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3630 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3631 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3632 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3633 }
3634
3635 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3636 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3637 }
3638
3639 /* Save number of lines the linebuffer leads before the scanout */
3640 if (mode1)
3641 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
3642
3643 if (mode2)
3644 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
3645}
3646
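/**
 * r100_ring_test - basic CP ring sanity test.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Writes 0xCAFEDEAD to a scratch register, emits a single packet
 * through the ring that overwrites it with 0xDEADBEEF, then polls
 * the register to verify the CP executed the write.
 * Returns 0 on success, negative error code on failure.
 */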
3647int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3648{
3649 uint32_t scratch;
3650 uint32_t tmp = 0;
3651 unsigned i;
3652 int r;
3653
3654 r = radeon_scratch_get(rdev, &scratch);
3655 if (r) {
3656 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3657 return r;
3658 }
3659 WREG32(scratch, 0xCAFEDEAD);
3660 r = radeon_ring_lock(rdev, ring, 2);
3661 if (r) {
3662 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3663 radeon_scratch_free(rdev, scratch);
3664 return r;
3665 }
3666 radeon_ring_write(ring, PACKET0(scratch, 0));
3667 radeon_ring_write(ring, 0xDEADBEEF);
3668 radeon_ring_unlock_commit(rdev, ring, false);
3669 for (i = 0; i < rdev->usec_timeout; i++) {
3670 tmp = RREG32(scratch);
3671 if (tmp == 0xDEADBEEF) {
3672 break;
3673 }
3674 DRM_UDELAY(1);
3675 }
3676 if (i < rdev->usec_timeout) {
3677 DRM_INFO("ring test succeeded in %d usecs\n", i);
3678 } else {
3679 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3680 scratch, tmp);
3681 r = -EINVAL;
3682 }
3683 radeon_scratch_free(rdev, scratch);
3684 return r;
3685}
3686
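/**
 * r100_ring_ib_execute - emit an indirect buffer on the gfx ring.
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * If a rptr save register is available, records the expected read
 * pointer there, then writes the IB's GPU address and length to
 * CP_IB_BASE so the CP fetches and executes the buffer.
 */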
3687void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3688{
3689 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3690
3691 if (ring->rptr_save_reg) {
3692 u32 next_rptr = ring->wptr + 2 + 3;
3693 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3694 radeon_ring_write(ring, next_rptr);
3695 }
3696
3697 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3698 radeon_ring_write(ring, ib->gpu_addr);
3699 radeon_ring_write(ring, ib->length_dw);
3700}
3701
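/**
 * r100_ib_test - indirect buffer execution test.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Writes 0xCAFEDEAD to a scratch register, schedules a small IB that
 * overwrites it with 0xDEADBEEF, waits on the IB fence and then polls
 * the register to verify that IB execution works.
 * Returns 0 on success, negative error code on failure.
 */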
3702int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3703{
3704 struct radeon_ib ib;
3705 uint32_t scratch;
3706 uint32_t tmp = 0;
3707 unsigned i;
3708 int r;
3709
3710 r = radeon_scratch_get(rdev, &scratch);
3711 if (r) {
3712 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3713 return r;
3714 }
3715 WREG32(scratch, 0xCAFEDEAD);
3716 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3717 if (r) {
3718 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3719 goto free_scratch;
3720 }
3721 ib.ptr[0] = PACKET0(scratch, 0);
3722 ib.ptr[1] = 0xDEADBEEF;
3723 ib.ptr[2] = PACKET2(0);
3724 ib.ptr[3] = PACKET2(0);
3725 ib.ptr[4] = PACKET2(0);
3726 ib.ptr[5] = PACKET2(0);
3727 ib.ptr[6] = PACKET2(0);
3728 ib.ptr[7] = PACKET2(0);
3729 ib.length_dw = 8;
3730 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3731 if (r) {
3732 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3733 goto free_ib;
3734 }
3735 r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
3736 RADEON_USEC_IB_TEST_TIMEOUT));
3737 if (r < 0) {
3738 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3739 goto free_ib;
3740 } else if (r == 0) {
3741 DRM_ERROR("radeon: fence wait timed out.\n");
3742 r = -ETIMEDOUT;
3743 goto free_ib;
3744 }
3745 r = 0;
3746 for (i = 0; i < rdev->usec_timeout; i++) {
3747 tmp = RREG32(scratch);
3748 if (tmp == 0xDEADBEEF) {
3749 break;
3750 }
3751 DRM_UDELAY(1);
3752 }
3753 if (i < rdev->usec_timeout) {
3754 DRM_INFO("ib test succeeded in %u usecs\n", i);
3755 } else {
3756 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3757 scratch, tmp);
3758 r = -EINVAL;
3759 }
3760free_ib:
3761 radeon_ib_free(rdev, &ib);
3762free_scratch:
3763 radeon_scratch_free(rdev, scratch);
3764 return r;
3765}
3766
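/**
 * r100_mc_stop - stop MC clients before reprogramming the MC.
 *
 * @rdev: radeon_device pointer
 * @save: structure used to save the CRTC/VGA state
 *
 * Shuts the CP down, saves the relevant CRTC and VGA registers into
 * @save, then disables VGA aperture access, the cursor, the overlay
 * and CRTC display requests so the memory controller can be
 * reprogrammed safely.
 */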
3767void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3768{
3769	/* Shut down the CP; we shouldn't need to do this, but better
3770	 * safe than sorry.
3771	 */
3772 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3773 WREG32(R_000740_CP_CSQ_CNTL, 0);
3774
3775	/* Save a few CRTC registers */
3776 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3777 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3778 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3779 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3780 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3781 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3782 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3783 }
3784
3785 /* Disable VGA aperture access */
3786 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3787 /* Disable cursor, overlay, crtc */
3788 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3789 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3790 S_000054_CRTC_DISPLAY_DIS(1));
3791 WREG32(R_000050_CRTC_GEN_CNTL,
3792 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3793 S_000050_CRTC_DISP_REQ_EN_B(1));
3794 WREG32(R_000420_OV0_SCALE_CNTL,
3795 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3796 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3797 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3798 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3799 S_000360_CUR2_LOCK(1));
3800 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3801 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3802 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3803 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3804 WREG32(R_000360_CUR2_OFFSET,
3805 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3806 }
3807}
3808
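/**
 * r100_mc_resume - restore display state after reprogramming the MC.
 *
 * @rdev: radeon_device pointer
 * @save: state previously saved by r100_mc_stop()
 *
 * Points the CRTC base addresses at the start of VRAM and restores
 * the CRTC/VGA registers saved by r100_mc_stop().
 */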
3809void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3810{
3811 /* Update base address for crtc */
3812 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3813 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3814 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3815 }
3816 /* Restore CRTC registers */
3817 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3818 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3819 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3820 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3821 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3822 }
3823}
3824
3825void r100_vga_render_disable(struct radeon_device *rdev)
3826{
3827 u32 tmp;
3828
3829 tmp = RREG8(R_0003C2_GENMO_WT);
3830 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3831}
3832
3833static void r100_debugfs(struct radeon_device *rdev)
3834{
3835 int r;
3836
3837 r = r100_debugfs_mc_info_init(rdev);
3838 if (r)
3839 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3840}
3841
3842static void r100_mc_program(struct radeon_device *rdev)
3843{
3844 struct r100_mc_save save;
3845
3846	/* Stop all MC clients */
3847 r100_mc_stop(rdev, &save);
3848 if (rdev->flags & RADEON_IS_AGP) {
3849 WREG32(R_00014C_MC_AGP_LOCATION,
3850 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3851 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3852 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3853 if (rdev->family > CHIP_RV200)
3854 WREG32(R_00015C_AGP_BASE_2,
3855 upper_32_bits(rdev->mc.agp_base) & 0xff);
3856 } else {
3857 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3858 WREG32(R_000170_AGP_BASE, 0);
3859 if (rdev->family > CHIP_RV200)
3860 WREG32(R_00015C_AGP_BASE_2, 0);
3861 }
3862 /* Wait for mc idle */
3863 if (r100_mc_wait_for_idle(rdev))
3864 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3865	/* Program the MC; it should be limited to a 32-bit address space */
3866 WREG32(R_000148_MC_FB_LOCATION,
3867 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3868 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3869 r100_mc_resume(rdev, &save);
3870}
3871
3872static void r100_clock_startup(struct radeon_device *rdev)
3873{
3874 u32 tmp;
3875
3876 if (radeon_dynclks != -1 && radeon_dynclks)
3877 radeon_legacy_set_clock_gating(rdev, 1);
3878	/* We need to force some of the blocks on */
3879 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3880 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3881 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3882 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3883 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3884}
3885
3886static int r100_startup(struct radeon_device *rdev)
3887{
3888 int r;
3889
3890 /* set common regs */
3891 r100_set_common_regs(rdev);
3892 /* program mc */
3893 r100_mc_program(rdev);
3894 /* Resume clock */
3895 r100_clock_startup(rdev);
3896 /* Initialize GART (initialize after TTM so we can allocate
3897 * memory through TTM but finalize after TTM) */
3898 r100_enable_bm(rdev);
3899 if (rdev->flags & RADEON_IS_PCI) {
3900 r = r100_pci_gart_enable(rdev);
3901 if (r)
3902 return r;
3903 }
3904
3905 /* allocate wb buffer */
3906 r = radeon_wb_init(rdev);
3907 if (r)
3908 return r;
3909
3910 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3911 if (r) {
3912 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3913 return r;
3914 }
3915
3916 /* Enable IRQ */
3917 if (!rdev->irq.installed) {
3918 r = radeon_irq_kms_init(rdev);
3919 if (r)
3920 return r;
3921 }
3922
3923 r100_irq_set(rdev);
3924 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3925 /* 1M ring buffer */
3926 r = r100_cp_init(rdev, 1024 * 1024);
3927 if (r) {
3928 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3929 return r;
3930 }
3931
3932 r = radeon_ib_pool_init(rdev);
3933 if (r) {
3934 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3935 return r;
3936 }
3937
3938 return 0;
3939}
3940
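/**
 * r100_resume - resume the asic.
 *
 * @rdev: radeon_device pointer
 *
 * Makes sure the GART is disabled, resets the GPU, re-posts it via the
 * combios tables, restores clocks and surface registers, then runs the
 * common startup sequence.
 * Returns 0 on success, negative error code on failure.
 */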
3941int r100_resume(struct radeon_device *rdev)
3942{
3943 int r;
3944
3945	/* Make sure the GART is not active */
3946 if (rdev->flags & RADEON_IS_PCI)
3947 r100_pci_gart_disable(rdev);
3948 /* Resume clock before doing reset */
3949 r100_clock_startup(rdev);
3950 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3951 if (radeon_asic_reset(rdev)) {
3952 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3953 RREG32(R_000E40_RBBM_STATUS),
3954 RREG32(R_0007C0_CP_STAT));
3955 }
3956 /* post */
3957 radeon_combios_asic_init(rdev->ddev);
3958 /* Resume clock after posting */
3959 r100_clock_startup(rdev);
3960 /* Initialize surface registers */
3961 radeon_surface_init(rdev);
3962
3963 rdev->accel_working = true;
3964 r = r100_startup(rdev);
3965 if (r) {
3966 rdev->accel_working = false;
3967 }
3968 return r;
3969}
3970
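/**
 * r100_suspend - suspend the asic.
 *
 * @rdev: radeon_device pointer
 *
 * Suspends power management and disables the CP, writeback, interrupts
 * and, when in use, the PCI GART.
 * Returns 0.
 */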
3971int r100_suspend(struct radeon_device *rdev)
3972{
3973 radeon_pm_suspend(rdev);
3974 r100_cp_disable(rdev);
3975 radeon_wb_disable(rdev);
3976 r100_irq_disable(rdev);
3977 if (rdev->flags & RADEON_IS_PCI)
3978 r100_pci_gart_disable(rdev);
3979 return 0;
3980}
3981
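/**
 * r100_fini - tear down the asic-specific driver state.
 *
 * @rdev: radeon_device pointer
 *
 * Shuts down power management, the CP, writeback, the IB pool, GEM,
 * the PCI GART (if used), AGP, interrupts, the fence driver and the
 * buffer manager, and frees the cached BIOS image.
 */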
3982void r100_fini(struct radeon_device *rdev)
3983{
3984 radeon_pm_fini(rdev);
3985 r100_cp_fini(rdev);
3986 radeon_wb_fini(rdev);
3987 radeon_ib_pool_fini(rdev);
3988 radeon_gem_fini(rdev);
3989 if (rdev->flags & RADEON_IS_PCI)
3990 r100_pci_gart_fini(rdev);
3991 radeon_agp_fini(rdev);
3992 radeon_irq_kms_fini(rdev);
3993 radeon_fence_driver_fini(rdev);
3994 radeon_bo_fini(rdev);
3995 radeon_atombios_fini(rdev);
3996 kfree(rdev->bios);
3997 rdev->bios = NULL;
3998}
3999
4000/*
4001 * Due to how kexec works, it can leave the hw fully initialised when it
4002 * boots the new kernel. However, doing our init sequence with the CP and
4003 * WB set up causes GPU hangs, on the RN50 at least. So at startup
4004 * do some quick sanity checks and restore sane values to avoid this
4005 * problem.
4006 */
4007void r100_restore_sanity(struct radeon_device *rdev)
4008{
4009 u32 tmp;
4010
4011 tmp = RREG32(RADEON_CP_CSQ_CNTL);
4012 if (tmp) {
4013 WREG32(RADEON_CP_CSQ_CNTL, 0);
4014 }
4015 tmp = RREG32(RADEON_CP_RB_CNTL);
4016 if (tmp) {
4017 WREG32(RADEON_CP_RB_CNTL, 0);
4018 }
4019 tmp = RREG32(RADEON_SCRATCH_UMSK);
4020 if (tmp) {
4021 WREG32(RADEON_SCRATCH_UMSK, 0);
4022 }
4023}
4024
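/**
 * r100_init - asic-specific driver initialization.
 *
 * @rdev: radeon_device pointer
 *
 * Performs the one-time setup for this asic family: debugfs, VGA
 * disable, scratch and surface registers, BIOS parsing, GPU reset and
 * post check, errata, clocks, AGP, VRAM, the fence driver, the memory
 * manager, the PCI GART (if used), safe registers and power
 * management, and finally the acceleration engines. If acceleration
 * fails to start it is torn down and the device is left usable for
 * modesetting only.
 * Returns 0 on success, negative error code on failure.
 */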
4025int r100_init(struct radeon_device *rdev)
4026{
4027 int r;
4028
4029 /* Register debugfs file specific to this group of asics */
4030 r100_debugfs(rdev);
4031 /* Disable VGA */
4032 r100_vga_render_disable(rdev);
4033 /* Initialize scratch registers */
4034 radeon_scratch_init(rdev);
4035 /* Initialize surface registers */
4036 radeon_surface_init(rdev);
4037	/* sanity check some registers to avoid hangs, e.g. after kexec */
4038 r100_restore_sanity(rdev);
4039 /* TODO: disable VGA need to use VGA request */
4040 /* BIOS*/
4041 if (!radeon_get_bios(rdev)) {
4042 if (ASIC_IS_AVIVO(rdev))
4043 return -EINVAL;
4044 }
4045 if (rdev->is_atom_bios) {
4046 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
4047 return -EINVAL;
4048 } else {
4049 r = radeon_combios_init(rdev);
4050 if (r)
4051 return r;
4052 }
4053 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
4054 if (radeon_asic_reset(rdev)) {
4055 dev_warn(rdev->dev,
4056 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4057 RREG32(R_000E40_RBBM_STATUS),
4058 RREG32(R_0007C0_CP_STAT));
4059 }
4060 /* check if cards are posted or not */
4061 if (radeon_boot_test_post_card(rdev) == false)
4062 return -EINVAL;
4063 /* Set asic errata */
4064 r100_errata(rdev);
4065 /* Initialize clocks */
4066 radeon_get_clock_info(rdev->ddev);
4067 /* initialize AGP */
4068 if (rdev->flags & RADEON_IS_AGP) {
4069 r = radeon_agp_init(rdev);
4070 if (r) {
4071 radeon_agp_disable(rdev);
4072 }
4073 }
4074 /* initialize VRAM */
4075 r100_mc_init(rdev);
4076 /* Fence driver */
4077 r = radeon_fence_driver_init(rdev);
4078 if (r)
4079 return r;
4080 /* Memory manager */
4081 r = radeon_bo_init(rdev);
4082 if (r)
4083 return r;
4084 if (rdev->flags & RADEON_IS_PCI) {
4085 r = r100_pci_gart_init(rdev);
4086 if (r)
4087 return r;
4088 }
4089 r100_set_safe_registers(rdev);
4090
4091 /* Initialize power management */
4092 radeon_pm_init(rdev);
4093
4094 rdev->accel_working = true;
4095 r = r100_startup(rdev);
4096 if (r) {
4097		/* Something went wrong with the accel init, stop accel */
4098 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4099 r100_cp_fini(rdev);
4100 radeon_wb_fini(rdev);
4101 radeon_ib_pool_fini(rdev);
4102 radeon_irq_kms_fini(rdev);
4103 if (rdev->flags & RADEON_IS_PCI)
4104 r100_pci_gart_fini(rdev);
4105 rdev->accel_working = false;
4106 }
4107 return 0;
4108}
4109
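/**
 * r100_mm_rreg_slow - read an MMIO register via the index/data pair.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 *
 * Accesses the register indirectly by writing its offset to
 * RADEON_MM_INDEX and reading the value back from RADEON_MM_DATA
 * while holding the mmio index lock.
 * Returns the register value.
 */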
4110uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
4111{
4112 unsigned long flags;
4113 uint32_t ret;
4114
4115 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4116 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4117 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4118 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4119 return ret;
4120}
4121
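/**
 * r100_mm_wreg_slow - write an MMIO register via the index/data pair.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @v: value to write
 *
 * Accesses the register indirectly by writing its offset to
 * RADEON_MM_INDEX and the value to RADEON_MM_DATA while holding the
 * mmio index lock.
 */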
4122void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
4123{
4124 unsigned long flags;
4125
4126 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4127 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4128 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4129 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4130}
4131
4132u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4133{
4134 if (reg < rdev->rio_mem_size)
4135 return ioread32(rdev->rio_mem + reg);
4136 else {
4137 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4138 return ioread32(rdev->rio_mem + RADEON_MM_DATA);
4139 }
4140}
4141
4142void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4143{
4144 if (reg < rdev->rio_mem_size)
4145 iowrite32(v, rdev->rio_mem + reg);
4146 else {
4147 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4148 iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
4149 }
4150}
110 */
111void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
112{
113 unsigned i = 0;
114
115 if (crtc >= rdev->num_crtc)
116 return;
117
118 if (crtc == 0) {
119 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
120 return;
121 } else {
122 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
123 return;
124 }
125
126 /* depending on when we hit vblank, we may be close to active; if so,
127 * wait for another frame.
128 */
129 while (r100_is_in_vblank(rdev, crtc)) {
130 if (i++ % 100 == 0) {
131 if (!r100_is_counter_moving(rdev, crtc))
132 break;
133 }
134 }
135
136 while (!r100_is_in_vblank(rdev, crtc)) {
137 if (i++ % 100 == 0) {
138 if (!r100_is_counter_moving(rdev, crtc))
139 break;
140 }
141 }
142}
143
144/**
145 * r100_pre_page_flip - pre-pageflip callback.
146 *
147 * @rdev: radeon_device pointer
148 * @crtc: crtc to prepare for pageflip on
149 *
150 * Pre-pageflip callback (r1xx-r4xx).
151 * Enables the pageflip irq (vblank irq).
152 */
153void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
154{
155 /* enable the pflip int */
156 radeon_irq_kms_pflip_irq_get(rdev, crtc);
157}
158
159/**
160 * r100_post_page_flip - pos-pageflip callback.
161 *
162 * @rdev: radeon_device pointer
163 * @crtc: crtc to cleanup pageflip on
164 *
165 * Post-pageflip callback (r1xx-r4xx).
166 * Disables the pageflip irq (vblank irq).
167 */
168void r100_post_page_flip(struct radeon_device *rdev, int crtc)
169{
170 /* disable the pflip int */
171 radeon_irq_kms_pflip_irq_put(rdev, crtc);
172}
173
174/**
175 * r100_page_flip - pageflip callback.
176 *
177 * @rdev: radeon_device pointer
178 * @crtc_id: crtc to cleanup pageflip on
179 * @crtc_base: new address of the crtc (GPU MC address)
180 *
181 * Does the actual pageflip (r1xx-r4xx).
182 * During vblank we take the crtc lock and wait for the update_pending
183 * bit to go high, when it does, we release the lock, and allow the
184 * double buffered update to take place.
185 * Returns the current update pending status.
186 */
187u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
188{
189 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
190 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
191 int i;
192
193 /* Lock the graphics update lock */
194 /* update the scanout addresses */
195 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
196
197 /* Wait for update_pending to go high. */
198 for (i = 0; i < rdev->usec_timeout; i++) {
199 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
200 break;
201 udelay(1);
202 }
203 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
204
205 /* Unlock the lock, so double-buffering can take place inside vblank */
206 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
207 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
208
209 /* Return current update_pending status: */
210 return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
211}
212
213/**
214 * r100_pm_get_dynpm_state - look up dynpm power state callback.
215 *
216 * @rdev: radeon_device pointer
217 *
218 * Look up the optimal power state based on the
219 * current state of the GPU (r1xx-r5xx).
220 * Used for dynpm only.
221 */
222void r100_pm_get_dynpm_state(struct radeon_device *rdev)
223{
224 int i;
225 rdev->pm.dynpm_can_upclock = true;
226 rdev->pm.dynpm_can_downclock = true;
227
228 switch (rdev->pm.dynpm_planned_action) {
229 case DYNPM_ACTION_MINIMUM:
230 rdev->pm.requested_power_state_index = 0;
231 rdev->pm.dynpm_can_downclock = false;
232 break;
233 case DYNPM_ACTION_DOWNCLOCK:
234 if (rdev->pm.current_power_state_index == 0) {
235 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
236 rdev->pm.dynpm_can_downclock = false;
237 } else {
238 if (rdev->pm.active_crtc_count > 1) {
239 for (i = 0; i < rdev->pm.num_power_states; i++) {
240 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
241 continue;
242 else if (i >= rdev->pm.current_power_state_index) {
243 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
244 break;
245 } else {
246 rdev->pm.requested_power_state_index = i;
247 break;
248 }
249 }
250 } else
251 rdev->pm.requested_power_state_index =
252 rdev->pm.current_power_state_index - 1;
253 }
254 /* don't use the power state if crtcs are active and no display flag is set */
255 if ((rdev->pm.active_crtc_count > 0) &&
256 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
257 RADEON_PM_MODE_NO_DISPLAY)) {
258 rdev->pm.requested_power_state_index++;
259 }
260 break;
261 case DYNPM_ACTION_UPCLOCK:
262 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
263 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
264 rdev->pm.dynpm_can_upclock = false;
265 } else {
266 if (rdev->pm.active_crtc_count > 1) {
267 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
268 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
269 continue;
270 else if (i <= rdev->pm.current_power_state_index) {
271 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
272 break;
273 } else {
274 rdev->pm.requested_power_state_index = i;
275 break;
276 }
277 }
278 } else
279 rdev->pm.requested_power_state_index =
280 rdev->pm.current_power_state_index + 1;
281 }
282 break;
283 case DYNPM_ACTION_DEFAULT:
284 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
285 rdev->pm.dynpm_can_upclock = false;
286 break;
287 case DYNPM_ACTION_NONE:
288 default:
289 DRM_ERROR("Requested mode for not defined action\n");
290 return;
291 }
292 /* only one clock mode per power state */
293 rdev->pm.requested_clock_mode_index = 0;
294
295 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
296 rdev->pm.power_state[rdev->pm.requested_power_state_index].
297 clock_info[rdev->pm.requested_clock_mode_index].sclk,
298 rdev->pm.power_state[rdev->pm.requested_power_state_index].
299 clock_info[rdev->pm.requested_clock_mode_index].mclk,
300 rdev->pm.power_state[rdev->pm.requested_power_state_index].
301 pcie_lanes);
302}
303
304/**
305 * r100_pm_init_profile - Initialize power profiles callback.
306 *
307 * @rdev: radeon_device pointer
308 *
309 * Initialize the power states used in profile mode
310 * (r1xx-r3xx).
311 * Used for profile mode only.
312 */
313void r100_pm_init_profile(struct radeon_device *rdev)
314{
315 /* default */
316 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
317 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
318 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
319 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
320 /* low sh */
321 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
322 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
323 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
324 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
325 /* mid sh */
326 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
327 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
328 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
329 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
330 /* high sh */
331 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
332 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
333 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
334 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
335 /* low mh */
336 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
337 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
338 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
339 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
340 /* mid mh */
341 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
342 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
343 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
344 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
345 /* high mh */
346 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
347 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
348 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
349 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
350}
351
352/**
353 * r100_pm_misc - set additional pm hw parameters callback.
354 *
355 * @rdev: radeon_device pointer
356 *
357 * Set non-clock parameters associated with a power state
358 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
359 */
360void r100_pm_misc(struct radeon_device *rdev)
361{
362 int requested_index = rdev->pm.requested_power_state_index;
363 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
364 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
365 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
366
367 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
368 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
369 tmp = RREG32(voltage->gpio.reg);
370 if (voltage->active_high)
371 tmp |= voltage->gpio.mask;
372 else
373 tmp &= ~(voltage->gpio.mask);
374 WREG32(voltage->gpio.reg, tmp);
375 if (voltage->delay)
376 udelay(voltage->delay);
377 } else {
378 tmp = RREG32(voltage->gpio.reg);
379 if (voltage->active_high)
380 tmp &= ~voltage->gpio.mask;
381 else
382 tmp |= voltage->gpio.mask;
383 WREG32(voltage->gpio.reg, tmp);
384 if (voltage->delay)
385 udelay(voltage->delay);
386 }
387 }
388
389 sclk_cntl = RREG32_PLL(SCLK_CNTL);
390 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
391 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
392 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
393 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
394 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
395 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
396 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
397 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
398 else
399 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
400 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
401 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
402 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
403 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
404 } else
405 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
406
407 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
408 sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
409 if (voltage->delay) {
410 sclk_more_cntl |= VOLTAGE_DROP_SYNC;
411 switch (voltage->delay) {
412 case 33:
413 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
414 break;
415 case 66:
416 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
417 break;
418 case 99:
419 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
420 break;
421 case 132:
422 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
423 break;
424 }
425 } else
426 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
427 } else
428 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
429
430 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
431 sclk_cntl &= ~FORCE_HDP;
432 else
433 sclk_cntl |= FORCE_HDP;
434
435 WREG32_PLL(SCLK_CNTL, sclk_cntl);
436 WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
437 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
438
439 /* set pcie lanes */
440 if ((rdev->flags & RADEON_IS_PCIE) &&
441 !(rdev->flags & RADEON_IS_IGP) &&
442 rdev->asic->pm.set_pcie_lanes &&
443 (ps->pcie_lanes !=
444 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
445 radeon_set_pcie_lanes(rdev,
446 ps->pcie_lanes);
447 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
448 }
449}
450
451/**
452 * r100_pm_prepare - pre-power state change callback.
453 *
454 * @rdev: radeon_device pointer
455 *
456 * Prepare for a power state change (r1xx-r4xx).
457 */
458void r100_pm_prepare(struct radeon_device *rdev)
459{
460 struct drm_device *ddev = rdev->ddev;
461 struct drm_crtc *crtc;
462 struct radeon_crtc *radeon_crtc;
463 u32 tmp;
464
465 /* disable any active CRTCs */
466 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
467 radeon_crtc = to_radeon_crtc(crtc);
468 if (radeon_crtc->enabled) {
469 if (radeon_crtc->crtc_id) {
470 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
471 tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
472 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
473 } else {
474 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
475 tmp |= RADEON_CRTC_DISP_REQ_EN_B;
476 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
477 }
478 }
479 }
480}
481
482/**
483 * r100_pm_finish - post-power state change callback.
484 *
485 * @rdev: radeon_device pointer
486 *
487 * Clean up after a power state change (r1xx-r4xx).
488 */
489void r100_pm_finish(struct radeon_device *rdev)
490{
491 struct drm_device *ddev = rdev->ddev;
492 struct drm_crtc *crtc;
493 struct radeon_crtc *radeon_crtc;
494 u32 tmp;
495
496 /* enable any active CRTCs */
497 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
498 radeon_crtc = to_radeon_crtc(crtc);
499 if (radeon_crtc->enabled) {
500 if (radeon_crtc->crtc_id) {
501 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
502 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
503 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
504 } else {
505 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
506 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
507 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
508 }
509 }
510 }
511}
512
513/**
514 * r100_gui_idle - gui idle callback.
515 *
516 * @rdev: radeon_device pointer
517 *
518 * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx).
519 * Returns true if idle, false if not.
520 */
521bool r100_gui_idle(struct radeon_device *rdev)
522{
523 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
524 return false;
525 else
526 return true;
527}
528
529/* hpd for digital panel detect/disconnect */
530/**
531 * r100_hpd_sense - hpd sense callback.
532 *
533 * @rdev: radeon_device pointer
534 * @hpd: hpd (hotplug detect) pin
535 *
536 * Checks if a digital monitor is connected (r1xx-r4xx).
537 * Returns true if connected, false if not connected.
538 */
539bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
540{
541 bool connected = false;
542
543 switch (hpd) {
544 case RADEON_HPD_1:
545 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
546 connected = true;
547 break;
548 case RADEON_HPD_2:
549 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
550 connected = true;
551 break;
552 default:
553 break;
554 }
555 return connected;
556}
557
558/**
559 * r100_hpd_set_polarity - hpd set polarity callback.
560 *
561 * @rdev: radeon_device pointer
562 * @hpd: hpd (hotplug detect) pin
563 *
564 * Set the polarity of the hpd pin (r1xx-r4xx).
565 */
566void r100_hpd_set_polarity(struct radeon_device *rdev,
567 enum radeon_hpd_id hpd)
568{
569 u32 tmp;
570 bool connected = r100_hpd_sense(rdev, hpd);
571
572 switch (hpd) {
573 case RADEON_HPD_1:
574 tmp = RREG32(RADEON_FP_GEN_CNTL);
575 if (connected)
576 tmp &= ~RADEON_FP_DETECT_INT_POL;
577 else
578 tmp |= RADEON_FP_DETECT_INT_POL;
579 WREG32(RADEON_FP_GEN_CNTL, tmp);
580 break;
581 case RADEON_HPD_2:
582 tmp = RREG32(RADEON_FP2_GEN_CNTL);
583 if (connected)
584 tmp &= ~RADEON_FP2_DETECT_INT_POL;
585 else
586 tmp |= RADEON_FP2_DETECT_INT_POL;
587 WREG32(RADEON_FP2_GEN_CNTL, tmp);
588 break;
589 default:
590 break;
591 }
592}
593
594/**
595 * r100_hpd_init - hpd setup callback.
596 *
597 * @rdev: radeon_device pointer
598 *
599 * Setup the hpd pins used by the card (r1xx-r4xx).
600 * Set the polarity, and enable the hpd interrupts.
601 */
602void r100_hpd_init(struct radeon_device *rdev)
603{
604 struct drm_device *dev = rdev->ddev;
605 struct drm_connector *connector;
606 unsigned enable = 0;
607
608 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
609 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
610 enable |= 1 << radeon_connector->hpd.hpd;
611 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
612 }
613 radeon_irq_kms_enable_hpd(rdev, enable);
614}
615
616/**
617 * r100_hpd_fini - hpd tear down callback.
618 *
619 * @rdev: radeon_device pointer
620 *
621 * Tear down the hpd pins used by the card (r1xx-r4xx).
622 * Disable the hpd interrupts.
623 */
624void r100_hpd_fini(struct radeon_device *rdev)
625{
626 struct drm_device *dev = rdev->ddev;
627 struct drm_connector *connector;
628 unsigned disable = 0;
629
630 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
631 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
632 disable |= 1 << radeon_connector->hpd.hpd;
633 }
634 radeon_irq_kms_disable_hpd(rdev, disable);
635}
636
637/*
638 * PCI GART
639 */
640void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
641{
642 /* TODO: can we do somethings here ? */
643 /* It seems hw only cache one entry so we should discard this
644 * entry otherwise if first GPU GART read hit this entry it
645 * could end up in wrong address. */
646}
647
648int r100_pci_gart_init(struct radeon_device *rdev)
649{
650 int r;
651
652 if (rdev->gart.ptr) {
653 WARN(1, "R100 PCI GART already initialized\n");
654 return 0;
655 }
656 /* Initialize common gart structure */
657 r = radeon_gart_init(rdev);
658 if (r)
659 return r;
660 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
661 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
662 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
663 return radeon_gart_table_ram_alloc(rdev);
664}
665
666int r100_pci_gart_enable(struct radeon_device *rdev)
667{
668 uint32_t tmp;
669
670 radeon_gart_restore(rdev);
671 /* discard memory request outside of configured range */
672 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
673 WREG32(RADEON_AIC_CNTL, tmp);
674 /* set address range for PCI address translate */
675 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
676 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
677 /* set PCI GART page-table base address */
678 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
679 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
680 WREG32(RADEON_AIC_CNTL, tmp);
681 r100_pci_gart_tlb_flush(rdev);
682 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
683 (unsigned)(rdev->mc.gtt_size >> 20),
684 (unsigned long long)rdev->gart.table_addr);
685 rdev->gart.ready = true;
686 return 0;
687}
688
689void r100_pci_gart_disable(struct radeon_device *rdev)
690{
691 uint32_t tmp;
692
693 /* discard memory request outside of configured range */
694 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
695 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
696 WREG32(RADEON_AIC_LO_ADDR, 0);
697 WREG32(RADEON_AIC_HI_ADDR, 0);
698}
699
700int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
701{
702 u32 *gtt = rdev->gart.ptr;
703
704 if (i < 0 || i > rdev->gart.num_gpu_pages) {
705 return -EINVAL;
706 }
707 gtt[i] = cpu_to_le32(lower_32_bits(addr));
708 return 0;
709}
710
711void r100_pci_gart_fini(struct radeon_device *rdev)
712{
713 radeon_gart_fini(rdev);
714 r100_pci_gart_disable(rdev);
715 radeon_gart_table_ram_free(rdev);
716}
717
718int r100_irq_set(struct radeon_device *rdev)
719{
720 uint32_t tmp = 0;
721
722 if (!rdev->irq.installed) {
723 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
724 WREG32(R_000040_GEN_INT_CNTL, 0);
725 return -EINVAL;
726 }
727 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
728 tmp |= RADEON_SW_INT_ENABLE;
729 }
730 if (rdev->irq.crtc_vblank_int[0] ||
731 atomic_read(&rdev->irq.pflip[0])) {
732 tmp |= RADEON_CRTC_VBLANK_MASK;
733 }
734 if (rdev->irq.crtc_vblank_int[1] ||
735 atomic_read(&rdev->irq.pflip[1])) {
736 tmp |= RADEON_CRTC2_VBLANK_MASK;
737 }
738 if (rdev->irq.hpd[0]) {
739 tmp |= RADEON_FP_DETECT_MASK;
740 }
741 if (rdev->irq.hpd[1]) {
742 tmp |= RADEON_FP2_DETECT_MASK;
743 }
744 WREG32(RADEON_GEN_INT_CNTL, tmp);
745 return 0;
746}
747
748void r100_irq_disable(struct radeon_device *rdev)
749{
750 u32 tmp;
751
752 WREG32(R_000040_GEN_INT_CNTL, 0);
753 /* Wait and acknowledge irq */
754 mdelay(1);
755 tmp = RREG32(R_000044_GEN_INT_STATUS);
756 WREG32(R_000044_GEN_INT_STATUS, tmp);
757}
758
759static uint32_t r100_irq_ack(struct radeon_device *rdev)
760{
761 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
762 uint32_t irq_mask = RADEON_SW_INT_TEST |
763 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
764 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
765
766 if (irqs) {
767 WREG32(RADEON_GEN_INT_STATUS, irqs);
768 }
769 return irqs & irq_mask;
770}
771
772int r100_irq_process(struct radeon_device *rdev)
773{
774 uint32_t status, msi_rearm;
775 bool queue_hotplug = false;
776
777 status = r100_irq_ack(rdev);
778 if (!status) {
779 return IRQ_NONE;
780 }
781 if (rdev->shutdown) {
782 return IRQ_NONE;
783 }
784 while (status) {
785 /* SW interrupt */
786 if (status & RADEON_SW_INT_TEST) {
787 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
788 }
789 /* Vertical blank interrupts */
790 if (status & RADEON_CRTC_VBLANK_STAT) {
791 if (rdev->irq.crtc_vblank_int[0]) {
792 drm_handle_vblank(rdev->ddev, 0);
793 rdev->pm.vblank_sync = true;
794 wake_up(&rdev->irq.vblank_queue);
795 }
796 if (atomic_read(&rdev->irq.pflip[0]))
797 radeon_crtc_handle_flip(rdev, 0);
798 }
799 if (status & RADEON_CRTC2_VBLANK_STAT) {
800 if (rdev->irq.crtc_vblank_int[1]) {
801 drm_handle_vblank(rdev->ddev, 1);
802 rdev->pm.vblank_sync = true;
803 wake_up(&rdev->irq.vblank_queue);
804 }
805 if (atomic_read(&rdev->irq.pflip[1]))
806 radeon_crtc_handle_flip(rdev, 1);
807 }
808 if (status & RADEON_FP_DETECT_STAT) {
809 queue_hotplug = true;
810 DRM_DEBUG("HPD1\n");
811 }
812 if (status & RADEON_FP2_DETECT_STAT) {
813 queue_hotplug = true;
814 DRM_DEBUG("HPD2\n");
815 }
816 status = r100_irq_ack(rdev);
817 }
818 if (queue_hotplug)
819 schedule_work(&rdev->hotplug_work);
820 if (rdev->msi_enabled) {
821 switch (rdev->family) {
822 case CHIP_RS400:
823 case CHIP_RS480:
824 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
825 WREG32(RADEON_AIC_CNTL, msi_rearm);
826 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
827 break;
828 default:
829 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
830 break;
831 }
832 }
833 return IRQ_HANDLED;
834}
835
836u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
837{
838 if (crtc == 0)
839 return RREG32(RADEON_CRTC_CRNT_FRAME);
840 else
841 return RREG32(RADEON_CRTC2_CRNT_FRAME);
842}
843
844/* Who ever call radeon_fence_emit should call ring_lock and ask
845 * for enough space (today caller are ib schedule and buffer move) */
846void r100_fence_ring_emit(struct radeon_device *rdev,
847 struct radeon_fence *fence)
848{
849 struct radeon_ring *ring = &rdev->ring[fence->ring];
850
851 /* We have to make sure that caches are flushed before
852 * CPU might read something from VRAM. */
853 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
854 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
855 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
856 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
857 /* Wait until IDLE & CLEAN */
858 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
859 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
860 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
861 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
862 RADEON_HDP_READ_BUFFER_INVALIDATE);
863 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
864 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
865 /* Emit fence sequence & fire IRQ */
866 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
867 radeon_ring_write(ring, fence->seq);
868 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
869 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
870}
871
872bool r100_semaphore_ring_emit(struct radeon_device *rdev,
873 struct radeon_ring *ring,
874 struct radeon_semaphore *semaphore,
875 bool emit_wait)
876{
877 /* Unused on older asics, since we don't have semaphores or multiple rings */
878 BUG();
879 return false;
880}
881
882int r100_copy_blit(struct radeon_device *rdev,
883 uint64_t src_offset,
884 uint64_t dst_offset,
885 unsigned num_gpu_pages,
886 struct radeon_fence **fence)
887{
888 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
889 uint32_t cur_pages;
890 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
891 uint32_t pitch;
892 uint32_t stride_pixels;
893 unsigned ndw;
894 int num_loops;
895 int r = 0;
896
897 /* radeon limited to 16k stride */
898 stride_bytes &= 0x3fff;
899 /* radeon pitch is /64 */
900 pitch = stride_bytes / 64;
901 stride_pixels = stride_bytes / 4;
902 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
903
904 /* Ask for enough room for blit + flush + fence */
905 ndw = 64 + (10 * num_loops);
906 r = radeon_ring_lock(rdev, ring, ndw);
907 if (r) {
908 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
909 return -EINVAL;
910 }
911 while (num_gpu_pages > 0) {
912 cur_pages = num_gpu_pages;
913 if (cur_pages > 8191) {
914 cur_pages = 8191;
915 }
916 num_gpu_pages -= cur_pages;
917
918 /* pages are in Y direction - height
919 page width in X direction - width */
920 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
921 radeon_ring_write(ring,
922 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
923 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
924 RADEON_GMC_SRC_CLIPPING |
925 RADEON_GMC_DST_CLIPPING |
926 RADEON_GMC_BRUSH_NONE |
927 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
928 RADEON_GMC_SRC_DATATYPE_COLOR |
929 RADEON_ROP3_S |
930 RADEON_DP_SRC_SOURCE_MEMORY |
931 RADEON_GMC_CLR_CMP_CNTL_DIS |
932 RADEON_GMC_WR_MSK_DIS);
933 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
934 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
935 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
936 radeon_ring_write(ring, 0);
937 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
938 radeon_ring_write(ring, num_gpu_pages);
939 radeon_ring_write(ring, num_gpu_pages);
940 radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
941 }
942 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
943 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
944 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
945 radeon_ring_write(ring,
946 RADEON_WAIT_2D_IDLECLEAN |
947 RADEON_WAIT_HOST_IDLECLEAN |
948 RADEON_WAIT_DMA_GUI_IDLE);
949 if (fence) {
950 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
951 }
952 radeon_ring_unlock_commit(rdev, ring);
953 return r;
954}
955
956static int r100_cp_wait_for_idle(struct radeon_device *rdev)
957{
958 unsigned i;
959 u32 tmp;
960
961 for (i = 0; i < rdev->usec_timeout; i++) {
962 tmp = RREG32(R_000E40_RBBM_STATUS);
963 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
964 return 0;
965 }
966 udelay(1);
967 }
968 return -1;
969}
970
971void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
972{
973 int r;
974
975 r = radeon_ring_lock(rdev, ring, 2);
976 if (r) {
977 return;
978 }
979 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
980 radeon_ring_write(ring,
981 RADEON_ISYNC_ANY2D_IDLE3D |
982 RADEON_ISYNC_ANY3D_IDLE2D |
983 RADEON_ISYNC_WAIT_IDLEGUI |
984 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
985 radeon_ring_unlock_commit(rdev, ring);
986}
987
988
989/* Load the microcode for the CP */
990static int r100_cp_init_microcode(struct radeon_device *rdev)
991{
992 const char *fw_name = NULL;
993 int err;
994
995 DRM_DEBUG_KMS("\n");
996
997 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
998 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
999 (rdev->family == CHIP_RS200)) {
1000 DRM_INFO("Loading R100 Microcode\n");
1001 fw_name = FIRMWARE_R100;
1002 } else if ((rdev->family == CHIP_R200) ||
1003 (rdev->family == CHIP_RV250) ||
1004 (rdev->family == CHIP_RV280) ||
1005 (rdev->family == CHIP_RS300)) {
1006 DRM_INFO("Loading R200 Microcode\n");
1007 fw_name = FIRMWARE_R200;
1008 } else if ((rdev->family == CHIP_R300) ||
1009 (rdev->family == CHIP_R350) ||
1010 (rdev->family == CHIP_RV350) ||
1011 (rdev->family == CHIP_RV380) ||
1012 (rdev->family == CHIP_RS400) ||
1013 (rdev->family == CHIP_RS480)) {
1014 DRM_INFO("Loading R300 Microcode\n");
1015 fw_name = FIRMWARE_R300;
1016 } else if ((rdev->family == CHIP_R420) ||
1017 (rdev->family == CHIP_R423) ||
1018 (rdev->family == CHIP_RV410)) {
1019 DRM_INFO("Loading R400 Microcode\n");
1020 fw_name = FIRMWARE_R420;
1021 } else if ((rdev->family == CHIP_RS690) ||
1022 (rdev->family == CHIP_RS740)) {
1023 DRM_INFO("Loading RS690/RS740 Microcode\n");
1024 fw_name = FIRMWARE_RS690;
1025 } else if (rdev->family == CHIP_RS600) {
1026 DRM_INFO("Loading RS600 Microcode\n");
1027 fw_name = FIRMWARE_RS600;
1028 } else if ((rdev->family == CHIP_RV515) ||
1029 (rdev->family == CHIP_R520) ||
1030 (rdev->family == CHIP_RV530) ||
1031 (rdev->family == CHIP_R580) ||
1032 (rdev->family == CHIP_RV560) ||
1033 (rdev->family == CHIP_RV570)) {
1034 DRM_INFO("Loading R500 Microcode\n");
1035 fw_name = FIRMWARE_R520;
1036 }
1037
1038 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1039 if (err) {
1040 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
1041 fw_name);
1042 } else if (rdev->me_fw->size % 8) {
1043 printk(KERN_ERR
1044 "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
1045 rdev->me_fw->size, fw_name);
1046 err = -EINVAL;
1047 release_firmware(rdev->me_fw);
1048 rdev->me_fw = NULL;
1049 }
1050 return err;
1051}
1052
1053u32 r100_gfx_get_rptr(struct radeon_device *rdev,
1054 struct radeon_ring *ring)
1055{
1056 u32 rptr;
1057
1058 if (rdev->wb.enabled)
1059 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
1060 else
1061 rptr = RREG32(RADEON_CP_RB_RPTR);
1062
1063 return rptr;
1064}
1065
1066u32 r100_gfx_get_wptr(struct radeon_device *rdev,
1067 struct radeon_ring *ring)
1068{
1069 u32 wptr;
1070
1071 wptr = RREG32(RADEON_CP_RB_WPTR);
1072
1073 return wptr;
1074}
1075
1076void r100_gfx_set_wptr(struct radeon_device *rdev,
1077 struct radeon_ring *ring)
1078{
1079 WREG32(RADEON_CP_RB_WPTR, ring->wptr);
1080 (void)RREG32(RADEON_CP_RB_WPTR);
1081}
1082
1083static void r100_cp_load_microcode(struct radeon_device *rdev)
1084{
1085 const __be32 *fw_data;
1086 int i, size;
1087
1088 if (r100_gui_wait_for_idle(rdev)) {
1089 printk(KERN_WARNING "Failed to wait GUI idle while "
1090 "programming pipes. Bad things might happen.\n");
1091 }
1092
1093 if (rdev->me_fw) {
1094 size = rdev->me_fw->size / 4;
1095 fw_data = (const __be32 *)&rdev->me_fw->data[0];
1096 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
1097 for (i = 0; i < size; i += 2) {
1098 WREG32(RADEON_CP_ME_RAM_DATAH,
1099 be32_to_cpup(&fw_data[i]));
1100 WREG32(RADEON_CP_ME_RAM_DATAL,
1101 be32_to_cpup(&fw_data[i + 1]));
1102 }
1103 }
1104}
1105
1106int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1107{
1108 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1109 unsigned rb_bufsz;
1110 unsigned rb_blksz;
1111 unsigned max_fetch;
1112 unsigned pre_write_timer;
1113 unsigned pre_write_limit;
1114 unsigned indirect2_start;
1115 unsigned indirect1_start;
1116 uint32_t tmp;
1117 int r;
1118
1119 if (r100_debugfs_cp_init(rdev)) {
1120 DRM_ERROR("Failed to register debugfs file for CP !\n");
1121 }
1122 if (!rdev->me_fw) {
1123 r = r100_cp_init_microcode(rdev);
1124 if (r) {
1125 DRM_ERROR("Failed to load firmware!\n");
1126 return r;
1127 }
1128 }
1129
1130 /* Align ring size */
1131 rb_bufsz = order_base_2(ring_size / 8);
1132 ring_size = (1 << (rb_bufsz + 1)) * 4;
1133 r100_cp_load_microcode(rdev);
1134 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
1135 RADEON_CP_PACKET2);
1136 if (r) {
1137 return r;
1138 }
1139 /* Each time the cp read 1024 bytes (16 dword/quadword) update
1140 * the rptr copy in system ram */
1141 rb_blksz = 9;
1142 /* cp will read 128bytes at a time (4 dwords) */
1143 max_fetch = 1;
1144 ring->align_mask = 16 - 1;
1145 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
1146 pre_write_timer = 64;
1147 /* Force CP_RB_WPTR write if written more than one time before the
1148 * delay expire
1149 */
1150 pre_write_limit = 0;
1151 /* Setup the cp cache like this (cache size is 96 dwords) :
1152 * RING 0 to 15
1153 * INDIRECT1 16 to 79
1154 * INDIRECT2 80 to 95
1155 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1156 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
1157 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1158 * Idea being that most of the gpu cmd will be through indirect1 buffer
1159 * so it gets the bigger cache.
1160 */
1161 indirect2_start = 80;
1162 indirect1_start = 16;
1163 /* cp setup */
1164 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
1165 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
1166 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
1167 REG_SET(RADEON_MAX_FETCH, max_fetch));
1168#ifdef __BIG_ENDIAN
1169 tmp |= RADEON_BUF_SWAP_32BIT;
1170#endif
1171 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
1172
1173 /* Set ring address */
1174 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
1175 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
1176 /* Force read & write ptr to 0 */
1177 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
1178 WREG32(RADEON_CP_RB_RPTR_WR, 0);
1179 ring->wptr = 0;
1180 WREG32(RADEON_CP_RB_WPTR, ring->wptr);
1181
1182 /* set the wb address whether it's enabled or not */
1183 WREG32(R_00070C_CP_RB_RPTR_ADDR,
1184 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
1185 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
1186
1187 if (rdev->wb.enabled)
1188 WREG32(R_000770_SCRATCH_UMSK, 0xff);
1189 else {
1190 tmp |= RADEON_RB_NO_UPDATE;
1191 WREG32(R_000770_SCRATCH_UMSK, 0);
1192 }
1193
1194 WREG32(RADEON_CP_RB_CNTL, tmp);
1195 udelay(10);
1196 /* Set cp mode to bus mastering & enable cp*/
1197 WREG32(RADEON_CP_CSQ_MODE,
1198 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1199 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1200 WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
1201 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1202 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1203
1204 /* at this point everything should be setup correctly to enable master */
1205 pci_set_master(rdev->pdev);
1206
1207 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1208 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1209 if (r) {
1210 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
1211 return r;
1212 }
1213 ring->ready = true;
1214 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1215
1216 if (!ring->rptr_save_reg /* not resuming from suspend */
1217 && radeon_ring_supports_scratch_reg(rdev, ring)) {
1218 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
1219 if (r) {
1220 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
1221 ring->rptr_save_reg = 0;
1222 }
1223 }
1224 return 0;
1225}
1226
1227void r100_cp_fini(struct radeon_device *rdev)
1228{
1229 if (r100_cp_wait_for_idle(rdev)) {
1230 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
1231 }
1232 /* Disable ring */
1233 r100_cp_disable(rdev);
1234 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
1235 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1236 DRM_INFO("radeon: cp finalized\n");
1237}
1238
1239void r100_cp_disable(struct radeon_device *rdev)
1240{
1241 /* Disable ring */
1242 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1243 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1244 WREG32(RADEON_CP_CSQ_MODE, 0);
1245 WREG32(RADEON_CP_CSQ_CNTL, 0);
1246 WREG32(R_000770_SCRATCH_UMSK, 0);
1247 if (r100_gui_wait_for_idle(rdev)) {
1248 printk(KERN_WARNING "Failed to wait GUI idle while "
1249 "programming pipes. Bad things might happen.\n");
1250 }
1251}
1252
1253/*
1254 * CS functions
1255 */
1256int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1257 struct radeon_cs_packet *pkt,
1258 unsigned idx,
1259 unsigned reg)
1260{
1261 int r;
1262 u32 tile_flags = 0;
1263 u32 tmp;
1264 struct radeon_cs_reloc *reloc;
1265 u32 value;
1266
1267 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1268 if (r) {
1269 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1270 idx, reg);
1271 radeon_cs_dump_packet(p, pkt);
1272 return r;
1273 }
1274
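	/*
	 * Sketch of the packing this relies on (inferred from the masks used
	 * below): bits [21:0] of the pitch/offset register hold the surface
	 * offset in 1KB units (hence gpu_offset >> 10), while the upper bits
	 * carry the pitch and tile flags and are preserved from userspace.
	 */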
1275 value = radeon_get_ib_value(p, idx);
1276 tmp = value & 0x003fffff;
1277 tmp += (((u32)reloc->gpu_offset) >> 10);
1278
1279 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1280 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1281 tile_flags |= RADEON_DST_TILE_MACRO;
1282 if (reloc->tiling_flags & RADEON_TILING_MICRO) {
1283 if (reg == RADEON_SRC_PITCH_OFFSET) {
1284 DRM_ERROR("Cannot src blit from microtiled surface\n");
1285 radeon_cs_dump_packet(p, pkt);
1286 return -EINVAL;
1287 }
1288 tile_flags |= RADEON_DST_TILE_MICRO;
1289 }
1290
1291 tmp |= tile_flags;
1292 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
1293 } else
1294 p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
1295 return 0;
1296}
1297
1298int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1299 struct radeon_cs_packet *pkt,
1300 int idx)
1301{
1302 unsigned c, i;
1303 struct radeon_cs_reloc *reloc;
1304 struct r100_cs_track *track;
1305 int r = 0;
1306 volatile uint32_t *ib;
1307 u32 idx_value;
1308
1309 ib = p->ib.ptr;
1310 track = (struct r100_cs_track *)p->track;
1311 c = radeon_get_ib_value(p, idx++) & 0x1F;
1312 if (c > 16) {
1313 DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
1314 pkt->opcode);
1315 radeon_cs_dump_packet(p, pkt);
1316 return -EINVAL;
1317 }
1318 track->num_arrays = c;
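	/*
	 * Packet layout handled below, as implied by the parsing code: the
	 * first dword holds the array count in its low 5 bits; arrays are
	 * then described in pairs, one dword packing the two element sizes
	 * (bits 8..14 and 24..30) followed by one address dword per array,
	 * i.e. a stride of 3 dwords per pair, with a trailing single entry
	 * when the count is odd.
	 */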
1319 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1320 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1321 if (r) {
1322 DRM_ERROR("No reloc for packet3 %d\n",
1323 pkt->opcode);
1324 radeon_cs_dump_packet(p, pkt);
1325 return r;
1326 }
1327 idx_value = radeon_get_ib_value(p, idx);
1328 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1329
1330 track->arrays[i + 0].esize = idx_value >> 8;
1331 track->arrays[i + 0].robj = reloc->robj;
1332 track->arrays[i + 0].esize &= 0x7F;
1333 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1334 if (r) {
1335 DRM_ERROR("No reloc for packet3 %d\n",
1336 pkt->opcode);
1337 radeon_cs_dump_packet(p, pkt);
1338 return r;
1339 }
1340 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
1341 track->arrays[i + 1].robj = reloc->robj;
1342 track->arrays[i + 1].esize = idx_value >> 24;
1343 track->arrays[i + 1].esize &= 0x7F;
1344 }
1345 if (c & 1) {
1346 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1347 if (r) {
1348 DRM_ERROR("No reloc for packet3 %d\n",
1349 pkt->opcode);
1350 radeon_cs_dump_packet(p, pkt);
1351 return r;
1352 }
1353 idx_value = radeon_get_ib_value(p, idx);
1354 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1355 track->arrays[i + 0].robj = reloc->robj;
1356 track->arrays[i + 0].esize = idx_value >> 8;
1357 track->arrays[i + 0].esize &= 0x7F;
1358 }
1359 return r;
1360}
1361
1362int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1363 struct radeon_cs_packet *pkt,
1364 const unsigned *auth, unsigned n,
1365 radeon_packet0_check_t check)
1366{
1367 unsigned reg;
1368 unsigned i, j, m;
1369 unsigned idx;
1370 int r;
1371
1372 idx = pkt->idx + 1;
1373 reg = pkt->reg;
1374	/* Check that the register falls into the register range
1375	 * determined by the number of entries (n) in the
1376	 * safe register bitmap.
1377	 */
1378 if (pkt->one_reg_wr) {
1379 if ((reg >> 7) > n) {
1380 return -EINVAL;
1381 }
1382 } else {
1383 if (((reg + (pkt->count << 2)) >> 7) > n) {
1384 return -EINVAL;
1385 }
1386 }
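	/*
	 * Each u32 of the safe-register bitmap covers 32 dword registers,
	 * i.e. 0x80 bytes of register space, so the word index is reg >> 7
	 * and the bit index is (reg >> 2) & 31. For example, a register at
	 * offset 0x1434 maps to word 0x28, bit 13.
	 */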
1387 for (i = 0; i <= pkt->count; i++, idx++) {
1388 j = (reg >> 7);
1389 m = 1 << ((reg >> 2) & 31);
1390 if (auth[j] & m) {
1391 r = check(p, pkt, idx, reg);
1392 if (r) {
1393 return r;
1394 }
1395 }
1396 if (pkt->one_reg_wr) {
1397 if (!(auth[j] & m)) {
1398 break;
1399 }
1400 } else {
1401 reg += 4;
1402 }
1403 }
1404 return 0;
1405}
1406
1407/**
1408 * r100_cs_packet_next_vline() - parse userspace VLINE packet
1409 * @parser: parser structure holding parsing context.
1410 *
1411 * Userspace sends a special sequence for VLINE waits.
1412 * PACKET0 - VLINE_START_END + value
1413 * PACKET0 - WAIT_UNTIL +_value
1414 * RELOC (P3) - crtc_id in reloc.
1415 *
1416 * This function parses this and relocates the VLINE START END
1417 * and WAIT UNTIL packets to the correct crtc.
1418 * It also detects a switched off crtc and nulls out the
1419 * wait in that case.
1420 */
1421int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1422{
1423 struct drm_mode_object *obj;
1424 struct drm_crtc *crtc;
1425 struct radeon_crtc *radeon_crtc;
1426 struct radeon_cs_packet p3reloc, waitreloc;
1427 int crtc_id;
1428 int r;
1429 uint32_t header, h_idx, reg;
1430 volatile uint32_t *ib;
1431
1432 ib = p->ib.ptr;
1433
1434 /* parse the wait until */
1435 r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
1436 if (r)
1437 return r;
1438
1439	/* check it's a wait until with only 1 count */
1440 if (waitreloc.reg != RADEON_WAIT_UNTIL ||
1441 waitreloc.count != 0) {
1442 DRM_ERROR("vline wait had illegal wait until segment\n");
1443 return -EINVAL;
1444 }
1445
1446 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
1447 DRM_ERROR("vline wait had illegal wait until\n");
1448 return -EINVAL;
1449 }
1450
1451 /* jump over the NOP */
1452 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1453 if (r)
1454 return r;
1455
1456 h_idx = p->idx - 2;
1457 p->idx += waitreloc.count + 2;
1458 p->idx += p3reloc.count + 2;
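	/*
	 * At this point h_idx points at the start of the sequence described
	 * above, so the IB dwords are expected to be laid out as:
	 *   h_idx + 0: PACKET0 header for VLINE_START_END
	 *   h_idx + 1: VLINE_START_END value
	 *   h_idx + 2: PACKET0 header for WAIT_UNTIL
	 *   h_idx + 3: WAIT_UNTIL value
	 *   h_idx + 4: PACKET3 NOP (reloc) header
	 *   h_idx + 5: crtc_id carried in the reloc
	 * which is why crtc_id is fetched at h_idx + 5 and, for a disabled
	 * crtc, the wait is nopped out by overwriting h_idx + 2 and
	 * h_idx + 3 below.
	 */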
1459
1460 header = radeon_get_ib_value(p, h_idx);
1461 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1462 reg = R100_CP_PACKET0_GET_REG(header);
1463 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1464 if (!obj) {
1465 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1466 return -ENOENT;
1467 }
1468 crtc = obj_to_crtc(obj);
1469 radeon_crtc = to_radeon_crtc(crtc);
1470 crtc_id = radeon_crtc->crtc_id;
1471
1472 if (!crtc->enabled) {
1473 /* if the CRTC isn't enabled - we need to nop out the wait until */
1474 ib[h_idx + 2] = PACKET2(0);
1475 ib[h_idx + 3] = PACKET2(0);
1476 } else if (crtc_id == 1) {
1477 switch (reg) {
1478 case AVIVO_D1MODE_VLINE_START_END:
1479 header &= ~R300_CP_PACKET0_REG_MASK;
1480 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1481 break;
1482 case RADEON_CRTC_GUI_TRIG_VLINE:
1483 header &= ~R300_CP_PACKET0_REG_MASK;
1484 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1485 break;
1486 default:
1487 DRM_ERROR("unknown crtc reloc\n");
1488 return -EINVAL;
1489 }
1490 ib[h_idx] = header;
1491 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1492 }
1493
1494 return 0;
1495}
1496
1497static int r100_get_vtx_size(uint32_t vtx_fmt)
1498{
1499 int vtx_size;
1500 vtx_size = 2;
1501 /* ordered according to bits in spec */
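	/*
	 * Worked example of the accumulation below (assuming the base size of
	 * 2 covers the implicit X/Y coordinates): a vtx_fmt selecting W0,
	 * FPCOLOR and ST0 would yield 2 + 1 + 3 + 2 = 8 dwords per vertex.
	 */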
1502 if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
1503 vtx_size++;
1504 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
1505 vtx_size += 3;
1506 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
1507 vtx_size++;
1508 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
1509 vtx_size++;
1510 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
1511 vtx_size += 3;
1512 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
1513 vtx_size++;
1514 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
1515 vtx_size++;
1516 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
1517 vtx_size += 2;
1518 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
1519 vtx_size += 2;
1520 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
1521 vtx_size++;
1522 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
1523 vtx_size += 2;
1524 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
1525 vtx_size++;
1526 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
1527 vtx_size += 2;
1528 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
1529 vtx_size++;
1530 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
1531 vtx_size++;
1532 /* blend weight */
1533 if (vtx_fmt & (0x7 << 15))
1534 vtx_size += (vtx_fmt >> 15) & 0x7;
1535 if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
1536 vtx_size += 3;
1537 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
1538 vtx_size += 2;
1539 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
1540 vtx_size++;
1541 if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
1542 vtx_size++;
1543 if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
1544 vtx_size++;
1545 if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
1546 vtx_size++;
1547 return vtx_size;
1548}
1549
1550static int r100_packet0_check(struct radeon_cs_parser *p,
1551 struct radeon_cs_packet *pkt,
1552 unsigned idx, unsigned reg)
1553{
1554 struct radeon_cs_reloc *reloc;
1555 struct r100_cs_track *track;
1556 volatile uint32_t *ib;
1557 uint32_t tmp;
1558 int r;
1559 int i, face;
1560 u32 tile_flags = 0;
1561 u32 idx_value;
1562
1563 ib = p->ib.ptr;
1564 track = (struct r100_cs_track *)p->track;
1565
1566 idx_value = radeon_get_ib_value(p, idx);
1567
1568 switch (reg) {
1569 case RADEON_CRTC_GUI_TRIG_VLINE:
1570 r = r100_cs_packet_parse_vline(p);
1571 if (r) {
1572 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1573 idx, reg);
1574 radeon_cs_dump_packet(p, pkt);
1575 return r;
1576 }
1577 break;
1578 /* FIXME: only allow PACKET3 blit? easier to check for out of
1579 * range access */
1580 case RADEON_DST_PITCH_OFFSET:
1581 case RADEON_SRC_PITCH_OFFSET:
1582 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1583 if (r)
1584 return r;
1585 break;
1586 case RADEON_RB3D_DEPTHOFFSET:
1587 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1588 if (r) {
1589 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1590 idx, reg);
1591 radeon_cs_dump_packet(p, pkt);
1592 return r;
1593 }
1594 track->zb.robj = reloc->robj;
1595 track->zb.offset = idx_value;
1596 track->zb_dirty = true;
1597 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1598 break;
1599 case RADEON_RB3D_COLOROFFSET:
1600 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1601 if (r) {
1602 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1603 idx, reg);
1604 radeon_cs_dump_packet(p, pkt);
1605 return r;
1606 }
1607 track->cb[0].robj = reloc->robj;
1608 track->cb[0].offset = idx_value;
1609 track->cb_dirty = true;
1610 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1611 break;
1612 case RADEON_PP_TXOFFSET_0:
1613 case RADEON_PP_TXOFFSET_1:
1614 case RADEON_PP_TXOFFSET_2:
1615 i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1616 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1617 if (r) {
1618 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1619 idx, reg);
1620 radeon_cs_dump_packet(p, pkt);
1621 return r;
1622 }
1623 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1624 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1625 tile_flags |= RADEON_TXO_MACRO_TILE;
1626 if (reloc->tiling_flags & RADEON_TILING_MICRO)
1627 tile_flags |= RADEON_TXO_MICRO_TILE_X2;
1628
1629 tmp = idx_value & ~(0x7 << 2);
1630 tmp |= tile_flags;
1631 ib[idx] = tmp + ((u32)reloc->gpu_offset);
1632 } else
1633 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1634 track->textures[i].robj = reloc->robj;
1635 track->tex_dirty = true;
1636 break;
1637 case RADEON_PP_CUBIC_OFFSET_T0_0:
1638 case RADEON_PP_CUBIC_OFFSET_T0_1:
1639 case RADEON_PP_CUBIC_OFFSET_T0_2:
1640 case RADEON_PP_CUBIC_OFFSET_T0_3:
1641 case RADEON_PP_CUBIC_OFFSET_T0_4:
1642 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1643 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1644 if (r) {
1645 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1646 idx, reg);
1647 radeon_cs_dump_packet(p, pkt);
1648 return r;
1649 }
1650 track->textures[0].cube_info[i].offset = idx_value;
1651 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1652 track->textures[0].cube_info[i].robj = reloc->robj;
1653 track->tex_dirty = true;
1654 break;
1655 case RADEON_PP_CUBIC_OFFSET_T1_0:
1656 case RADEON_PP_CUBIC_OFFSET_T1_1:
1657 case RADEON_PP_CUBIC_OFFSET_T1_2:
1658 case RADEON_PP_CUBIC_OFFSET_T1_3:
1659 case RADEON_PP_CUBIC_OFFSET_T1_4:
1660 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1661 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1662 if (r) {
1663 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1664 idx, reg);
1665 radeon_cs_dump_packet(p, pkt);
1666 return r;
1667 }
1668 track->textures[1].cube_info[i].offset = idx_value;
1669 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1670 track->textures[1].cube_info[i].robj = reloc->robj;
1671 track->tex_dirty = true;
1672 break;
1673 case RADEON_PP_CUBIC_OFFSET_T2_0:
1674 case RADEON_PP_CUBIC_OFFSET_T2_1:
1675 case RADEON_PP_CUBIC_OFFSET_T2_2:
1676 case RADEON_PP_CUBIC_OFFSET_T2_3:
1677 case RADEON_PP_CUBIC_OFFSET_T2_4:
1678 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1679 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1680 if (r) {
1681 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1682 idx, reg);
1683 radeon_cs_dump_packet(p, pkt);
1684 return r;
1685 }
1686 track->textures[2].cube_info[i].offset = idx_value;
1687 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1688 track->textures[2].cube_info[i].robj = reloc->robj;
1689 track->tex_dirty = true;
1690 break;
1691 case RADEON_RE_WIDTH_HEIGHT:
1692 track->maxy = ((idx_value >> 16) & 0x7FF);
1693 track->cb_dirty = true;
1694 track->zb_dirty = true;
1695 break;
1696 case RADEON_RB3D_COLORPITCH:
1697 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1698 if (r) {
1699 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1700 idx, reg);
1701 radeon_cs_dump_packet(p, pkt);
1702 return r;
1703 }
1704 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1705 if (reloc->tiling_flags & RADEON_TILING_MACRO)
1706 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1707 if (reloc->tiling_flags & RADEON_TILING_MICRO)
1708 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1709
1710 tmp = idx_value & ~(0x7 << 16);
1711 tmp |= tile_flags;
1712 ib[idx] = tmp;
1713 } else
1714 ib[idx] = idx_value;
1715
1716 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1717 track->cb_dirty = true;
1718 break;
1719 case RADEON_RB3D_DEPTHPITCH:
1720 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1721 track->zb_dirty = true;
1722 break;
1723 case RADEON_RB3D_CNTL:
1724 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1725 case 7:
1726 case 8:
1727 case 9:
1728 case 11:
1729 case 12:
1730 track->cb[0].cpp = 1;
1731 break;
1732 case 3:
1733 case 4:
1734 case 15:
1735 track->cb[0].cpp = 2;
1736 break;
1737 case 6:
1738 track->cb[0].cpp = 4;
1739 break;
1740 default:
1741 DRM_ERROR("Invalid color buffer format (%d) !\n",
1742 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1743 return -EINVAL;
1744 }
1745 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1746 track->cb_dirty = true;
1747 track->zb_dirty = true;
1748 break;
1749 case RADEON_RB3D_ZSTENCILCNTL:
1750 switch (idx_value & 0xf) {
1751 case 0:
1752 track->zb.cpp = 2;
1753 break;
1754 case 2:
1755 case 3:
1756 case 4:
1757 case 5:
1758 case 9:
1759 case 11:
1760 track->zb.cpp = 4;
1761 break;
1762 default:
1763 break;
1764 }
1765 track->zb_dirty = true;
1766 break;
1767 case RADEON_RB3D_ZPASS_ADDR:
1768 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1769 if (r) {
1770 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1771 idx, reg);
1772 radeon_cs_dump_packet(p, pkt);
1773 return r;
1774 }
1775 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1776 break;
1777 case RADEON_PP_CNTL:
1778 {
1779 uint32_t temp = idx_value >> 4;
1780 for (i = 0; i < track->num_texture; i++)
1781 track->textures[i].enabled = !!(temp & (1 << i));
1782 track->tex_dirty = true;
1783 }
1784 break;
1785 case RADEON_SE_VF_CNTL:
1786 track->vap_vf_cntl = idx_value;
1787 break;
1788 case RADEON_SE_VTX_FMT:
1789 track->vtx_size = r100_get_vtx_size(idx_value);
1790 break;
1791 case RADEON_PP_TEX_SIZE_0:
1792 case RADEON_PP_TEX_SIZE_1:
1793 case RADEON_PP_TEX_SIZE_2:
1794 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1795 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1796 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1797 track->tex_dirty = true;
1798 break;
1799 case RADEON_PP_TEX_PITCH_0:
1800 case RADEON_PP_TEX_PITCH_1:
1801 case RADEON_PP_TEX_PITCH_2:
1802 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1803 track->textures[i].pitch = idx_value + 32;
1804 track->tex_dirty = true;
1805 break;
1806 case RADEON_PP_TXFILTER_0:
1807 case RADEON_PP_TXFILTER_1:
1808 case RADEON_PP_TXFILTER_2:
1809 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1810 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1811 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1812 tmp = (idx_value >> 23) & 0x7;
1813 if (tmp == 2 || tmp == 6)
1814 track->textures[i].roundup_w = false;
1815 tmp = (idx_value >> 27) & 0x7;
1816 if (tmp == 2 || tmp == 6)
1817 track->textures[i].roundup_h = false;
1818 track->tex_dirty = true;
1819 break;
1820 case RADEON_PP_TXFORMAT_0:
1821 case RADEON_PP_TXFORMAT_1:
1822 case RADEON_PP_TXFORMAT_2:
1823 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1824 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1825 track->textures[i].use_pitch = 1;
1826 } else {
1827 track->textures[i].use_pitch = 0;
1828 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1829 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1830 }
1831 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1832 track->textures[i].tex_coord_type = 2;
1833 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1834 case RADEON_TXFORMAT_I8:
1835 case RADEON_TXFORMAT_RGB332:
1836 case RADEON_TXFORMAT_Y8:
1837 track->textures[i].cpp = 1;
1838 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1839 break;
1840 case RADEON_TXFORMAT_AI88:
1841 case RADEON_TXFORMAT_ARGB1555:
1842 case RADEON_TXFORMAT_RGB565:
1843 case RADEON_TXFORMAT_ARGB4444:
1844 case RADEON_TXFORMAT_VYUY422:
1845 case RADEON_TXFORMAT_YVYU422:
1846 case RADEON_TXFORMAT_SHADOW16:
1847 case RADEON_TXFORMAT_LDUDV655:
1848 case RADEON_TXFORMAT_DUDV88:
1849 track->textures[i].cpp = 2;
1850 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1851 break;
1852 case RADEON_TXFORMAT_ARGB8888:
1853 case RADEON_TXFORMAT_RGBA8888:
1854 case RADEON_TXFORMAT_SHADOW32:
1855 case RADEON_TXFORMAT_LDUDUV8888:
1856 track->textures[i].cpp = 4;
1857 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1858 break;
1859 case RADEON_TXFORMAT_DXT1:
1860 track->textures[i].cpp = 1;
1861 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1862 break;
1863 case RADEON_TXFORMAT_DXT23:
1864 case RADEON_TXFORMAT_DXT45:
1865 track->textures[i].cpp = 1;
1866 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1867 break;
1868 }
1869 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1870 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1871 track->tex_dirty = true;
1872 break;
1873 case RADEON_PP_CUBIC_FACES_0:
1874 case RADEON_PP_CUBIC_FACES_1:
1875 case RADEON_PP_CUBIC_FACES_2:
1876 tmp = idx_value;
1877 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1878 for (face = 0; face < 4; face++) {
1879 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1880 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1881 }
1882 track->tex_dirty = true;
1883 break;
1884 default:
1885 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1886 reg, idx);
1887 return -EINVAL;
1888 }
1889 return 0;
1890}
1891
1892int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1893 struct radeon_cs_packet *pkt,
1894 struct radeon_bo *robj)
1895{
1896 unsigned idx;
1897 u32 value;
1898 idx = pkt->idx + 1;
1899 value = radeon_get_ib_value(p, idx + 2);
1900 if ((value + 1) > radeon_bo_size(robj)) {
1901 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1902 "(need %u have %lu) !\n",
1903 value + 1,
1904 radeon_bo_size(robj));
1905 return -EINVAL;
1906 }
1907 return 0;
1908}
1909
1910static int r100_packet3_check(struct radeon_cs_parser *p,
1911 struct radeon_cs_packet *pkt)
1912{
1913 struct radeon_cs_reloc *reloc;
1914 struct r100_cs_track *track;
1915 unsigned idx;
1916 volatile uint32_t *ib;
1917 int r;
1918
1919 ib = p->ib.ptr;
1920 idx = pkt->idx + 1;
1921 track = (struct r100_cs_track *)p->track;
1922 switch (pkt->opcode) {
1923 case PACKET3_3D_LOAD_VBPNTR:
1924 r = r100_packet3_load_vbpntr(p, pkt, idx);
1925 if (r)
1926 return r;
1927 break;
1928 case PACKET3_INDX_BUFFER:
1929 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1930 if (r) {
1931 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1932 radeon_cs_dump_packet(p, pkt);
1933 return r;
1934 }
1935 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
1936 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1937 if (r) {
1938 return r;
1939 }
1940 break;
1941 case 0x23:
1942 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1943 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1944 if (r) {
1945 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1946 radeon_cs_dump_packet(p, pkt);
1947 return r;
1948 }
1949 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
1950 track->num_arrays = 1;
1951 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1952
1953 track->arrays[0].robj = reloc->robj;
1954 track->arrays[0].esize = track->vtx_size;
1955
1956 track->max_indx = radeon_get_ib_value(p, idx+1);
1957
1958 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1959 track->immd_dwords = pkt->count - 1;
1960 r = r100_cs_track_check(p->rdev, track);
1961 if (r)
1962 return r;
1963 break;
1964 case PACKET3_3D_DRAW_IMMD:
1965 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1966 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1967 return -EINVAL;
1968 }
1969 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1970 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1971 track->immd_dwords = pkt->count - 1;
1972 r = r100_cs_track_check(p->rdev, track);
1973 if (r)
1974 return r;
1975 break;
1976 /* triggers drawing using in-packet vertex data */
1977 case PACKET3_3D_DRAW_IMMD_2:
1978 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1979 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1980 return -EINVAL;
1981 }
1982 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1983 track->immd_dwords = pkt->count;
1984 r = r100_cs_track_check(p->rdev, track);
1985 if (r)
1986 return r;
1987 break;
1988 /* triggers drawing using in-packet vertex data */
1989 case PACKET3_3D_DRAW_VBUF_2:
1990 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1991 r = r100_cs_track_check(p->rdev, track);
1992 if (r)
1993 return r;
1994 break;
1995 /* triggers drawing of vertex buffers setup elsewhere */
1996 case PACKET3_3D_DRAW_INDX_2:
1997 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1998 r = r100_cs_track_check(p->rdev, track);
1999 if (r)
2000 return r;
2001 break;
2002 /* triggers drawing using indices to vertex buffer */
2003 case PACKET3_3D_DRAW_VBUF:
2004 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2005 r = r100_cs_track_check(p->rdev, track);
2006 if (r)
2007 return r;
2008 break;
2009 /* triggers drawing of vertex buffers setup elsewhere */
2010 case PACKET3_3D_DRAW_INDX:
2011 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2012 r = r100_cs_track_check(p->rdev, track);
2013 if (r)
2014 return r;
2015 break;
2016 /* triggers drawing using indices to vertex buffer */
2017 case PACKET3_3D_CLEAR_HIZ:
2018 case PACKET3_3D_CLEAR_ZMASK:
2019 if (p->rdev->hyperz_filp != p->filp)
2020 return -EINVAL;
2021 break;
2022 case PACKET3_NOP:
2023 break;
2024 default:
2025 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2026 return -EINVAL;
2027 }
2028 return 0;
2029}
2030
2031int r100_cs_parse(struct radeon_cs_parser *p)
2032{
2033 struct radeon_cs_packet pkt;
2034 struct r100_cs_track *track;
2035 int r;
2036
2037 track = kzalloc(sizeof(*track), GFP_KERNEL);
2038 if (!track)
2039 return -ENOMEM;
2040 r100_cs_track_clear(p->rdev, track);
2041 p->track = track;
2042 do {
2043 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2044 if (r) {
2045 return r;
2046 }
2047 p->idx += pkt.count + 2;
2048 switch (pkt.type) {
2049 case RADEON_PACKET_TYPE0:
2050 if (p->rdev->family >= CHIP_R200)
2051 r = r100_cs_parse_packet0(p, &pkt,
2052 p->rdev->config.r100.reg_safe_bm,
2053 p->rdev->config.r100.reg_safe_bm_size,
2054 &r200_packet0_check);
2055 else
2056 r = r100_cs_parse_packet0(p, &pkt,
2057 p->rdev->config.r100.reg_safe_bm,
2058 p->rdev->config.r100.reg_safe_bm_size,
2059 &r100_packet0_check);
2060 break;
2061 case RADEON_PACKET_TYPE2:
2062 break;
2063 case RADEON_PACKET_TYPE3:
2064 r = r100_packet3_check(p, &pkt);
2065 break;
2066 default:
2067 DRM_ERROR("Unknown packet type %d !\n",
2068 pkt.type);
2069 return -EINVAL;
2070 }
2071 if (r)
2072 return r;
2073 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2074 return 0;
2075}
2076
2077static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2078{
2079 DRM_ERROR("pitch %d\n", t->pitch);
2080 DRM_ERROR("use_pitch %d\n", t->use_pitch);
2081 DRM_ERROR("width %d\n", t->width);
2082 DRM_ERROR("width_11 %d\n", t->width_11);
2083 DRM_ERROR("height %d\n", t->height);
2084 DRM_ERROR("height_11 %d\n", t->height_11);
2085 DRM_ERROR("num levels %d\n", t->num_levels);
2086 DRM_ERROR("depth %d\n", t->txdepth);
2087 DRM_ERROR("bpp %d\n", t->cpp);
2088 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2089 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2090 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2091 DRM_ERROR("compress format %d\n", t->compress_format);
2092}
2093
2094static int r100_track_compress_size(int compress_format, int w, int h)
2095{
2096 int block_width, block_height, block_bytes;
2097 int wblocks, hblocks;
2098 int min_wblocks;
2099 int sz;
2100
2101 block_width = 4;
2102 block_height = 4;
2103
2104 switch (compress_format) {
2105 case R100_TRACK_COMP_DXT1:
2106 block_bytes = 8;
2107 min_wblocks = 4;
2108 break;
2109 default:
2110 case R100_TRACK_COMP_DXT35:
2111 block_bytes = 16;
2112 min_wblocks = 2;
2113 break;
2114 }
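	/*
	 * Worked example: a 64x64 DXT1 texture is 16x16 blocks of 8 bytes,
	 * i.e. 2048 bytes, while the same size in DXT3/DXT5 uses 16-byte
	 * blocks and takes 4096 bytes (min_wblocks only matters for very
	 * narrow textures).
	 */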
2115
2116 hblocks = (h + block_height - 1) / block_height;
2117 wblocks = (w + block_width - 1) / block_width;
2118 if (wblocks < min_wblocks)
2119 wblocks = min_wblocks;
2120 sz = wblocks * hblocks * block_bytes;
2121 return sz;
2122}
2123
2124static int r100_cs_track_cube(struct radeon_device *rdev,
2125 struct r100_cs_track *track, unsigned idx)
2126{
2127 unsigned face, w, h;
2128 struct radeon_bo *cube_robj;
2129 unsigned long size;
2130 unsigned compress_format = track->textures[idx].compress_format;
2131
2132 for (face = 0; face < 5; face++) {
2133 cube_robj = track->textures[idx].cube_info[face].robj;
2134 w = track->textures[idx].cube_info[face].width;
2135 h = track->textures[idx].cube_info[face].height;
2136
2137 if (compress_format) {
2138 size = r100_track_compress_size(compress_format, w, h);
2139 } else
2140 size = w * h;
2141 size *= track->textures[idx].cpp;
2142
2143 size += track->textures[idx].cube_info[face].offset;
2144
2145 if (size > radeon_bo_size(cube_robj)) {
2146 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2147 size, radeon_bo_size(cube_robj));
2148 r100_cs_track_texture_print(&track->textures[idx]);
2149 return -1;
2150 }
2151 }
2152 return 0;
2153}
2154
2155static int r100_cs_track_texture_check(struct radeon_device *rdev,
2156 struct r100_cs_track *track)
2157{
2158 struct radeon_bo *robj;
2159 unsigned long size;
2160 unsigned u, i, w, h, d;
2161 int ret;
2162
2163 for (u = 0; u < track->num_texture; u++) {
2164 if (!track->textures[u].enabled)
2165 continue;
2166 if (track->textures[u].lookup_disable)
2167 continue;
2168 robj = track->textures[u].robj;
2169 if (robj == NULL) {
2170 DRM_ERROR("No texture bound to unit %u\n", u);
2171 return -EINVAL;
2172 }
2173 size = 0;
2174 for (i = 0; i <= track->textures[u].num_levels; i++) {
2175 if (track->textures[u].use_pitch) {
2176 if (rdev->family < CHIP_R300)
2177 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2178 else
2179 w = track->textures[u].pitch / (1 << i);
2180 } else {
2181 w = track->textures[u].width;
2182 if (rdev->family >= CHIP_RV515)
2183 w |= track->textures[u].width_11;
2184 w = w / (1 << i);
2185 if (track->textures[u].roundup_w)
2186 w = roundup_pow_of_two(w);
2187 }
2188 h = track->textures[u].height;
2189 if (rdev->family >= CHIP_RV515)
2190 h |= track->textures[u].height_11;
2191 h = h / (1 << i);
2192 if (track->textures[u].roundup_h)
2193 h = roundup_pow_of_two(h);
2194 if (track->textures[u].tex_coord_type == 1) {
2195 d = (1 << track->textures[u].txdepth) / (1 << i);
2196 if (!d)
2197 d = 1;
2198 } else {
2199 d = 1;
2200 }
2201 if (track->textures[u].compress_format) {
2202
2203 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2204 /* compressed textures are block based */
2205 } else
2206 size += w * h * d;
2207 }
2208 size *= track->textures[u].cpp;
2209
2210 switch (track->textures[u].tex_coord_type) {
2211 case 0:
2212 case 1:
2213 break;
2214 case 2:
2215 if (track->separate_cube) {
2216 ret = r100_cs_track_cube(rdev, track, u);
2217 if (ret)
2218 return ret;
2219 } else
2220 size *= 6;
2221 break;
2222 default:
2223 DRM_ERROR("Invalid texture coordinate type %u for unit "
2224 "%u\n", track->textures[u].tex_coord_type, u);
2225 return -EINVAL;
2226 }
2227 if (size > radeon_bo_size(robj)) {
2228 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2229 "%lu\n", u, size, radeon_bo_size(robj));
2230 r100_cs_track_texture_print(&track->textures[u]);
2231 return -EINVAL;
2232 }
2233 }
2234 return 0;
2235}
2236
2237int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2238{
2239 unsigned i;
2240 unsigned long size;
2241 unsigned prim_walk;
2242 unsigned nverts;
2243 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2244
2245 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2246 !track->blend_read_enable)
2247 num_cb = 0;
2248
2249 for (i = 0; i < num_cb; i++) {
2250 if (track->cb[i].robj == NULL) {
2251 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2252 return -EINVAL;
2253 }
2254 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2255 size += track->cb[i].offset;
2256 if (size > radeon_bo_size(track->cb[i].robj)) {
2257 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2258 "(need %lu have %lu) !\n", i, size,
2259 radeon_bo_size(track->cb[i].robj));
2260 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2261 i, track->cb[i].pitch, track->cb[i].cpp,
2262 track->cb[i].offset, track->maxy);
2263 return -EINVAL;
2264 }
2265 }
2266 track->cb_dirty = false;
2267
2268 if (track->zb_dirty && track->z_enabled) {
2269 if (track->zb.robj == NULL) {
2270 DRM_ERROR("[drm] No buffer for z buffer !\n");
2271 return -EINVAL;
2272 }
2273 size = track->zb.pitch * track->zb.cpp * track->maxy;
2274 size += track->zb.offset;
2275 if (size > radeon_bo_size(track->zb.robj)) {
2276 DRM_ERROR("[drm] Buffer too small for z buffer "
2277 "(need %lu have %lu) !\n", size,
2278 radeon_bo_size(track->zb.robj));
2279 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2280 track->zb.pitch, track->zb.cpp,
2281 track->zb.offset, track->maxy);
2282 return -EINVAL;
2283 }
2284 }
2285 track->zb_dirty = false;
2286
2287 if (track->aa_dirty && track->aaresolve) {
2288 if (track->aa.robj == NULL) {
2289 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2290 return -EINVAL;
2291 }
2292 /* I believe the format comes from colorbuffer0. */
2293 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2294 size += track->aa.offset;
2295 if (size > radeon_bo_size(track->aa.robj)) {
2296 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2297 "(need %lu have %lu) !\n", i, size,
2298 radeon_bo_size(track->aa.robj));
2299 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2300 i, track->aa.pitch, track->cb[0].cpp,
2301 track->aa.offset, track->maxy);
2302 return -EINVAL;
2303 }
2304 }
2305 track->aa_dirty = false;
2306
2307 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2308 if (track->vap_vf_cntl & (1 << 14)) {
2309 nverts = track->vap_alt_nverts;
2310 } else {
2311 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2312 }
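	/*
	 * The checks below mirror the three PRIM_WALK fetch modes as used in
	 * this file: mode 1 fetches vertices through indices, so each array
	 * must cover max_indx entries; mode 2 walks a vertex list, so each
	 * array must be large enough for the vertices that will be fetched;
	 * mode 3 expects the vertex data inline in the packet, so the dword
	 * count must equal vtx_size * nverts.
	 */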
2313 switch (prim_walk) {
2314 case 1:
2315 for (i = 0; i < track->num_arrays; i++) {
2316 size = track->arrays[i].esize * track->max_indx * 4;
2317 if (track->arrays[i].robj == NULL) {
2318 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2319 "bound\n", prim_walk, i);
2320 return -EINVAL;
2321 }
2322 if (size > radeon_bo_size(track->arrays[i].robj)) {
2323 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2324 "need %lu dwords have %lu dwords\n",
2325 prim_walk, i, size >> 2,
2326 radeon_bo_size(track->arrays[i].robj)
2327 >> 2);
2328 DRM_ERROR("Max indices %u\n", track->max_indx);
2329 return -EINVAL;
2330 }
2331 }
2332 break;
2333 case 2:
2334 for (i = 0; i < track->num_arrays; i++) {
2335 size = track->arrays[i].esize * (nverts - 1) * 4;
2336 if (track->arrays[i].robj == NULL) {
2337 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2338 "bound\n", prim_walk, i);
2339 return -EINVAL;
2340 }
2341 if (size > radeon_bo_size(track->arrays[i].robj)) {
2342 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2343 "need %lu dwords have %lu dwords\n",
2344 prim_walk, i, size >> 2,
2345 radeon_bo_size(track->arrays[i].robj)
2346 >> 2);
2347 return -EINVAL;
2348 }
2349 }
2350 break;
2351 case 3:
2352 size = track->vtx_size * nverts;
2353 if (size != track->immd_dwords) {
2354 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
2355 track->immd_dwords, size);
2356 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2357 nverts, track->vtx_size);
2358 return -EINVAL;
2359 }
2360 break;
2361 default:
2362 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2363 prim_walk);
2364 return -EINVAL;
2365 }
2366
2367 if (track->tex_dirty) {
2368 track->tex_dirty = false;
2369 return r100_cs_track_texture_check(rdev, track);
2370 }
2371 return 0;
2372}
2373
2374void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2375{
2376 unsigned i, face;
2377
2378 track->cb_dirty = true;
2379 track->zb_dirty = true;
2380 track->tex_dirty = true;
2381 track->aa_dirty = true;
2382
2383 if (rdev->family < CHIP_R300) {
2384 track->num_cb = 1;
2385 if (rdev->family <= CHIP_RS200)
2386 track->num_texture = 3;
2387 else
2388 track->num_texture = 6;
2389 track->maxy = 2048;
2390 track->separate_cube = 1;
2391 } else {
2392 track->num_cb = 4;
2393 track->num_texture = 16;
2394 track->maxy = 4096;
2395 track->separate_cube = 0;
2396 track->aaresolve = false;
2397 track->aa.robj = NULL;
2398 }
2399
2400 for (i = 0; i < track->num_cb; i++) {
2401 track->cb[i].robj = NULL;
2402 track->cb[i].pitch = 8192;
2403 track->cb[i].cpp = 16;
2404 track->cb[i].offset = 0;
2405 }
2406 track->z_enabled = true;
2407 track->zb.robj = NULL;
2408 track->zb.pitch = 8192;
2409 track->zb.cpp = 4;
2410 track->zb.offset = 0;
2411 track->vtx_size = 0x7F;
2412 track->immd_dwords = 0xFFFFFFFFUL;
2413 track->num_arrays = 11;
2414 track->max_indx = 0x00FFFFFFUL;
2415 for (i = 0; i < track->num_arrays; i++) {
2416 track->arrays[i].robj = NULL;
2417 track->arrays[i].esize = 0x7F;
2418 }
2419 for (i = 0; i < track->num_texture; i++) {
2420 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2421 track->textures[i].pitch = 16536;
2422 track->textures[i].width = 16536;
2423 track->textures[i].height = 16536;
2424 track->textures[i].width_11 = 1 << 11;
2425 track->textures[i].height_11 = 1 << 11;
2426 track->textures[i].num_levels = 12;
2427 if (rdev->family <= CHIP_RS200) {
2428 track->textures[i].tex_coord_type = 0;
2429 track->textures[i].txdepth = 0;
2430 } else {
2431 track->textures[i].txdepth = 16;
2432 track->textures[i].tex_coord_type = 1;
2433 }
2434 track->textures[i].cpp = 64;
2435 track->textures[i].robj = NULL;
2436		/* CS IB emission code makes sure texture units are disabled */
2437 track->textures[i].enabled = false;
2438 track->textures[i].lookup_disable = false;
2439 track->textures[i].roundup_w = true;
2440 track->textures[i].roundup_h = true;
2441 if (track->separate_cube)
2442 for (face = 0; face < 5; face++) {
2443 track->textures[i].cube_info[face].robj = NULL;
2444 track->textures[i].cube_info[face].width = 16536;
2445 track->textures[i].cube_info[face].height = 16536;
2446 track->textures[i].cube_info[face].offset = 0;
2447 }
2448 }
2449}
2450
2451/*
2452 * Global GPU functions
2453 */
2454static void r100_errata(struct radeon_device *rdev)
2455{
2456 rdev->pll_errata = 0;
2457
2458 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2459 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2460 }
2461
2462 if (rdev->family == CHIP_RV100 ||
2463 rdev->family == CHIP_RS100 ||
2464 rdev->family == CHIP_RS200) {
2465 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2466 }
2467}
2468
2469static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2470{
2471 unsigned i;
2472 uint32_t tmp;
2473
2474 for (i = 0; i < rdev->usec_timeout; i++) {
2475 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2476 if (tmp >= n) {
2477 return 0;
2478 }
2479 DRM_UDELAY(1);
2480 }
2481 return -1;
2482}
2483
2484int r100_gui_wait_for_idle(struct radeon_device *rdev)
2485{
2486 unsigned i;
2487 uint32_t tmp;
2488
2489 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2490 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
2491 " Bad things might happen.\n");
2492 }
2493 for (i = 0; i < rdev->usec_timeout; i++) {
2494 tmp = RREG32(RADEON_RBBM_STATUS);
2495 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2496 return 0;
2497 }
2498 DRM_UDELAY(1);
2499 }
2500 return -1;
2501}
2502
2503int r100_mc_wait_for_idle(struct radeon_device *rdev)
2504{
2505 unsigned i;
2506 uint32_t tmp;
2507
2508 for (i = 0; i < rdev->usec_timeout; i++) {
2509 /* read MC_STATUS */
2510 tmp = RREG32(RADEON_MC_STATUS);
2511 if (tmp & RADEON_MC_IDLE) {
2512 return 0;
2513 }
2514 DRM_UDELAY(1);
2515 }
2516 return -1;
2517}
2518
2519bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2520{
2521 u32 rbbm_status;
2522
2523 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2524 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2525 radeon_ring_lockup_update(rdev, ring);
2526 return false;
2527 }
2528 return radeon_ring_test_lockup(rdev, ring);
2529}
2530
2531/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2532void r100_enable_bm(struct radeon_device *rdev)
2533{
2534 uint32_t tmp;
2535 /* Enable bus mastering */
2536 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2537 WREG32(RADEON_BUS_CNTL, tmp);
2538}
2539
2540void r100_bm_disable(struct radeon_device *rdev)
2541{
2542 u32 tmp;
2543
2544 /* disable bus mastering */
2545 tmp = RREG32(R_000030_BUS_CNTL);
2546 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2547 mdelay(1);
2548 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2549 mdelay(1);
2550 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2551 tmp = RREG32(RADEON_BUS_CNTL);
2552 mdelay(1);
2553 pci_clear_master(rdev->pdev);
2554 mdelay(1);
2555}
2556
2557int r100_asic_reset(struct radeon_device *rdev)
2558{
2559 struct r100_mc_save save;
2560 u32 status, tmp;
2561 int ret = 0;
2562
2563 status = RREG32(R_000E40_RBBM_STATUS);
2564 if (!G_000E40_GUI_ACTIVE(status)) {
2565 return 0;
2566 }
2567 r100_mc_stop(rdev, &save);
2568 status = RREG32(R_000E40_RBBM_STATUS);
2569 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2570 /* stop CP */
2571 WREG32(RADEON_CP_CSQ_CNTL, 0);
2572 tmp = RREG32(RADEON_CP_RB_CNTL);
2573 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2574 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2575 WREG32(RADEON_CP_RB_WPTR, 0);
2576 WREG32(RADEON_CP_RB_CNTL, tmp);
2577 /* save PCI state */
2578 pci_save_state(rdev->pdev);
2579 /* disable bus mastering */
2580 r100_bm_disable(rdev);
2581 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2582 S_0000F0_SOFT_RESET_RE(1) |
2583 S_0000F0_SOFT_RESET_PP(1) |
2584 S_0000F0_SOFT_RESET_RB(1));
2585 RREG32(R_0000F0_RBBM_SOFT_RESET);
2586 mdelay(500);
2587 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2588 mdelay(1);
2589 status = RREG32(R_000E40_RBBM_STATUS);
2590 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2591 /* reset CP */
2592 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2593 RREG32(R_0000F0_RBBM_SOFT_RESET);
2594 mdelay(500);
2595 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2596 mdelay(1);
2597 status = RREG32(R_000E40_RBBM_STATUS);
2598 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2599 /* restore PCI & busmastering */
2600 pci_restore_state(rdev->pdev);
2601 r100_enable_bm(rdev);
2602 /* Check if GPU is idle */
2603 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2604 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2605 dev_err(rdev->dev, "failed to reset GPU\n");
2606 ret = -1;
2607 } else
2608		dev_info(rdev->dev, "GPU reset succeeded\n");
2609 r100_mc_resume(rdev, &save);
2610 return ret;
2611}
2612
2613void r100_set_common_regs(struct radeon_device *rdev)
2614{
2615 struct drm_device *dev = rdev->ddev;
2616 bool force_dac2 = false;
2617 u32 tmp;
2618
2619 /* set these so they don't interfere with anything */
2620 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2621 WREG32(RADEON_SUBPIC_CNTL, 0);
2622 WREG32(RADEON_VIPH_CONTROL, 0);
2623 WREG32(RADEON_I2C_CNTL_1, 0);
2624 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2625 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2626 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2627
2628 /* always set up dac2 on rn50 and some rv100 as lots
2629 * of servers seem to wire it up to a VGA port but
2630 * don't report it in the bios connector
2631 * table.
2632 */
2633 switch (dev->pdev->device) {
2634 /* RN50 */
2635 case 0x515e:
2636 case 0x5969:
2637 force_dac2 = true;
2638 break;
2639 /* RV100*/
2640 case 0x5159:
2641 case 0x515a:
2642 /* DELL triple head servers */
2643 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2644 ((dev->pdev->subsystem_device == 0x016c) ||
2645 (dev->pdev->subsystem_device == 0x016d) ||
2646 (dev->pdev->subsystem_device == 0x016e) ||
2647 (dev->pdev->subsystem_device == 0x016f) ||
2648 (dev->pdev->subsystem_device == 0x0170) ||
2649 (dev->pdev->subsystem_device == 0x017d) ||
2650 (dev->pdev->subsystem_device == 0x017e) ||
2651 (dev->pdev->subsystem_device == 0x0183) ||
2652 (dev->pdev->subsystem_device == 0x018a) ||
2653 (dev->pdev->subsystem_device == 0x019a)))
2654 force_dac2 = true;
2655 break;
2656 }
2657
2658 if (force_dac2) {
2659 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2660 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2661 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2662
2663 /* For CRT on DAC2, don't turn it on if BIOS didn't
2664		   enable it, even if it's detected.
2665 */
2666
2667 /* force it to crtc0 */
2668 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2669 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2670 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2671
2672 /* set up the TV DAC */
2673 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2674 RADEON_TV_DAC_STD_MASK |
2675 RADEON_TV_DAC_RDACPD |
2676 RADEON_TV_DAC_GDACPD |
2677 RADEON_TV_DAC_BDACPD |
2678 RADEON_TV_DAC_BGADJ_MASK |
2679 RADEON_TV_DAC_DACADJ_MASK);
2680 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2681 RADEON_TV_DAC_NHOLD |
2682 RADEON_TV_DAC_STD_PS2 |
2683 (0x58 << 16));
2684
2685 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2686 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2687 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2688 }
2689
2690 /* switch PM block to ACPI mode */
2691 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2692 tmp &= ~RADEON_PM_MODE_SEL;
2693 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2694
2695}
2696
2697/*
2698 * VRAM info
2699 */
2700static void r100_vram_get_type(struct radeon_device *rdev)
2701{
2702 uint32_t tmp;
2703
2704 rdev->mc.vram_is_ddr = false;
2705 if (rdev->flags & RADEON_IS_IGP)
2706 rdev->mc.vram_is_ddr = true;
2707 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2708 rdev->mc.vram_is_ddr = true;
2709 if ((rdev->family == CHIP_RV100) ||
2710 (rdev->family == CHIP_RS100) ||
2711 (rdev->family == CHIP_RS200)) {
2712 tmp = RREG32(RADEON_MEM_CNTL);
2713 if (tmp & RV100_HALF_MODE) {
2714 rdev->mc.vram_width = 32;
2715 } else {
2716 rdev->mc.vram_width = 64;
2717 }
2718 if (rdev->flags & RADEON_SINGLE_CRTC) {
2719 rdev->mc.vram_width /= 4;
2720 rdev->mc.vram_is_ddr = true;
2721 }
2722 } else if (rdev->family <= CHIP_RV280) {
2723 tmp = RREG32(RADEON_MEM_CNTL);
2724 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2725 rdev->mc.vram_width = 128;
2726 } else {
2727 rdev->mc.vram_width = 64;
2728 }
2729 } else {
2730 /* newer IGPs */
2731 rdev->mc.vram_width = 128;
2732 }
2733}
2734
2735static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2736{
2737 u32 aper_size;
2738 u8 byte;
2739
2740 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2741
2742	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
2743	 * that is, cards that have the 2nd generation multifunction PCI interface.
2744	 */
2745 if (rdev->family == CHIP_RV280 ||
2746 rdev->family >= CHIP_RV350) {
2747 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2748 ~RADEON_HDP_APER_CNTL);
2749 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2750 return aper_size * 2;
2751 }
2752
2753 /* Older cards have all sorts of funny issues to deal with. First
2754 * check if it's a multifunction card by reading the PCI config
2755 * header type... Limit those to one aperture size
2756 */
2757 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2758 if (byte & 0x80) {
2759 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2760 DRM_INFO("Limiting VRAM to one aperture\n");
2761 return aper_size;
2762 }
2763
2764 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2765	 * has set it up. We don't write this as it's broken on some ASICs but
2766 * we expect the BIOS to have done the right thing (might be too optimistic...)
2767 */
2768 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2769 return aper_size * 2;
2770 return aper_size;
2771}
2772
2773void r100_vram_init_sizes(struct radeon_device *rdev)
2774{
2775 u64 config_aper_size;
2776
2777 /* work out accessible VRAM */
2778 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2779 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2780 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2781	/* FIXME: we don't use the second aperture yet, even though we could */
2782 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2783 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2784 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2785 if (rdev->flags & RADEON_IS_IGP) {
2786 uint32_t tom;
2787 /* read NB_TOM to get the amount of ram stolen for the GPU */
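		/* NB_TOM appears to pack the top and bottom of the stolen area
		 * in 64KB units (top in bits 31:16, bottom in bits 15:0), so
		 * e.g. tom = 0x07ff0000 works out to
		 * (0x7ff - 0 + 1) * 64KB = 128MB below.
		 */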
2788 tom = RREG32(RADEON_NB_TOM);
2789 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2790 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2791 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2792 } else {
2793 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2794 /* Some production boards of m6 will report 0
2795 * if it's 8 MB
2796 */
2797 if (rdev->mc.real_vram_size == 0) {
2798 rdev->mc.real_vram_size = 8192 * 1024;
2799 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2800 }
2801		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
2802		 * Novell bug 204882, along with lots of Ubuntu ones.
2803		 */
2804 if (rdev->mc.aper_size > config_aper_size)
2805 config_aper_size = rdev->mc.aper_size;
2806
2807 if (config_aper_size > rdev->mc.real_vram_size)
2808 rdev->mc.mc_vram_size = config_aper_size;
2809 else
2810 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2811 }
2812}
2813
2814void r100_vga_set_state(struct radeon_device *rdev, bool state)
2815{
2816 uint32_t temp;
2817
2818 temp = RREG32(RADEON_CONFIG_CNTL);
2819 if (state == false) {
2820 temp &= ~RADEON_CFG_VGA_RAM_EN;
2821 temp |= RADEON_CFG_VGA_IO_DIS;
2822 } else {
2823 temp &= ~RADEON_CFG_VGA_IO_DIS;
2824 }
2825 WREG32(RADEON_CONFIG_CNTL, temp);
2826}
2827
2828static void r100_mc_init(struct radeon_device *rdev)
2829{
2830 u64 base;
2831
2832 r100_vram_get_type(rdev);
2833 r100_vram_init_sizes(rdev);
2834 base = rdev->mc.aper_base;
2835 if (rdev->flags & RADEON_IS_IGP)
2836 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2837 radeon_vram_location(rdev, &rdev->mc, base);
2838 rdev->mc.gtt_base_align = 0;
2839 if (!(rdev->flags & RADEON_IS_AGP))
2840 radeon_gtt_location(rdev, &rdev->mc);
2841 radeon_update_bandwidth_info(rdev);
2842}
2843
2844
2845/*
2846 * Indirect registers accessor
2847 */
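/*
 * Rough access pattern implemented below: the 6-bit PLL register index is
 * written to CLOCK_CNTL_INDEX (with RADEON_PLL_WR_EN set for writes), the
 * data is then moved through CLOCK_CNTL_DATA, and the two errata helpers
 * are called after the index write and after the data access respectively,
 * all under pll_idx_lock.
 */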
2848void r100_pll_errata_after_index(struct radeon_device *rdev)
2849{
2850 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2851 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2852 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2853 }
2854}
2855
2856static void r100_pll_errata_after_data(struct radeon_device *rdev)
2857{
2858	/* This workaround is necessary on RV100, RS100 and RS200 chips,
2859	 * or the chip could hang on a subsequent access.
2860	 */
2861 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2862 mdelay(5);
2863 }
2864
2865	/* This function is required to work around a hardware bug in some (all?)
2866 * revisions of the R300. This workaround should be called after every
2867 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2868 * may not be correct.
2869 */
2870 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2871 uint32_t save, tmp;
2872
2873 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2874 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2875 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2876 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2877 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2878 }
2879}
2880
2881uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2882{
2883 unsigned long flags;
2884 uint32_t data;
2885
2886 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2887 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2888 r100_pll_errata_after_index(rdev);
2889 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2890 r100_pll_errata_after_data(rdev);
2891 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2892 return data;
2893}
2894
2895void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2896{
2897 unsigned long flags;
2898
2899 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2900 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2901 r100_pll_errata_after_index(rdev);
2902 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2903 r100_pll_errata_after_data(rdev);
2904 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2905}
2906
2907static void r100_set_safe_registers(struct radeon_device *rdev)
2908{
2909 if (ASIC_IS_RN50(rdev)) {
2910 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2911 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2912 } else if (rdev->family < CHIP_R200) {
2913 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2914 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2915 } else {
2916 r200_set_safe_registers(rdev);
2917 }
2918}
2919
2920/*
2921 * Debugfs info
2922 */
2923#if defined(CONFIG_DEBUG_FS)
2924static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2925{
2926 struct drm_info_node *node = (struct drm_info_node *) m->private;
2927 struct drm_device *dev = node->minor->dev;
2928 struct radeon_device *rdev = dev->dev_private;
2929 uint32_t reg, value;
2930 unsigned i;
2931
2932 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2933 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2934 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2935 for (i = 0; i < 64; i++) {
2936 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2937 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2938 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2939 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2940 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2941 }
2942 return 0;
2943}
2944
2945static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2946{
2947 struct drm_info_node *node = (struct drm_info_node *) m->private;
2948 struct drm_device *dev = node->minor->dev;
2949 struct radeon_device *rdev = dev->dev_private;
2950 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2951 uint32_t rdp, wdp;
2952 unsigned count, i, j;
2953
2954 radeon_ring_free_size(rdev, ring);
2955 rdp = RREG32(RADEON_CP_RB_RPTR);
2956 wdp = RREG32(RADEON_CP_RB_WPTR);
2957 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2958 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2959 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2960 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2961 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2962 seq_printf(m, "%u dwords in ring\n", count);
2963 if (ring->ready) {
2964 for (j = 0; j <= count; j++) {
2965 i = (rdp + j) & ring->ptr_mask;
2966 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2967 }
2968 }
2969 return 0;
2970}
2971
2972
2973static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2974{
2975 struct drm_info_node *node = (struct drm_info_node *) m->private;
2976 struct drm_device *dev = node->minor->dev;
2977 struct radeon_device *rdev = dev->dev_private;
2978 uint32_t csq_stat, csq2_stat, tmp;
2979 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2980 unsigned i;
2981
2982 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2983 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2984 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2985 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2986 r_rptr = (csq_stat >> 0) & 0x3ff;
2987 r_wptr = (csq_stat >> 10) & 0x3ff;
2988 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2989 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2990 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2991 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2992 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2993 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2994 seq_printf(m, "Ring rptr %u\n", r_rptr);
2995 seq_printf(m, "Ring wptr %u\n", r_wptr);
2996 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2997 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2998 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2999 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
3000	/* FIXME: 0, 128 and 640 depend on the fifo setup; see r100_cp_init:
3001	 * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
3002 seq_printf(m, "Ring fifo:\n");
3003 for (i = 0; i < 256; i++) {
3004 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3005 tmp = RREG32(RADEON_CP_CSQ_DATA);
3006 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3007 }
3008 seq_printf(m, "Indirect1 fifo:\n");
3009 for (i = 256; i <= 512; i++) {
3010 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3011 tmp = RREG32(RADEON_CP_CSQ_DATA);
3012 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3013 }
3014 seq_printf(m, "Indirect2 fifo:\n");
3015	for (i = 640; i < ib2_wptr; i++) {
3016 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3017 tmp = RREG32(RADEON_CP_CSQ_DATA);
3018 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3019 }
3020 return 0;
3021}
3022
3023static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3024{
3025 struct drm_info_node *node = (struct drm_info_node *) m->private;
3026 struct drm_device *dev = node->minor->dev;
3027 struct radeon_device *rdev = dev->dev_private;
3028 uint32_t tmp;
3029
3030 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3031 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3032 tmp = RREG32(RADEON_MC_FB_LOCATION);
3033 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3034 tmp = RREG32(RADEON_BUS_CNTL);
3035 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3036 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3037 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3038 tmp = RREG32(RADEON_AGP_BASE);
3039 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3040 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3041 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3042 tmp = RREG32(0x01D0);
3043 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3044 tmp = RREG32(RADEON_AIC_LO_ADDR);
3045 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3046 tmp = RREG32(RADEON_AIC_HI_ADDR);
3047 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3048 tmp = RREG32(0x01E4);
3049 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3050 return 0;
3051}
3052
3053static struct drm_info_list r100_debugfs_rbbm_list[] = {
3054 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3055};
3056
3057static struct drm_info_list r100_debugfs_cp_list[] = {
3058 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3059 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3060};
3061
3062static struct drm_info_list r100_debugfs_mc_info_list[] = {
3063 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3064};
3065#endif
3066
3067int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3068{
3069#if defined(CONFIG_DEBUG_FS)
3070 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3071#else
3072 return 0;
3073#endif
3074}
3075
3076int r100_debugfs_cp_init(struct radeon_device *rdev)
3077{
3078#if defined(CONFIG_DEBUG_FS)
3079 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3080#else
3081 return 0;
3082#endif
3083}
3084
3085int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3086{
3087#if defined(CONFIG_DEBUG_FS)
3088 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
3089#else
3090 return 0;
3091#endif
3092}
3093
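/**
 * r100_set_surface_reg - program a surface register for a tiled buffer
 *
 * @rdev: radeon_device pointer
 * @reg: surface register index
 * @tiling_flags: RADEON_TILING_* flags requested for the buffer
 * @pitch: pitch of the buffer
 * @offset: start offset of the buffer in VRAM
 * @obj_size: size of the buffer in bytes
 *
 * Converts the generic tiling flags into the per-family surface control
 * bits and programs the surface bounds. The surface registers are 16
 * bytes apart, hence surf_index = reg * 16.
 */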
3094int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3095 uint32_t tiling_flags, uint32_t pitch,
3096 uint32_t offset, uint32_t obj_size)
3097{
3098 int surf_index = reg * 16;
3099 int flags = 0;
3100
3101 if (rdev->family <= CHIP_RS200) {
3102 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3103 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3104 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3105 if (tiling_flags & RADEON_TILING_MACRO)
3106 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3107 /* setting pitch to 0 disables tiling */
3108 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3109 == 0)
3110 pitch = 0;
3111 } else if (rdev->family <= CHIP_RV280) {
3112 if (tiling_flags & (RADEON_TILING_MACRO))
3113 flags |= R200_SURF_TILE_COLOR_MACRO;
3114 if (tiling_flags & RADEON_TILING_MICRO)
3115 flags |= R200_SURF_TILE_COLOR_MICRO;
3116 } else {
3117 if (tiling_flags & RADEON_TILING_MACRO)
3118 flags |= R300_SURF_TILE_MACRO;
3119 if (tiling_flags & RADEON_TILING_MICRO)
3120 flags |= R300_SURF_TILE_MICRO;
3121 }
3122
3123 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3124 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3125 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3126 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3127
3128 /* r100/r200 divide by 16 */
3129 if (rdev->family < CHIP_R300)
3130 flags |= pitch / 16;
3131 else
3132 flags |= pitch / 8;
3133
3134
3135 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3136 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3137 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3138 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3139 return 0;
3140}
3141
3142void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3143{
3144 int surf_index = reg * 16;
3145 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
3146}
3147
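/**
 * r100_bandwidth_update - update display watermarks
 *
 * @rdev: radeon_device pointer
 *
 * Estimates the available memory bandwidth and the drain rate of each
 * enabled CRTC, then programs GRPH_BUFFER_CNTL/GRPH2_BUFFER_CNTL with
 * stop request and critical point values so the display FIFOs do not
 * underflow (r1xx-r4xx).
 */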
3148void r100_bandwidth_update(struct radeon_device *rdev)
3149{
3150 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3151 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3152 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3153 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3154 fixed20_12 memtcas_ff[8] = {
3155 dfixed_init(1),
3156 dfixed_init(2),
3157 dfixed_init(3),
3158 dfixed_init(0),
3159 dfixed_init_half(1),
3160 dfixed_init_half(2),
3161 dfixed_init(0),
3162 };
3163 fixed20_12 memtcas_rs480_ff[8] = {
3164 dfixed_init(0),
3165 dfixed_init(1),
3166 dfixed_init(2),
3167 dfixed_init(3),
3168 dfixed_init(0),
3169 dfixed_init_half(1),
3170 dfixed_init_half(2),
3171 dfixed_init_half(3),
3172 };
3173 fixed20_12 memtcas2_ff[8] = {
3174 dfixed_init(0),
3175 dfixed_init(1),
3176 dfixed_init(2),
3177 dfixed_init(3),
3178 dfixed_init(4),
3179 dfixed_init(5),
3180 dfixed_init(6),
3181 dfixed_init(7),
3182 };
3183 fixed20_12 memtrbs[8] = {
3184 dfixed_init(1),
3185 dfixed_init_half(1),
3186 dfixed_init(2),
3187 dfixed_init_half(2),
3188 dfixed_init(3),
3189 dfixed_init_half(3),
3190 dfixed_init(4),
3191 dfixed_init_half(4)
3192 };
3193 fixed20_12 memtrbs_r4xx[8] = {
3194 dfixed_init(4),
3195 dfixed_init(5),
3196 dfixed_init(6),
3197 dfixed_init(7),
3198 dfixed_init(8),
3199 dfixed_init(9),
3200 dfixed_init(10),
3201 dfixed_init(11)
3202 };
3203 fixed20_12 min_mem_eff;
3204 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3205 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3206 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3207 disp_drain_rate2, read_return_rate;
3208 fixed20_12 time_disp1_drop_priority;
3209 int c;
3210 int cur_size = 16; /* in octawords */
3211 int critical_point = 0, critical_point2;
3212/* uint32_t read_return_rate, time_disp1_drop_priority; */
3213 int stop_req, max_stop_req;
3214 struct drm_display_mode *mode1 = NULL;
3215 struct drm_display_mode *mode2 = NULL;
3216 uint32_t pixel_bytes1 = 0;
3217 uint32_t pixel_bytes2 = 0;
3218
3219 radeon_update_display_priority(rdev);
3220
3221 if (rdev->mode_info.crtcs[0]->base.enabled) {
3222 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3223 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
3224 }
3225 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3226 if (rdev->mode_info.crtcs[1]->base.enabled) {
3227 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3228 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
3229 }
3230 }
3231
3232 min_mem_eff.full = dfixed_const_8(0);
3233 /* get modes */
3234 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3235 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3236 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3237 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3238 /* check crtc enables */
3239 if (mode2)
3240 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3241 if (mode1)
3242 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3243 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3244 }
3245
3246 /*
3247	 * determine if there is enough bandwidth for the current mode
3248 */
3249 sclk_ff = rdev->pm.sclk;
3250 mclk_ff = rdev->pm.mclk;
3251
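	/* peak memory bandwidth = mclk * bus width in bytes, doubled for DDR */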
3252 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3253 temp_ff.full = dfixed_const(temp);
3254 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3255
3256 pix_clk.full = 0;
3257 pix_clk2.full = 0;
3258 peak_disp_bw.full = 0;
3259 if (mode1) {
3260 temp_ff.full = dfixed_const(1000);
3261 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3262 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3263 temp_ff.full = dfixed_const(pixel_bytes1);
3264 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3265 }
3266 if (mode2) {
3267 temp_ff.full = dfixed_const(1000);
3268 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3269 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3270 temp_ff.full = dfixed_const(pixel_bytes2);
3271 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3272 }
3273
3274 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3275 if (peak_disp_bw.full >= mem_bw.full) {
3276 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3277 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3278 }
3279
3280 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3281 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3282 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3283 mem_trcd = ((temp >> 2) & 0x3) + 1;
3284 mem_trp = ((temp & 0x3)) + 1;
3285 mem_tras = ((temp & 0x70) >> 4) + 1;
3286 } else if (rdev->family == CHIP_R300 ||
3287 rdev->family == CHIP_R350) { /* r300, r350 */
3288 mem_trcd = (temp & 0x7) + 1;
3289 mem_trp = ((temp >> 8) & 0x7) + 1;
3290 mem_tras = ((temp >> 11) & 0xf) + 4;
3291 } else if (rdev->family == CHIP_RV350 ||
3292 rdev->family <= CHIP_RV380) {
3293 /* rv3x0 */
3294 mem_trcd = (temp & 0x7) + 3;
3295 mem_trp = ((temp >> 8) & 0x7) + 3;
3296 mem_tras = ((temp >> 11) & 0xf) + 6;
3297 } else if (rdev->family == CHIP_R420 ||
3298 rdev->family == CHIP_R423 ||
3299 rdev->family == CHIP_RV410) {
3300 /* r4xx */
3301 mem_trcd = (temp & 0xf) + 3;
3302 if (mem_trcd > 15)
3303 mem_trcd = 15;
3304 mem_trp = ((temp >> 8) & 0xf) + 3;
3305 if (mem_trp > 15)
3306 mem_trp = 15;
3307 mem_tras = ((temp >> 12) & 0x1f) + 6;
3308 if (mem_tras > 31)
3309 mem_tras = 31;
3310 } else { /* RV200, R200 */
3311 mem_trcd = (temp & 0x7) + 1;
3312 mem_trp = ((temp >> 8) & 0x7) + 1;
3313 mem_tras = ((temp >> 12) & 0xf) + 4;
3314 }
3315 /* convert to FF */
3316 trcd_ff.full = dfixed_const(mem_trcd);
3317 trp_ff.full = dfixed_const(mem_trp);
3318 tras_ff.full = dfixed_const(mem_tras);
3319
3320	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3321 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3322 data = (temp & (7 << 20)) >> 20;
3323 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3324 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3325 tcas_ff = memtcas_rs480_ff[data];
3326 else
3327 tcas_ff = memtcas_ff[data];
3328 } else
3329 tcas_ff = memtcas2_ff[data];
3330
3331 if (rdev->family == CHIP_RS400 ||
3332 rdev->family == CHIP_RS480) {
3333 /* extra cas latency stored in bits 23-25 0-4 clocks */
3334 data = (temp >> 23) & 0x7;
3335 if (data < 5)
3336 tcas_ff.full += dfixed_const(data);
3337 }
3338
3339 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3340 /* on the R300, Tcas is included in Trbs.
3341 */
3342 temp = RREG32(RADEON_MEM_CNTL);
3343 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3344 if (data == 1) {
3345 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3346 temp = RREG32(R300_MC_IND_INDEX);
3347 temp &= ~R300_MC_IND_ADDR_MASK;
3348 temp |= R300_MC_READ_CNTL_CD_mcind;
3349 WREG32(R300_MC_IND_INDEX, temp);
3350 temp = RREG32(R300_MC_IND_DATA);
3351 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3352 } else {
3353 temp = RREG32(R300_MC_READ_CNTL_AB);
3354 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3355 }
3356 } else {
3357 temp = RREG32(R300_MC_READ_CNTL_AB);
3358 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3359 }
3360 if (rdev->family == CHIP_RV410 ||
3361 rdev->family == CHIP_R420 ||
3362 rdev->family == CHIP_R423)
3363 trbs_ff = memtrbs_r4xx[data];
3364 else
3365 trbs_ff = memtrbs[data];
3366 tcas_ff.full += trbs_ff.full;
3367 }
3368
3369 sclk_eff_ff.full = sclk_ff.full;
3370
3371 if (rdev->flags & RADEON_IS_AGP) {
3372 fixed20_12 agpmode_ff;
3373 agpmode_ff.full = dfixed_const(radeon_agpmode);
3374 temp_ff.full = dfixed_const_666(16);
3375 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3376 }
3377 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3378
3379 if (ASIC_IS_R300(rdev)) {
3380 sclk_delay_ff.full = dfixed_const(250);
3381 } else {
3382 if ((rdev->family == CHIP_RV100) ||
3383 rdev->flags & RADEON_IS_IGP) {
3384 if (rdev->mc.vram_is_ddr)
3385 sclk_delay_ff.full = dfixed_const(41);
3386 else
3387 sclk_delay_ff.full = dfixed_const(33);
3388 } else {
3389 if (rdev->mc.vram_width == 128)
3390 sclk_delay_ff.full = dfixed_const(57);
3391 else
3392 sclk_delay_ff.full = dfixed_const(41);
3393 }
3394 }
3395
3396 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3397
3398 if (rdev->mc.vram_is_ddr) {
3399 if (rdev->mc.vram_width == 32) {
3400 k1.full = dfixed_const(40);
3401 c = 3;
3402 } else {
3403 k1.full = dfixed_const(20);
3404 c = 1;
3405 }
3406 } else {
3407 k1.full = dfixed_const(40);
3408 c = 3;
3409 }
3410
3411 temp_ff.full = dfixed_const(2);
3412 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3413 temp_ff.full = dfixed_const(c);
3414 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3415 temp_ff.full = dfixed_const(4);
3416 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3417 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3418 mc_latency_mclk.full += k1.full;
3419
3420 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3421 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3422
3423 /*
3424 HW cursor time assuming worst case of full size colour cursor.
3425 */
3426 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3427 temp_ff.full += trcd_ff.full;
3428 if (temp_ff.full < tras_ff.full)
3429 temp_ff.full = tras_ff.full;
3430 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3431
3432 temp_ff.full = dfixed_const(cur_size);
3433 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3434 /*
3435 Find the total latency for the display data.
3436 */
3437 disp_latency_overhead.full = dfixed_const(8);
3438 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3439 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3440 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3441
3442 if (mc_latency_mclk.full > mc_latency_sclk.full)
3443 disp_latency.full = mc_latency_mclk.full;
3444 else
3445 disp_latency.full = mc_latency_sclk.full;
3446
3447 /* setup Max GRPH_STOP_REQ default value */
3448 if (ASIC_IS_RV100(rdev))
3449 max_stop_req = 0x5c;
3450 else
3451 max_stop_req = 0x7c;
3452
3453 if (mode1) {
3454 /* CRTC1
3455 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3456 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3457 */
3458 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3459
3460 if (stop_req > max_stop_req)
3461 stop_req = max_stop_req;
3462
3463 /*
3464 Find the drain rate of the display buffer.
3465 */
3466 temp_ff.full = dfixed_const((16/pixel_bytes1));
3467 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3468
3469 /*
3470 Find the critical point of the display buffer.
3471 */
3472 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3473 crit_point_ff.full += dfixed_const_half(0);
3474
3475 critical_point = dfixed_trunc(crit_point_ff);
3476
3477 if (rdev->disp_priority == 2) {
3478 critical_point = 0;
3479 }
3480
3481 /*
3482 The critical point should never be above max_stop_req-4. Setting
3483 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3484 */
3485 if (max_stop_req - critical_point < 4)
3486 critical_point = 0;
3487
3488 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3489		/* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
3490 critical_point = 0x10;
3491 }
3492
3493 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3494 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3495 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3496 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3497 if ((rdev->family == CHIP_R350) &&
3498 (stop_req > 0x15)) {
3499 stop_req -= 0x10;
3500 }
3501 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3502 temp |= RADEON_GRPH_BUFFER_SIZE;
3503 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3504 RADEON_GRPH_CRITICAL_AT_SOF |
3505 RADEON_GRPH_STOP_CNTL);
3506 /*
3507 Write the result into the register.
3508 */
3509 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3510 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3511
3512#if 0
3513 if ((rdev->family == CHIP_RS400) ||
3514 (rdev->family == CHIP_RS480)) {
3515 /* attempt to program RS400 disp regs correctly ??? */
3516 temp = RREG32(RS400_DISP1_REG_CNTL);
3517 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3518 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3519 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3520 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3521 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3522 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3523 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3524 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3525 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3526 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3527 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3528 }
3529#endif
3530
3531 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3532 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3533 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3534 }
3535
3536 if (mode2) {
3537 u32 grph2_cntl;
3538 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3539
3540 if (stop_req > max_stop_req)
3541 stop_req = max_stop_req;
3542
3543 /*
3544 Find the drain rate of the display buffer.
3545 */
3546 temp_ff.full = dfixed_const((16/pixel_bytes2));
3547 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3548
3549 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3550 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3551 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3552 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3553 if ((rdev->family == CHIP_R350) &&
3554 (stop_req > 0x15)) {
3555 stop_req -= 0x10;
3556 }
3557 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3558 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3559 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3560 RADEON_GRPH_CRITICAL_AT_SOF |
3561 RADEON_GRPH_STOP_CNTL);
3562
3563 if ((rdev->family == CHIP_RS100) ||
3564 (rdev->family == CHIP_RS200))
3565 critical_point2 = 0;
3566 else {
3567 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3568 temp_ff.full = dfixed_const(temp);
3569 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3570 if (sclk_ff.full < temp_ff.full)
3571 temp_ff.full = sclk_ff.full;
3572
3573 read_return_rate.full = temp_ff.full;
3574
3575 if (mode1) {
3576 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3577 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3578 } else {
3579 time_disp1_drop_priority.full = 0;
3580 }
3581 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3582 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3583 crit_point_ff.full += dfixed_const_half(0);
3584
3585 critical_point2 = dfixed_trunc(crit_point_ff);
3586
3587 if (rdev->disp_priority == 2) {
3588 critical_point2 = 0;
3589 }
3590
3591 if (max_stop_req - critical_point2 < 4)
3592 critical_point2 = 0;
3593
3594 }
3595
3596 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3597			/* some R300 cards have a problem with this set to 0 */
3598 critical_point2 = 0x10;
3599 }
3600
3601 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3602 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3603
3604 if ((rdev->family == CHIP_RS400) ||
3605 (rdev->family == CHIP_RS480)) {
3606#if 0
3607 /* attempt to program RS400 disp2 regs correctly ??? */
3608 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3609 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3610 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3611 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3612 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3613 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3614 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3615 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3616 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3617 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3618 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3619 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3620#endif
3621 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3622 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3623 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3624 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3625 }
3626
3627 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3628 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3629 }
3630}
3631
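/**
 * r100_ring_test - basic CP ring sanity test
 *
 * @rdev: radeon_device pointer
 * @ring: ring to test
 *
 * Writes 0xCAFEDEAD to a scratch register, emits a PACKET0 write of
 * 0xDEADBEEF to that register through the ring, then polls the scratch
 * register until the new value appears or the timeout expires.
 */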
3632int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3633{
3634 uint32_t scratch;
3635 uint32_t tmp = 0;
3636 unsigned i;
3637 int r;
3638
3639 r = radeon_scratch_get(rdev, &scratch);
3640 if (r) {
3641 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3642 return r;
3643 }
3644 WREG32(scratch, 0xCAFEDEAD);
3645 r = radeon_ring_lock(rdev, ring, 2);
3646 if (r) {
3647 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3648 radeon_scratch_free(rdev, scratch);
3649 return r;
3650 }
3651 radeon_ring_write(ring, PACKET0(scratch, 0));
3652 radeon_ring_write(ring, 0xDEADBEEF);
3653 radeon_ring_unlock_commit(rdev, ring);
3654 for (i = 0; i < rdev->usec_timeout; i++) {
3655 tmp = RREG32(scratch);
3656 if (tmp == 0xDEADBEEF) {
3657 break;
3658 }
3659 DRM_UDELAY(1);
3660 }
3661 if (i < rdev->usec_timeout) {
3662 DRM_INFO("ring test succeeded in %d usecs\n", i);
3663 } else {
3664 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3665 scratch, tmp);
3666 r = -EINVAL;
3667 }
3668 radeon_scratch_free(rdev, scratch);
3669 return r;
3670}
3671
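/**
 * r100_ring_ib_execute - emit an indirect buffer on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Optionally records the expected read pointer in rptr_save_reg, then
 * writes the IB base address and size so the CP fetches the IB.
 */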
3672void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3673{
3674 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3675
3676 if (ring->rptr_save_reg) {
3677 u32 next_rptr = ring->wptr + 2 + 3;
3678 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3679 radeon_ring_write(ring, next_rptr);
3680 }
3681
3682 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3683 radeon_ring_write(ring, ib->gpu_addr);
3684 radeon_ring_write(ring, ib->length_dw);
3685}
3686
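/**
 * r100_ib_test - basic indirect buffer sanity test
 *
 * @rdev: radeon_device pointer
 * @ring: ring to schedule the IB on
 *
 * Builds a small IB that writes 0xDEADBEEF to a scratch register
 * (padded with PACKET2 nops), schedules it, waits for its fence and
 * then polls the scratch register for the expected value.
 */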
3687int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3688{
3689 struct radeon_ib ib;
3690 uint32_t scratch;
3691 uint32_t tmp = 0;
3692 unsigned i;
3693 int r;
3694
3695 r = radeon_scratch_get(rdev, &scratch);
3696 if (r) {
3697 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3698 return r;
3699 }
3700 WREG32(scratch, 0xCAFEDEAD);
3701 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3702 if (r) {
3703 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3704 goto free_scratch;
3705 }
3706 ib.ptr[0] = PACKET0(scratch, 0);
3707 ib.ptr[1] = 0xDEADBEEF;
3708 ib.ptr[2] = PACKET2(0);
3709 ib.ptr[3] = PACKET2(0);
3710 ib.ptr[4] = PACKET2(0);
3711 ib.ptr[5] = PACKET2(0);
3712 ib.ptr[6] = PACKET2(0);
3713 ib.ptr[7] = PACKET2(0);
3714 ib.length_dw = 8;
3715 r = radeon_ib_schedule(rdev, &ib, NULL);
3716 if (r) {
3717 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3718 goto free_ib;
3719 }
3720 r = radeon_fence_wait(ib.fence, false);
3721 if (r) {
3722 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3723 goto free_ib;
3724 }
3725 for (i = 0; i < rdev->usec_timeout; i++) {
3726 tmp = RREG32(scratch);
3727 if (tmp == 0xDEADBEEF) {
3728 break;
3729 }
3730 DRM_UDELAY(1);
3731 }
3732 if (i < rdev->usec_timeout) {
3733 DRM_INFO("ib test succeeded in %u usecs\n", i);
3734 } else {
3735 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3736 scratch, tmp);
3737 r = -EINVAL;
3738 }
3739free_ib:
3740 radeon_ib_free(rdev, &ib);
3741free_scratch:
3742 radeon_scratch_free(rdev, scratch);
3743 return r;
3744}
3745
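/**
 * r100_mc_stop - stop MC client activity before reprogramming the MC
 *
 * @rdev: radeon_device pointer
 * @save: structure that receives the CRTC/VGA state being saved
 *
 * Stops the CP, saves the CRTC and cursor state, disables VGA aperture
 * access and blanks the CRTCs so no memory client is active while the
 * memory controller is reprogrammed.
 */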
3746void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3747{
3748	/* Shut down the CP. We shouldn't need to do this, but better safe
3749	 * than sorry.
3750	 */
3751 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3752 WREG32(R_000740_CP_CSQ_CNTL, 0);
3753
3754	/* Save a few CRTC registers */
3755 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3756 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3757 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3758 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3759 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3760 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3761 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3762 }
3763
3764 /* Disable VGA aperture access */
3765 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3766 /* Disable cursor, overlay, crtc */
3767 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3768 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3769 S_000054_CRTC_DISPLAY_DIS(1));
3770 WREG32(R_000050_CRTC_GEN_CNTL,
3771 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3772 S_000050_CRTC_DISP_REQ_EN_B(1));
3773 WREG32(R_000420_OV0_SCALE_CNTL,
3774 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3775 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3776 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3777 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3778 S_000360_CUR2_LOCK(1));
3779 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3780 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3781 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3782 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3783 WREG32(R_000360_CUR2_OFFSET,
3784 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3785 }
3786}
3787
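/**
 * r100_mc_resume - restore display state after MC reprogramming
 *
 * @rdev: radeon_device pointer
 * @save: state previously recorded by r100_mc_stop()
 *
 * Points the CRTCs at the new VRAM base address and restores the
 * CRTC/VGA registers saved by r100_mc_stop().
 */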
3788void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3789{
3790 /* Update base address for crtc */
3791 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3792 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3793 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3794 }
3795 /* Restore CRTC registers */
3796 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3797 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3798 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3799 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3800 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3801 }
3802}
3803
3804void r100_vga_render_disable(struct radeon_device *rdev)
3805{
3806 u32 tmp;
3807
3808 tmp = RREG8(R_0003C2_GENMO_WT);
3809 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3810}
3811
3812static void r100_debugfs(struct radeon_device *rdev)
3813{
3814 int r;
3815
3816 r = r100_debugfs_mc_info_init(rdev);
3817 if (r)
3818 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3819}
3820
3821static void r100_mc_program(struct radeon_device *rdev)
3822{
3823 struct r100_mc_save save;
3824
3825 /* Stops all mc clients */
3826 r100_mc_stop(rdev, &save);
3827 if (rdev->flags & RADEON_IS_AGP) {
3828 WREG32(R_00014C_MC_AGP_LOCATION,
3829 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3830 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3831 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3832 if (rdev->family > CHIP_RV200)
3833 WREG32(R_00015C_AGP_BASE_2,
3834 upper_32_bits(rdev->mc.agp_base) & 0xff);
3835 } else {
3836 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3837 WREG32(R_000170_AGP_BASE, 0);
3838 if (rdev->family > CHIP_RV200)
3839 WREG32(R_00015C_AGP_BASE_2, 0);
3840 }
3841 /* Wait for mc idle */
3842 if (r100_mc_wait_for_idle(rdev))
3843 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3844	/* Program MC; the address space is limited to 32 bits */
3845 WREG32(R_000148_MC_FB_LOCATION,
3846 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3847 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3848 r100_mc_resume(rdev, &save);
3849}
3850
3851static void r100_clock_startup(struct radeon_device *rdev)
3852{
3853 u32 tmp;
3854
3855 if (radeon_dynclks != -1 && radeon_dynclks)
3856 radeon_legacy_set_clock_gating(rdev, 1);
3857	/* We need to force on some of the blocks */
3858 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3859 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3860 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3861 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3862 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3863}
3864
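/**
 * r100_startup - bring the asic to an operational state
 *
 * @rdev: radeon_device pointer
 *
 * Programs the common registers, the memory controller and clocks,
 * enables the GART (PCI only), writeback, fences, IRQs, the CP ring
 * and the IB pool. Called from both r100_init() and r100_resume().
 */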
3865static int r100_startup(struct radeon_device *rdev)
3866{
3867 int r;
3868
3869 /* set common regs */
3870 r100_set_common_regs(rdev);
3871 /* program mc */
3872 r100_mc_program(rdev);
3873 /* Resume clock */
3874 r100_clock_startup(rdev);
3875 /* Initialize GART (initialize after TTM so we can allocate
3876 * memory through TTM but finalize after TTM) */
3877 r100_enable_bm(rdev);
3878 if (rdev->flags & RADEON_IS_PCI) {
3879 r = r100_pci_gart_enable(rdev);
3880 if (r)
3881 return r;
3882 }
3883
3884 /* allocate wb buffer */
3885 r = radeon_wb_init(rdev);
3886 if (r)
3887 return r;
3888
3889 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3890 if (r) {
3891 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3892 return r;
3893 }
3894
3895 /* Enable IRQ */
3896 if (!rdev->irq.installed) {
3897 r = radeon_irq_kms_init(rdev);
3898 if (r)
3899 return r;
3900 }
3901
3902 r100_irq_set(rdev);
3903 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3904 /* 1M ring buffer */
3905 r = r100_cp_init(rdev, 1024 * 1024);
3906 if (r) {
3907 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3908 return r;
3909 }
3910
3911 r = radeon_ib_pool_init(rdev);
3912 if (r) {
3913 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3914 return r;
3915 }
3916
3917 return 0;
3918}
3919
3920int r100_resume(struct radeon_device *rdev)
3921{
3922 int r;
3923
3924	/* Make sure GART is not working */
3925 if (rdev->flags & RADEON_IS_PCI)
3926 r100_pci_gart_disable(rdev);
3927 /* Resume clock before doing reset */
3928 r100_clock_startup(rdev);
3929 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3930 if (radeon_asic_reset(rdev)) {
3931 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3932 RREG32(R_000E40_RBBM_STATUS),
3933 RREG32(R_0007C0_CP_STAT));
3934 }
3935 /* post */
3936 radeon_combios_asic_init(rdev->ddev);
3937 /* Resume clock after posting */
3938 r100_clock_startup(rdev);
3939 /* Initialize surface registers */
3940 radeon_surface_init(rdev);
3941
3942 rdev->accel_working = true;
3943 r = r100_startup(rdev);
3944 if (r) {
3945 rdev->accel_working = false;
3946 }
3947 return r;
3948}
3949
3950int r100_suspend(struct radeon_device *rdev)
3951{
3952 radeon_pm_suspend(rdev);
3953 r100_cp_disable(rdev);
3954 radeon_wb_disable(rdev);
3955 r100_irq_disable(rdev);
3956 if (rdev->flags & RADEON_IS_PCI)
3957 r100_pci_gart_disable(rdev);
3958 return 0;
3959}
3960
3961void r100_fini(struct radeon_device *rdev)
3962{
3963 radeon_pm_fini(rdev);
3964 r100_cp_fini(rdev);
3965 radeon_wb_fini(rdev);
3966 radeon_ib_pool_fini(rdev);
3967 radeon_gem_fini(rdev);
3968 if (rdev->flags & RADEON_IS_PCI)
3969 r100_pci_gart_fini(rdev);
3970 radeon_agp_fini(rdev);
3971 radeon_irq_kms_fini(rdev);
3972 radeon_fence_driver_fini(rdev);
3973 radeon_bo_fini(rdev);
3974 radeon_atombios_fini(rdev);
3975 kfree(rdev->bios);
3976 rdev->bios = NULL;
3977}
3978
3979/*
3980 * Due to how kexec works, it can leave the hw fully initialised when it
3981 * boots the new kernel. However, doing our init sequence with the CP and
3982 * WB stuff set up causes GPU hangs, on the RN50 at least. So at startup
3983 * do some quick sanity checks and restore sane values to avoid this
3984 * problem.
3985 */
3986void r100_restore_sanity(struct radeon_device *rdev)
3987{
3988 u32 tmp;
3989
3990 tmp = RREG32(RADEON_CP_CSQ_CNTL);
3991 if (tmp) {
3992 WREG32(RADEON_CP_CSQ_CNTL, 0);
3993 }
3994 tmp = RREG32(RADEON_CP_RB_CNTL);
3995 if (tmp) {
3996 WREG32(RADEON_CP_RB_CNTL, 0);
3997 }
3998 tmp = RREG32(RADEON_SCRATCH_UMSK);
3999 if (tmp) {
4000 WREG32(RADEON_SCRATCH_UMSK, 0);
4001 }
4002}
4003
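/**
 * r100_init - one time asic initialization
 *
 * @rdev: radeon_device pointer
 *
 * Performs the one-time setup: debugfs registration, VGA render
 * disable, scratch and surface register init, kexec sanity restore,
 * BIOS parsing, GPU reset and post check, clock, AGP, VRAM, fence,
 * buffer manager and GART initialization, followed by r100_startup().
 */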
4004int r100_init(struct radeon_device *rdev)
4005{
4006 int r;
4007
4008 /* Register debugfs file specific to this group of asics */
4009 r100_debugfs(rdev);
4010 /* Disable VGA */
4011 r100_vga_render_disable(rdev);
4012 /* Initialize scratch registers */
4013 radeon_scratch_init(rdev);
4014 /* Initialize surface registers */
4015 radeon_surface_init(rdev);
4016	/* sanity check some registers to avoid hangs like after kexec */
4017 r100_restore_sanity(rdev);
4018 /* TODO: disable VGA need to use VGA request */
4019 /* BIOS*/
4020 if (!radeon_get_bios(rdev)) {
4021 if (ASIC_IS_AVIVO(rdev))
4022 return -EINVAL;
4023 }
4024 if (rdev->is_atom_bios) {
4025 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
4026 return -EINVAL;
4027 } else {
4028 r = radeon_combios_init(rdev);
4029 if (r)
4030 return r;
4031 }
4032 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
4033 if (radeon_asic_reset(rdev)) {
4034 dev_warn(rdev->dev,
4035 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4036 RREG32(R_000E40_RBBM_STATUS),
4037 RREG32(R_0007C0_CP_STAT));
4038 }
4039 /* check if cards are posted or not */
4040 if (radeon_boot_test_post_card(rdev) == false)
4041 return -EINVAL;
4042 /* Set asic errata */
4043 r100_errata(rdev);
4044 /* Initialize clocks */
4045 radeon_get_clock_info(rdev->ddev);
4046 /* initialize AGP */
4047 if (rdev->flags & RADEON_IS_AGP) {
4048 r = radeon_agp_init(rdev);
4049 if (r) {
4050 radeon_agp_disable(rdev);
4051 }
4052 }
4053 /* initialize VRAM */
4054 r100_mc_init(rdev);
4055 /* Fence driver */
4056 r = radeon_fence_driver_init(rdev);
4057 if (r)
4058 return r;
4059 /* Memory manager */
4060 r = radeon_bo_init(rdev);
4061 if (r)
4062 return r;
4063 if (rdev->flags & RADEON_IS_PCI) {
4064 r = r100_pci_gart_init(rdev);
4065 if (r)
4066 return r;
4067 }
4068 r100_set_safe_registers(rdev);
4069
4070 /* Initialize power management */
4071 radeon_pm_init(rdev);
4072
4073 rdev->accel_working = true;
4074 r = r100_startup(rdev);
4075 if (r) {
4076		/* Something went wrong with the accel init, so stop accel */
4077 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4078 r100_cp_fini(rdev);
4079 radeon_wb_fini(rdev);
4080 radeon_ib_pool_fini(rdev);
4081 radeon_irq_kms_fini(rdev);
4082 if (rdev->flags & RADEON_IS_PCI)
4083 r100_pci_gart_fini(rdev);
4084 rdev->accel_working = false;
4085 }
4086 return 0;
4087}
4088
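/*
 * MMIO accessors. Registers beyond the mapped MMIO aperture (or any
 * register when always_indirect is set) are reached indirectly by
 * writing the register offset to MM_INDEX and accessing MM_DATA under
 * the mmio_idx_lock.
 */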
4089uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4090 bool always_indirect)
4091{
4092 if (reg < rdev->rmmio_size && !always_indirect)
4093 return readl(((void __iomem *)rdev->rmmio) + reg);
4094 else {
4095 unsigned long flags;
4096 uint32_t ret;
4097
4098 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4099 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4100 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4101 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4102
4103 return ret;
4104 }
4105}
4106
4107void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4108 bool always_indirect)
4109{
4110 if (reg < rdev->rmmio_size && !always_indirect)
4111 writel(v, ((void __iomem *)rdev->rmmio) + reg);
4112 else {
4113 unsigned long flags;
4114
4115 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4116 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4117 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4118 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4119 }
4120}
4121
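/*
 * PIO accessors using the I/O BAR; registers outside rio_mem_size go
 * through the MM_INDEX/MM_DATA pair.
 */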
4122u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4123{
4124 if (reg < rdev->rio_mem_size)
4125 return ioread32(rdev->rio_mem + reg);
4126 else {
4127 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4128 return ioread32(rdev->rio_mem + RADEON_MM_DATA);
4129 }
4130}
4131
4132void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4133{
4134 if (reg < rdev->rio_mem_size)
4135 iowrite32(v, rdev->rio_mem + reg);
4136 else {
4137 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4138 iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
4139 }
4140}