/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <drm/drm_util.h>

#define ATOM_DEBUG

#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "amdgpu.h"

#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX	2
#define PLL_DATA	3

#define ATOM_CMD_TIMEOUT_SEC	20

typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);

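/*
 * Operand "alignment" codes select which byte(s) of a 32-bit value an
 * operand refers to.  atom_arg_mask[] and atom_arg_shift[] give the mask
 * and shift used to extract (or deposit) that field, indexed by the
 * alignment code; atom_dst_to_src[][] translates the destination
 * alignment/size encoding into the equivalent source alignment code.
 */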
static uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000,
	0xFF, 0xFF00, 0xFF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };

static int debug_depth = 0;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

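/*
 * Execute one indirect-IO (IIO) program embedded in the BIOS image at
 * 'base'.  The program shuffles bit-fields between the supplied index/data
 * values, the context's IO attribute word and a temporary, and performs
 * the actual register access; the temporary is returned at ATOM_IIO_END.
 */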
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}

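/*
 * Fetch a source operand for the current opcode.  The low three bits of
 * 'attr' select the argument class (register, parameter space, workspace,
 * data table, frame-buffer scratch, immediate, PLL or MC register), the
 * next three bits the alignment.  *ptr is advanced past the encoded
 * operand; if 'saved' is non-NULL the full unshifted value is stored there
 * so the caller can later do a read-modify-write via atom_put_dst().
 */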
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx,
					       gctx->iio[gctx->io_mode & 0x7F],
					       idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;

	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
			  ptr);
}

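/*
 * Write back a destination operand: 'val' is shifted and masked according
 * to the destination alignment, merged with the previously 'saved' raw
 * value, and stored to the location decoded from 'attr' (register, PS, WS,
 * frame-buffer scratch, PLL or MC register).
 */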
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
	struct atom_context *gctx = ctx->ctx;

	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx, val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);
	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		val64 = dst;
		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
		do_div(val64, src);
		ctx->ctx->divmul[0] = lower_32_bits(val64);
		ctx->ctx->divmul[1] = upper_32_bits(val64);
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
					DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
						  ATOM_CMD_TIMEOUT_SEC);
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	val64 = (uint64_t)dst * (uint64_t)src;
	ctx->ctx->divmul[0] = lower_32_bits(val64);
	ctx->ctx->divmul[1] = upper_32_bits(val64);
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("DEBUG output: 0x%02X\n", val);
}

static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);
	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}

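/*
 * Opcode dispatch table, indexed by the opcode byte fetched from the
 * command table.  Each entry pairs a handler with a fixed argument
 * (operand class, IO port kind, delay unit or jump condition).
 */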
static struct {
	void (*func)(atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
	{ atom_op_processds, 0 },
	{ atom_op_mul32, ATOM_ARG_PS },
	{ atom_op_mul32, ATOM_ARG_WS },
	{ atom_op_div32, ATOM_ARG_PS },
	{ atom_op_div32, ATOM_ARG_WS },
};

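/*
 * Core interpreter loop: look up the command table entry for 'index',
 * allocate its workspace, then fetch and dispatch opcodes through
 * opcode_table[] until an EOT opcode is reached or the context aborts
 * (failed nested call, or the jump watchdog fired).
 */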
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws)
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
	else
		ectx.ws = NULL;

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = amdgpu_atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}

static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

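/*
 * Scan the indirect-IO area of the BIOS and record, per IIO port id, the
 * offset of its program so atom_iio_execute() can find it later.
 */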
static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	u16 idx;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
		    strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
		    strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
	if (idx == 0)
		idx = 0x80;

	str = CSTR(idx);
	if (*str != '\0') {
		pr_info("ATOM BIOS: %s\n", str);
		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
	}

	return ctx;
}

int amdgpu_atom_asic_init(struct atom_context *ctx)
{
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	return ret;
}

void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
				   uint16_t *size, uint8_t *frev, uint8_t *crev,
				   uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
				  uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/string_helpers.h>
29
30#include <asm/unaligned.h>
31
32#include <drm/drm_util.h>
33
34#define ATOM_DEBUG
35
36#include "atomfirmware.h"
37#include "atom.h"
38#include "atom-names.h"
39#include "atom-bits.h"
40#include "amdgpu.h"
41
42#define ATOM_COND_ABOVE 0
43#define ATOM_COND_ABOVEOREQUAL 1
44#define ATOM_COND_ALWAYS 2
45#define ATOM_COND_BELOW 3
46#define ATOM_COND_BELOWOREQUAL 4
47#define ATOM_COND_EQUAL 5
48#define ATOM_COND_NOTEQUAL 6
49
50#define ATOM_PORT_ATI 0
51#define ATOM_PORT_PCI 1
52#define ATOM_PORT_SYSIO 2
53
54#define ATOM_UNIT_MICROSEC 0
55#define ATOM_UNIT_MILLISEC 1
56
57#define PLL_INDEX 2
58#define PLL_DATA 3
59
60#define ATOM_CMD_TIMEOUT_SEC 20
61
62typedef struct {
63 struct atom_context *ctx;
64 uint32_t *ps, *ws;
65 int ps_shift;
66 uint16_t start;
67 unsigned last_jump;
68 unsigned long last_jump_jiffies;
69 bool abort;
70} atom_exec_context;
71
72int amdgpu_atom_debug;
73static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
74int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
75
76static uint32_t atom_arg_mask[8] =
77 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
78 0xFF000000 };
79static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
80
81static int atom_dst_to_src[8][4] = {
82 /* translate destination alignment field to the source alignment encoding */
83 {0, 0, 0, 0},
84 {1, 2, 3, 0},
85 {1, 2, 3, 0},
86 {1, 2, 3, 0},
87 {4, 5, 6, 7},
88 {4, 5, 6, 7},
89 {4, 5, 6, 7},
90 {4, 5, 6, 7},
91};
92static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
93
94static int debug_depth;
95#ifdef ATOM_DEBUG
96static void debug_print_spaces(int n)
97{
98 while (n--)
99 printk(" ");
100}
101
102#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
103#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
104#else
105#define DEBUG(...) do { } while (0)
106#define SDEBUG(...) do { } while (0)
107#endif
108
109static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
110 uint32_t index, uint32_t data)
111{
112 uint32_t temp = 0xCDCDCDCD;
113
114 while (1)
115 switch (CU8(base)) {
116 case ATOM_IIO_NOP:
117 base++;
118 break;
119 case ATOM_IIO_READ:
120 temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
121 base += 3;
122 break;
123 case ATOM_IIO_WRITE:
124 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
125 base += 3;
126 break;
127 case ATOM_IIO_CLEAR:
128 temp &=
129 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
130 CU8(base + 2));
131 base += 3;
132 break;
133 case ATOM_IIO_SET:
134 temp |=
135 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
136 2);
137 base += 3;
138 break;
139 case ATOM_IIO_MOVE_INDEX:
140 temp &=
141 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
142 CU8(base + 3));
143 temp |=
144 ((index >> CU8(base + 2)) &
145 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
146 3);
147 base += 4;
148 break;
149 case ATOM_IIO_MOVE_DATA:
150 temp &=
151 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
152 CU8(base + 3));
153 temp |=
154 ((data >> CU8(base + 2)) &
155 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
156 3);
157 base += 4;
158 break;
159 case ATOM_IIO_MOVE_ATTR:
160 temp &=
161 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
162 CU8(base + 3));
163 temp |=
164 ((ctx->
165 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
166 CU8
167 (base
168 +
169 1))))
170 << CU8(base + 3);
171 base += 4;
172 break;
173 case ATOM_IIO_END:
174 return temp;
175 default:
176 pr_info("Unknown IIO opcode\n");
177 return 0;
178 }
179}
180
181static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
182 int *ptr, uint32_t *saved, int print)
183{
184 uint32_t idx, val = 0xCDCDCDCD, align, arg;
185 struct atom_context *gctx = ctx->ctx;
186 arg = attr & 7;
187 align = (attr >> 3) & 7;
188 switch (arg) {
189 case ATOM_ARG_REG:
190 idx = U16(*ptr);
191 (*ptr) += 2;
192 if (print)
193 DEBUG("REG[0x%04X]", idx);
194 idx += gctx->reg_block;
195 switch (gctx->io_mode) {
196 case ATOM_IO_MM:
197 val = gctx->card->reg_read(gctx->card, idx);
198 break;
199 case ATOM_IO_PCI:
200 pr_info("PCI registers are not implemented\n");
201 return 0;
202 case ATOM_IO_SYSIO:
203 pr_info("SYSIO registers are not implemented\n");
204 return 0;
205 default:
206 if (!(gctx->io_mode & 0x80)) {
207 pr_info("Bad IO mode\n");
208 return 0;
209 }
210 if (!gctx->iio[gctx->io_mode & 0x7F]) {
211 pr_info("Undefined indirect IO read method %d\n",
212 gctx->io_mode & 0x7F);
213 return 0;
214 }
215 val =
216 atom_iio_execute(gctx,
217 gctx->iio[gctx->io_mode & 0x7F],
218 idx, 0);
219 }
220 break;
221 case ATOM_ARG_PS:
222 idx = U8(*ptr);
223 (*ptr)++;
224 /* get_unaligned_le32 avoids unaligned accesses from atombios
225 * tables, noticed on a DEC Alpha. */
226 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
227 if (print)
228 DEBUG("PS[0x%02X,0x%04X]", idx, val);
229 break;
230 case ATOM_ARG_WS:
231 idx = U8(*ptr);
232 (*ptr)++;
233 if (print)
234 DEBUG("WS[0x%02X]", idx);
235 switch (idx) {
236 case ATOM_WS_QUOTIENT:
237 val = gctx->divmul[0];
238 break;
239 case ATOM_WS_REMAINDER:
240 val = gctx->divmul[1];
241 break;
242 case ATOM_WS_DATAPTR:
243 val = gctx->data_block;
244 break;
245 case ATOM_WS_SHIFT:
246 val = gctx->shift;
247 break;
248 case ATOM_WS_OR_MASK:
249 val = 1 << gctx->shift;
250 break;
251 case ATOM_WS_AND_MASK:
252 val = ~(1 << gctx->shift);
253 break;
254 case ATOM_WS_FB_WINDOW:
255 val = gctx->fb_base;
256 break;
257 case ATOM_WS_ATTRIBUTES:
258 val = gctx->io_attr;
259 break;
260 case ATOM_WS_REGPTR:
261 val = gctx->reg_block;
262 break;
263 default:
264 val = ctx->ws[idx];
265 }
266 break;
267 case ATOM_ARG_ID:
268 idx = U16(*ptr);
269 (*ptr) += 2;
270 if (print) {
271 if (gctx->data_block)
272 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
273 else
274 DEBUG("ID[0x%04X]", idx);
275 }
276 val = U32(idx + gctx->data_block);
277 break;
278 case ATOM_ARG_FB:
279 idx = U8(*ptr);
280 (*ptr)++;
281 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
282 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
283 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
284 val = 0;
285 } else
286 val = gctx->scratch[(gctx->fb_base / 4) + idx];
287 if (print)
288 DEBUG("FB[0x%02X]", idx);
289 break;
290 case ATOM_ARG_IMM:
291 switch (align) {
292 case ATOM_SRC_DWORD:
293 val = U32(*ptr);
294 (*ptr) += 4;
295 if (print)
296 DEBUG("IMM 0x%08X\n", val);
297 return val;
298 case ATOM_SRC_WORD0:
299 case ATOM_SRC_WORD8:
300 case ATOM_SRC_WORD16:
301 val = U16(*ptr);
302 (*ptr) += 2;
303 if (print)
304 DEBUG("IMM 0x%04X\n", val);
305 return val;
306 case ATOM_SRC_BYTE0:
307 case ATOM_SRC_BYTE8:
308 case ATOM_SRC_BYTE16:
309 case ATOM_SRC_BYTE24:
310 val = U8(*ptr);
311 (*ptr)++;
312 if (print)
313 DEBUG("IMM 0x%02X\n", val);
314 return val;
315 }
316 return 0;
317 case ATOM_ARG_PLL:
318 idx = U8(*ptr);
319 (*ptr)++;
320 if (print)
321 DEBUG("PLL[0x%02X]", idx);
322 val = gctx->card->pll_read(gctx->card, idx);
323 break;
324 case ATOM_ARG_MC:
325 idx = U8(*ptr);
326 (*ptr)++;
327 if (print)
328 DEBUG("MC[0x%02X]", idx);
329 val = gctx->card->mc_read(gctx->card, idx);
330 break;
331 }
332 if (saved)
333 *saved = val;
334 val &= atom_arg_mask[align];
335 val >>= atom_arg_shift[align];
336 if (print)
337 switch (align) {
338 case ATOM_SRC_DWORD:
339 DEBUG(".[31:0] -> 0x%08X\n", val);
340 break;
341 case ATOM_SRC_WORD0:
342 DEBUG(".[15:0] -> 0x%04X\n", val);
343 break;
344 case ATOM_SRC_WORD8:
345 DEBUG(".[23:8] -> 0x%04X\n", val);
346 break;
347 case ATOM_SRC_WORD16:
348 DEBUG(".[31:16] -> 0x%04X\n", val);
349 break;
350 case ATOM_SRC_BYTE0:
351 DEBUG(".[7:0] -> 0x%02X\n", val);
352 break;
353 case ATOM_SRC_BYTE8:
354 DEBUG(".[15:8] -> 0x%02X\n", val);
355 break;
356 case ATOM_SRC_BYTE16:
357 DEBUG(".[23:16] -> 0x%02X\n", val);
358 break;
359 case ATOM_SRC_BYTE24:
360 DEBUG(".[31:24] -> 0x%02X\n", val);
361 break;
362 }
363 return val;
364}
365
366static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
367{
368 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
369 switch (arg) {
370 case ATOM_ARG_REG:
371 case ATOM_ARG_ID:
372 (*ptr) += 2;
373 break;
374 case ATOM_ARG_PLL:
375 case ATOM_ARG_MC:
376 case ATOM_ARG_PS:
377 case ATOM_ARG_WS:
378 case ATOM_ARG_FB:
379 (*ptr)++;
380 break;
381 case ATOM_ARG_IMM:
382 switch (align) {
383 case ATOM_SRC_DWORD:
384 (*ptr) += 4;
385 return;
386 case ATOM_SRC_WORD0:
387 case ATOM_SRC_WORD8:
388 case ATOM_SRC_WORD16:
389 (*ptr) += 2;
390 return;
391 case ATOM_SRC_BYTE0:
392 case ATOM_SRC_BYTE8:
393 case ATOM_SRC_BYTE16:
394 case ATOM_SRC_BYTE24:
395 (*ptr)++;
396 return;
397 }
398 }
399}
400
401static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
402{
403 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
404}
405
406static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
407{
408 uint32_t val = 0xCDCDCDCD;
409
410 switch (align) {
411 case ATOM_SRC_DWORD:
412 val = U32(*ptr);
413 (*ptr) += 4;
414 break;
415 case ATOM_SRC_WORD0:
416 case ATOM_SRC_WORD8:
417 case ATOM_SRC_WORD16:
418 val = U16(*ptr);
419 (*ptr) += 2;
420 break;
421 case ATOM_SRC_BYTE0:
422 case ATOM_SRC_BYTE8:
423 case ATOM_SRC_BYTE16:
424 case ATOM_SRC_BYTE24:
425 val = U8(*ptr);
426 (*ptr)++;
427 break;
428 }
429 return val;
430}
431
432static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
433 int *ptr, uint32_t *saved, int print)
434{
435 return atom_get_src_int(ctx,
436 arg | atom_dst_to_src[(attr >> 3) &
437 7][(attr >> 6) & 3] << 3,
438 ptr, saved, print);
439}
440
441static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
442{
443 atom_skip_src_int(ctx,
444 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
445 3] << 3, ptr);
446}
447
448static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
449 int *ptr, uint32_t val, uint32_t saved)
450{
451 uint32_t align =
452 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
453 val, idx;
454 struct atom_context *gctx = ctx->ctx;
455 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
456 val <<= atom_arg_shift[align];
457 val &= atom_arg_mask[align];
458 saved &= ~atom_arg_mask[align];
459 val |= saved;
460 switch (arg) {
461 case ATOM_ARG_REG:
462 idx = U16(*ptr);
463 (*ptr) += 2;
464 DEBUG("REG[0x%04X]", idx);
465 idx += gctx->reg_block;
466 switch (gctx->io_mode) {
467 case ATOM_IO_MM:
468 if (idx == 0)
469 gctx->card->reg_write(gctx->card, idx,
470 val << 2);
471 else
472 gctx->card->reg_write(gctx->card, idx, val);
473 break;
474 case ATOM_IO_PCI:
475 pr_info("PCI registers are not implemented\n");
476 return;
477 case ATOM_IO_SYSIO:
478 pr_info("SYSIO registers are not implemented\n");
479 return;
480 default:
481 if (!(gctx->io_mode & 0x80)) {
482 pr_info("Bad IO mode\n");
483 return;
484 }
485 if (!gctx->iio[gctx->io_mode & 0xFF]) {
486 pr_info("Undefined indirect IO write method %d\n",
487 gctx->io_mode & 0x7F);
488 return;
489 }
490 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
491 idx, val);
492 }
493 break;
494 case ATOM_ARG_PS:
495 idx = U8(*ptr);
496 (*ptr)++;
497 DEBUG("PS[0x%02X]", idx);
498 ctx->ps[idx] = cpu_to_le32(val);
499 break;
500 case ATOM_ARG_WS:
501 idx = U8(*ptr);
502 (*ptr)++;
503 DEBUG("WS[0x%02X]", idx);
504 switch (idx) {
505 case ATOM_WS_QUOTIENT:
506 gctx->divmul[0] = val;
507 break;
508 case ATOM_WS_REMAINDER:
509 gctx->divmul[1] = val;
510 break;
511 case ATOM_WS_DATAPTR:
512 gctx->data_block = val;
513 break;
514 case ATOM_WS_SHIFT:
515 gctx->shift = val;
516 break;
517 case ATOM_WS_OR_MASK:
518 case ATOM_WS_AND_MASK:
519 break;
520 case ATOM_WS_FB_WINDOW:
521 gctx->fb_base = val;
522 break;
523 case ATOM_WS_ATTRIBUTES:
524 gctx->io_attr = val;
525 break;
526 case ATOM_WS_REGPTR:
527 gctx->reg_block = val;
528 break;
529 default:
530 ctx->ws[idx] = val;
531 }
532 break;
533 case ATOM_ARG_FB:
534 idx = U8(*ptr);
535 (*ptr)++;
536 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
537 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
538 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
539 } else
540 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
541 DEBUG("FB[0x%02X]", idx);
542 break;
543 case ATOM_ARG_PLL:
544 idx = U8(*ptr);
545 (*ptr)++;
546 DEBUG("PLL[0x%02X]", idx);
547 gctx->card->pll_write(gctx->card, idx, val);
548 break;
549 case ATOM_ARG_MC:
550 idx = U8(*ptr);
551 (*ptr)++;
552 DEBUG("MC[0x%02X]", idx);
553 gctx->card->mc_write(gctx->card, idx, val);
554 return;
555 }
556 switch (align) {
557 case ATOM_SRC_DWORD:
558 DEBUG(".[31:0] <- 0x%08X\n", old_val);
559 break;
560 case ATOM_SRC_WORD0:
561 DEBUG(".[15:0] <- 0x%04X\n", old_val);
562 break;
563 case ATOM_SRC_WORD8:
564 DEBUG(".[23:8] <- 0x%04X\n", old_val);
565 break;
566 case ATOM_SRC_WORD16:
567 DEBUG(".[31:16] <- 0x%04X\n", old_val);
568 break;
569 case ATOM_SRC_BYTE0:
570 DEBUG(".[7:0] <- 0x%02X\n", old_val);
571 break;
572 case ATOM_SRC_BYTE8:
573 DEBUG(".[15:8] <- 0x%02X\n", old_val);
574 break;
575 case ATOM_SRC_BYTE16:
576 DEBUG(".[23:16] <- 0x%02X\n", old_val);
577 break;
578 case ATOM_SRC_BYTE24:
579 DEBUG(".[31:24] <- 0x%02X\n", old_val);
580 break;
581 }
582}
583
584static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
585{
586 uint8_t attr = U8((*ptr)++);
587 uint32_t dst, src, saved;
588 int dptr = *ptr;
589 SDEBUG(" dst: ");
590 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
591 SDEBUG(" src: ");
592 src = atom_get_src(ctx, attr, ptr);
593 dst += src;
594 SDEBUG(" dst: ");
595 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
596}
597
598static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
599{
600 uint8_t attr = U8((*ptr)++);
601 uint32_t dst, src, saved;
602 int dptr = *ptr;
603 SDEBUG(" dst: ");
604 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
605 SDEBUG(" src: ");
606 src = atom_get_src(ctx, attr, ptr);
607 dst &= src;
608 SDEBUG(" dst: ");
609 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
610}
611
612static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
613{
614 printk("ATOM BIOS beeped!\n");
615}
616
617static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
618{
619 int idx = U8((*ptr)++);
620 int r = 0;
621
622 if (idx < ATOM_TABLE_NAMES_CNT)
623 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
624 else
625 SDEBUG(" table: %d\n", idx);
626 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
627 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
628 if (r) {
629 ctx->abort = true;
630 }
631}
632
633static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
634{
635 uint8_t attr = U8((*ptr)++);
636 uint32_t saved;
637 int dptr = *ptr;
638 attr &= 0x38;
639 attr |= atom_def_dst[attr >> 3] << 6;
640 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
641 SDEBUG(" dst: ");
642 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
643}
644
645static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
646{
647 uint8_t attr = U8((*ptr)++);
648 uint32_t dst, src;
649 SDEBUG(" src1: ");
650 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
651 SDEBUG(" src2: ");
652 src = atom_get_src(ctx, attr, ptr);
653 ctx->ctx->cs_equal = (dst == src);
654 ctx->ctx->cs_above = (dst > src);
655 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
656 ctx->ctx->cs_above ? "GT" : "LE");
657}
658
659static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
660{
661 unsigned count = U8((*ptr)++);
662 SDEBUG(" count: %d\n", count);
663 if (arg == ATOM_UNIT_MICROSEC)
664 udelay(count);
665 else if (!drm_can_sleep())
666 mdelay(count);
667 else
668 msleep(count);
669}
670
671static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
672{
673 uint8_t attr = U8((*ptr)++);
674 uint32_t dst, src;
675 SDEBUG(" src1: ");
676 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
677 SDEBUG(" src2: ");
678 src = atom_get_src(ctx, attr, ptr);
679 if (src != 0) {
680 ctx->ctx->divmul[0] = dst / src;
681 ctx->ctx->divmul[1] = dst % src;
682 } else {
683 ctx->ctx->divmul[0] = 0;
684 ctx->ctx->divmul[1] = 0;
685 }
686}
687
688static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
689{
690 uint64_t val64;
691 uint8_t attr = U8((*ptr)++);
692 uint32_t dst, src;
693 SDEBUG(" src1: ");
694 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
695 SDEBUG(" src2: ");
696 src = atom_get_src(ctx, attr, ptr);
697 if (src != 0) {
698 val64 = dst;
699 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
700 do_div(val64, src);
701 ctx->ctx->divmul[0] = lower_32_bits(val64);
702 ctx->ctx->divmul[1] = upper_32_bits(val64);
703 } else {
704 ctx->ctx->divmul[0] = 0;
705 ctx->ctx->divmul[1] = 0;
706 }
707}
708
709static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
710{
711 /* functionally, a nop */
712}
713
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", str_yes_no(execute));
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if (jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000) {
					DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
						  ATOM_CMD_TIMEOUT_SEC);
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

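/*
 * MASK: read-modify-write of the destination; the current value is ANDed
 * with an immediate mask and then ORed with the source operand.
 */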
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD) {
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	} else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

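/*
 * MUL32: 32x32 -> 64-bit multiply; the result is returned in divmul[0]
 * (low 32 bits) and divmul[1] (high 32 bits).
 */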
static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	val64 = (uint64_t)dst * (uint64_t)src;
	ctx->ctx->divmul[0] = lower_32_bits(val64);
	ctx->ctx->divmul[1] = upper_32_bits(val64);
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

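/*
 * SET_DATA_BLOCK: select the base that subsequent data-space accesses are
 * relative to. Index 0 means the start of the BIOS image, 255 means the
 * currently executing command table, anything else is looked up in the
 * master data table.
 */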
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

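/*
 * SWITCH: compare the source operand against a list of immediate cases, each
 * introduced by ATOM_CASE_MAGIC and followed by a 16-bit jump target; on a
 * match execution continues at the target, otherwise it falls through past
 * the ATOM_CASE_END marker.
 */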
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("DEBUG output: 0x%02X\n", val);
}

static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);
	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}

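/*
 * Opcode dispatch table, indexed by the opcode byte fetched from the command
 * table. The arg field selects the operand space (register, parameter space,
 * work space, frame buffer, PLL, MC) or the condition/unit/port for jumps,
 * delays and port selection.
 */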
static struct {
	void (*func)(atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
	{ atom_op_processds, 0 },
	{ atom_op_mul32, ATOM_ARG_PS },
	{ atom_op_mul32, ATOM_ARG_WS },
	{ atom_op_div32, ATOM_ARG_PS },
	{ atom_op_div32, ATOM_ARG_WS },
};

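/*
 * Execute one command table with the interpreter lock already held. The
 * table header supplies the code length and the sizes of the work space and
 * parameter space; opcodes are then dispatched through opcode_table until
 * EOT, an unknown opcode, or an abort (loop timeout / failed CALL_TABLE).
 */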
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws) {
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
		if (!ectx.ws) {
			ret = -ENOMEM;
			goto free;
		}
	} else {
		ectx.ws = NULL;
	}

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

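/*
 * Public entry point: serialize interpreter runs on ctx->mutex and reset the
 * per-run interpreter state (data/register blocks, FB window, I/O mode,
 * divmul scratch) before executing the requested table.
 */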
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = amdgpu_atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}

static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

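/*
 * Build the indirect I/O (IIO) program index: scan the IIO data table and
 * record, for each IIO port id, the offset of its first instruction so the
 * IIO interpreter can jump straight to it.
 */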
static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

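/*
 * Extract the marketing name string from the VBIOS image: skip the str_num
 * ATOM strings (P/N, ASIC, PCI type, memory type) plus the CR/LF that
 * follows them, then copy the name and trim trailing spaces.
 */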
static void atom_get_vbios_name(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned char str_num;
	unsigned short off_to_vbios_str;
	unsigned char *c_ptr;
	int name_size;
	int i;

	const char *na = "--N/A--";
	char *back;

	p_rom = ctx->bios;

	str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
	if (str_num != 0) {
		off_to_vbios_str =
			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);

		c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
	} else {
		/* do not know where to find name */
		memcpy(ctx->name, na, 7);
		ctx->name[7] = 0;
		return;
	}

	/*
	 * skip the atombios strings, usually 4
	 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
	 */
	for (i = 0; i < str_num; i++) {
		while (*c_ptr != 0)
			c_ptr++;
		c_ptr++;
	}

	/* skip the following 2 chars: 0x0D 0x0A */
	c_ptr += 2;

	name_size = strnlen(c_ptr, STRLEN_LONG - 1);
	memcpy(ctx->name, c_ptr, name_size);
	back = ctx->name + name_size;
	while ((*--back) == ' ')
		;
	*(back + 1) = '\0';
}

static void atom_get_vbios_date(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned char *date_in_rom;

	p_rom = ctx->bios;

	date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;

	ctx->date[0] = '2';
	ctx->date[1] = '0';
	ctx->date[2] = date_in_rom[6];
	ctx->date[3] = date_in_rom[7];
	ctx->date[4] = '/';
	ctx->date[5] = date_in_rom[0];
	ctx->date[6] = date_in_rom[1];
	ctx->date[7] = '/';
	ctx->date[8] = date_in_rom[3];
	ctx->date[9] = date_in_rom[4];
	ctx->date[10] = ' ';
	ctx->date[11] = date_in_rom[9];
	ctx->date[12] = date_in_rom[10];
	ctx->date[13] = date_in_rom[11];
	ctx->date[14] = date_in_rom[12];
	ctx->date[15] = date_in_rom[13];
	ctx->date[16] = '\0';
}

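/*
 * Search the BIOS image for a NUL-terminated prefix string between the given
 * start and end offsets; returns a pointer into the image at the first match
 * or NULL if the string is not found.
 */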
static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
					   int end, int maxlen)
{
	unsigned long str_off;
	unsigned char *p_rom;
	unsigned short str_len;

	str_off = 0;
	str_len = strnlen(str, maxlen);
	p_rom = ctx->bios;

	for (; start <= end; ++start) {
		for (str_off = 0; str_off < str_len; ++str_off) {
			if (str[str_off] != *(p_rom + start + str_off))
				break;
		}

		if (str_off == str_len || str[str_off] == 0)
			return p_rom + start;
	}
	return NULL;
}

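/*
 * Extract the VBIOS part number: use the ATOM string block when present,
 * fall back to the legacy part-number offset, and as a last resort search
 * for the BIOS_ATOM_PREFIX anchor in the first kilobyte of the image.
 */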
static void atom_get_vbios_pn(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned short off_to_vbios_str;
	unsigned char *vbios_str;
	int count;

	off_to_vbios_str = 0;
	p_rom = ctx->bios;

	if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
		off_to_vbios_str =
			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);

		vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
	} else {
		vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
	}

	if (*vbios_str == 0) {
		vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
		/* if the anchor was found, skip past the prefix to the part number */
		if (vbios_str != NULL)
			vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
	}
	if (vbios_str != NULL && *vbios_str == 0)
		vbios_str++;

	if (vbios_str != NULL) {
		count = 0;
		while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
		       vbios_str[count] <= 'z') {
			ctx->vbios_pn[count] = vbios_str[count];
			count++;
		}

		ctx->vbios_pn[count] = 0;
	}

	pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
}

static void atom_get_vbios_version(struct atom_context *ctx)
{
	unsigned short start = 3, end;
	unsigned char *vbios_ver;
	unsigned char *p_rom;

	p_rom = ctx->bios;
	/* Search from the strings offset if it's present */
	start = *(unsigned short *)(p_rom +
				    OFFSET_TO_GET_ATOMBIOS_STRING_START);

	/* Search up to the atom rom header start point */
	end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER);

	/* Use hardcoded offsets if the offsets are not populated */
	if (end <= start) {
		start = 3;
		end = 1024;
	}

	/* find the anchor ATOMBIOSBK-AMD */
	vbios_ver =
		atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64);
	if (vbios_ver != NULL) {
		/* skip "ATOMBIOSBK-AMD VER" */
		vbios_ver += 18;
		memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
	} else {
		ctx->vbios_ver_str[0] = '\0';
	}
}

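/*
 * Parse the ATOM BIOS image: validate the BIOS/ATI/ATOM magics, locate the
 * command and data master tables, index the IIO programs, pull the firmware
 * revision from the firmware info table, and cache the VBIOS name, part
 * number, date and version strings.
 */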
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
		kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	struct _ATOM_ROM_HEADER *atom_rom_header;
	struct _ATOM_MASTER_DATA_TABLE *master_table;
	struct _ATOM_FIRMWARE_INFO *atom_fw_info;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
		    strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
		    strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
	if (atom_rom_header->usMasterDataTableOffset != 0) {
		master_table = (struct _ATOM_MASTER_DATA_TABLE *)
			CSTR(atom_rom_header->usMasterDataTableOffset);
		if (master_table->ListOfDataTables.FirmwareInfo != 0) {
			atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
				CSTR(master_table->ListOfDataTables.FirmwareInfo);
			ctx->version = atom_fw_info->ulFirmwareRevision;
		}
	}

	atom_get_vbios_name(ctx);
	atom_get_vbios_pn(ctx);
	atom_get_vbios_date(ctx);
	atom_get_vbios_version(ctx);

	return ctx;
}

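/*
 * Run the ASIC_INIT command table with the default engine and memory clocks
 * from the firmware info table as parameters. Returns nonzero if the clocks
 * or the table are missing, or the error from table execution.
 */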
int amdgpu_atom_asic_init(struct atom_context *ctx)
{
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, sizeof(ps));

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, sizeof(ps));

	return ret;
}

void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

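/*
 * Look up a data table by index in the master data table and, if present,
 * report its size and format/content revisions along with its offset.
 */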
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
				   uint16_t *size, uint8_t *frev, uint8_t *crev,
				   uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

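/*
 * Look up a command table by index in the master command table and, if
 * present, report its format and content revisions.
 */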
bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
				  uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}
