1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <asm/unaligned.h>
29
30#define ATOM_DEBUG
31
32#include "atom.h"
33#include "atom-names.h"
34#include "atom-bits.h"
35#include "amdgpu.h"
36
37#define ATOM_COND_ABOVE 0
38#define ATOM_COND_ABOVEOREQUAL 1
39#define ATOM_COND_ALWAYS 2
40#define ATOM_COND_BELOW 3
41#define ATOM_COND_BELOWOREQUAL 4
42#define ATOM_COND_EQUAL 5
43#define ATOM_COND_NOTEQUAL 6
44
45#define ATOM_PORT_ATI 0
46#define ATOM_PORT_PCI 1
47#define ATOM_PORT_SYSIO 2
48
49#define ATOM_UNIT_MICROSEC 0
50#define ATOM_UNIT_MILLISEC 1
51
52#define PLL_INDEX 2
53#define PLL_DATA 3
54
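/*
 * Per-invocation interpreter state: ps points at the caller supplied
 * parameter space, ws at the table's scratch workspace, ps_shift is the
 * offset handed to nested CALL_TABLE invocations, and last_jump/abort
 * drive the stuck-loop watchdog in atom_op_jump().
 */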
55typedef struct {
56 struct atom_context *ctx;
57 uint32_t *ps, *ws;
58 int ps_shift;
59 uint16_t start;
60 unsigned last_jump;
61 unsigned long last_jump_jiffies;
62 bool abort;
63} atom_exec_context;
64
65int amdgpu_atom_debug;
66static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
67int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
68
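/*
 * Mask and shift applied for each source alignment: DWORD, WORD0/8/16
 * and BYTE0/8/16/24 within a 32-bit operand.  atom_dst_to_src below
 * translates the destination alignment field to this source encoding.
 */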
69static uint32_t atom_arg_mask[8] =
70 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
71 0xFF000000 };
72static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
73
74static int atom_dst_to_src[8][4] = {
75 /* translate destination alignment field to the source alignment encoding */
76 {0, 0, 0, 0},
77 {1, 2, 3, 0},
78 {1, 2, 3, 0},
79 {1, 2, 3, 0},
80 {4, 5, 6, 7},
81 {4, 5, 6, 7},
82 {4, 5, 6, 7},
83 {4, 5, 6, 7},
84};
85static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
86
87static int debug_depth;
88#ifdef ATOM_DEBUG
89static void debug_print_spaces(int n)
90{
91 while (n--)
92 printk(" ");
93}
94
95#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
96#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
97#else
98#define DEBUG(...) do { } while (0)
99#define SDEBUG(...) do { } while (0)
100#endif
101
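/*
 * Run the indirect IO (IIO) program that starts at 'base' in the BIOS
 * image: a small bytecode (read, write, set/clear bits, move index/data/
 * attr fields) used to emulate index/data style register access.  Returns
 * the value accumulated in 'temp'.
 */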
102static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
103 uint32_t index, uint32_t data)
104{
105 uint32_t temp = 0xCDCDCDCD;
106
107 while (1)
108 switch (CU8(base)) {
109 case ATOM_IIO_NOP:
110 base++;
111 break;
112 case ATOM_IIO_READ:
113 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
114 base += 3;
115 break;
116 case ATOM_IIO_WRITE:
117 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
118 base += 3;
119 break;
120 case ATOM_IIO_CLEAR:
121 temp &=
122 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
123 CU8(base + 2));
124 base += 3;
125 break;
126 case ATOM_IIO_SET:
127 temp |=
128 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
129 2);
130 base += 3;
131 break;
132 case ATOM_IIO_MOVE_INDEX:
133 temp &=
134 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
135 CU8(base + 3));
136 temp |=
137 ((index >> CU8(base + 2)) &
138 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
139 3);
140 base += 4;
141 break;
142 case ATOM_IIO_MOVE_DATA:
143 temp &=
144 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
145 CU8(base + 3));
146 temp |=
147 ((data >> CU8(base + 2)) &
148 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
149 3);
150 base += 4;
151 break;
152 case ATOM_IIO_MOVE_ATTR:
153 temp &=
154 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
155 CU8(base + 3));
156 temp |=
157 ((ctx->
158 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
159 CU8
160 (base
161 +
162 1))))
163 << CU8(base + 3);
164 base += 4;
165 break;
166 case ATOM_IIO_END:
167 return temp;
168 default:
169 pr_info("Unknown IIO opcode\n");
170 return 0;
171 }
172}
173
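/*
 * Fetch a source operand.  'attr' encodes the argument type (REG, PS, WS,
 * FB, ID, IMM, PLL, MC) and the byte/word alignment within the 32-bit
 * value; the unshifted raw value is optionally returned through 'saved'
 * so read-modify-write destinations can be reassembled by atom_put_dst().
 */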
174static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
175 int *ptr, uint32_t *saved, int print)
176{
177 uint32_t idx, val = 0xCDCDCDCD, align, arg;
178 struct atom_context *gctx = ctx->ctx;
179 arg = attr & 7;
180 align = (attr >> 3) & 7;
181 switch (arg) {
182 case ATOM_ARG_REG:
183 idx = U16(*ptr);
184 (*ptr) += 2;
185 if (print)
186 DEBUG("REG[0x%04X]", idx);
187 idx += gctx->reg_block;
188 switch (gctx->io_mode) {
189 case ATOM_IO_MM:
190 val = gctx->card->reg_read(gctx->card, idx);
191 break;
192 case ATOM_IO_PCI:
193 pr_info("PCI registers are not implemented\n");
194 return 0;
195 case ATOM_IO_SYSIO:
196 pr_info("SYSIO registers are not implemented\n");
197 return 0;
198 default:
199 if (!(gctx->io_mode & 0x80)) {
200 pr_info("Bad IO mode\n");
201 return 0;
202 }
203 if (!gctx->iio[gctx->io_mode & 0x7F]) {
204 pr_info("Undefined indirect IO read method %d\n",
205 gctx->io_mode & 0x7F);
206 return 0;
207 }
208 val =
209 atom_iio_execute(gctx,
210 gctx->iio[gctx->io_mode & 0x7F],
211 idx, 0);
212 }
213 break;
214 case ATOM_ARG_PS:
215 idx = U8(*ptr);
216 (*ptr)++;
217 /* get_unaligned_le32 avoids unaligned accesses from atombios
218 * tables, noticed on a DEC Alpha. */
219 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
220 if (print)
221 DEBUG("PS[0x%02X,0x%04X]", idx, val);
222 break;
223 case ATOM_ARG_WS:
224 idx = U8(*ptr);
225 (*ptr)++;
226 if (print)
227 DEBUG("WS[0x%02X]", idx);
228 switch (idx) {
229 case ATOM_WS_QUOTIENT:
230 val = gctx->divmul[0];
231 break;
232 case ATOM_WS_REMAINDER:
233 val = gctx->divmul[1];
234 break;
235 case ATOM_WS_DATAPTR:
236 val = gctx->data_block;
237 break;
238 case ATOM_WS_SHIFT:
239 val = gctx->shift;
240 break;
241 case ATOM_WS_OR_MASK:
242 val = 1 << gctx->shift;
243 break;
244 case ATOM_WS_AND_MASK:
245 val = ~(1 << gctx->shift);
246 break;
247 case ATOM_WS_FB_WINDOW:
248 val = gctx->fb_base;
249 break;
250 case ATOM_WS_ATTRIBUTES:
251 val = gctx->io_attr;
252 break;
253 case ATOM_WS_REGPTR:
254 val = gctx->reg_block;
255 break;
256 default:
257 val = ctx->ws[idx];
258 }
259 break;
260 case ATOM_ARG_ID:
261 idx = U16(*ptr);
262 (*ptr) += 2;
263 if (print) {
264 if (gctx->data_block)
265 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
266 else
267 DEBUG("ID[0x%04X]", idx);
268 }
269 val = U32(idx + gctx->data_block);
270 break;
271 case ATOM_ARG_FB:
272 idx = U8(*ptr);
273 (*ptr)++;
274 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
275 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
276 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
277 val = 0;
278 } else
279 val = gctx->scratch[(gctx->fb_base / 4) + idx];
280 if (print)
281 DEBUG("FB[0x%02X]", idx);
282 break;
283 case ATOM_ARG_IMM:
284 switch (align) {
285 case ATOM_SRC_DWORD:
286 val = U32(*ptr);
287 (*ptr) += 4;
288 if (print)
289 DEBUG("IMM 0x%08X\n", val);
290 return val;
291 case ATOM_SRC_WORD0:
292 case ATOM_SRC_WORD8:
293 case ATOM_SRC_WORD16:
294 val = U16(*ptr);
295 (*ptr) += 2;
296 if (print)
297 DEBUG("IMM 0x%04X\n", val);
298 return val;
299 case ATOM_SRC_BYTE0:
300 case ATOM_SRC_BYTE8:
301 case ATOM_SRC_BYTE16:
302 case ATOM_SRC_BYTE24:
303 val = U8(*ptr);
304 (*ptr)++;
305 if (print)
306 DEBUG("IMM 0x%02X\n", val);
307 return val;
308 }
309 return 0;
310 case ATOM_ARG_PLL:
311 idx = U8(*ptr);
312 (*ptr)++;
313 if (print)
314 DEBUG("PLL[0x%02X]", idx);
315 val = gctx->card->pll_read(gctx->card, idx);
316 break;
317 case ATOM_ARG_MC:
318 idx = U8(*ptr);
319 (*ptr)++;
320 if (print)
321 DEBUG("MC[0x%02X]", idx);
322 val = gctx->card->mc_read(gctx->card, idx);
323 break;
324 }
325 if (saved)
326 *saved = val;
327 val &= atom_arg_mask[align];
328 val >>= atom_arg_shift[align];
329 if (print)
330 switch (align) {
331 case ATOM_SRC_DWORD:
332 DEBUG(".[31:0] -> 0x%08X\n", val);
333 break;
334 case ATOM_SRC_WORD0:
335 DEBUG(".[15:0] -> 0x%04X\n", val);
336 break;
337 case ATOM_SRC_WORD8:
338 DEBUG(".[23:8] -> 0x%04X\n", val);
339 break;
340 case ATOM_SRC_WORD16:
341 DEBUG(".[31:16] -> 0x%04X\n", val);
342 break;
343 case ATOM_SRC_BYTE0:
344 DEBUG(".[7:0] -> 0x%02X\n", val);
345 break;
346 case ATOM_SRC_BYTE8:
347 DEBUG(".[15:8] -> 0x%02X\n", val);
348 break;
349 case ATOM_SRC_BYTE16:
350 DEBUG(".[23:16] -> 0x%02X\n", val);
351 break;
352 case ATOM_SRC_BYTE24:
353 DEBUG(".[31:24] -> 0x%02X\n", val);
354 break;
355 }
356 return val;
357}
358
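/* Advance *ptr past a source operand without decoding or fetching it. */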
359static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
360{
361 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
362 switch (arg) {
363 case ATOM_ARG_REG:
364 case ATOM_ARG_ID:
365 (*ptr) += 2;
366 break;
367 case ATOM_ARG_PLL:
368 case ATOM_ARG_MC:
369 case ATOM_ARG_PS:
370 case ATOM_ARG_WS:
371 case ATOM_ARG_FB:
372 (*ptr)++;
373 break;
374 case ATOM_ARG_IMM:
375 switch (align) {
376 case ATOM_SRC_DWORD:
377 (*ptr) += 4;
378 return;
379 case ATOM_SRC_WORD0:
380 case ATOM_SRC_WORD8:
381 case ATOM_SRC_WORD16:
382 (*ptr) += 2;
383 return;
384 case ATOM_SRC_BYTE0:
385 case ATOM_SRC_BYTE8:
386 case ATOM_SRC_BYTE16:
387 case ATOM_SRC_BYTE24:
388 (*ptr)++;
389 return;
390 }
391 return;
392 }
393}
394
395static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
396{
397 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
398}
399
400static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
401{
402 uint32_t val = 0xCDCDCDCD;
403
404 switch (align) {
405 case ATOM_SRC_DWORD:
406 val = U32(*ptr);
407 (*ptr) += 4;
408 break;
409 case ATOM_SRC_WORD0:
410 case ATOM_SRC_WORD8:
411 case ATOM_SRC_WORD16:
412 val = U16(*ptr);
413 (*ptr) += 2;
414 break;
415 case ATOM_SRC_BYTE0:
416 case ATOM_SRC_BYTE8:
417 case ATOM_SRC_BYTE16:
418 case ATOM_SRC_BYTE24:
419 val = U8(*ptr);
420 (*ptr)++;
421 break;
422 }
423 return val;
424}
425
426static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
427 int *ptr, uint32_t *saved, int print)
428{
429 return atom_get_src_int(ctx,
430 arg | atom_dst_to_src[(attr >> 3) &
431 7][(attr >> 6) & 3] << 3,
432 ptr, saved, print);
433}
434
435static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
436{
437 atom_skip_src_int(ctx,
438 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
439 3] << 3, ptr);
440}
441
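/*
 * Write back a destination operand: shift and mask the new value into the
 * bit-field selected by the alignment, merge it with the previously saved
 * contents and dispatch the result to the selected register/memory space.
 */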
442static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
443 int *ptr, uint32_t val, uint32_t saved)
444{
445 uint32_t align =
446 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
447 val, idx;
448 struct atom_context *gctx = ctx->ctx;
449 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
450 val <<= atom_arg_shift[align];
451 val &= atom_arg_mask[align];
452 saved &= ~atom_arg_mask[align];
453 val |= saved;
454 switch (arg) {
455 case ATOM_ARG_REG:
456 idx = U16(*ptr);
457 (*ptr) += 2;
458 DEBUG("REG[0x%04X]", idx);
459 idx += gctx->reg_block;
460 switch (gctx->io_mode) {
461 case ATOM_IO_MM:
462 if (idx == 0)
463 gctx->card->reg_write(gctx->card, idx,
464 val << 2);
465 else
466 gctx->card->reg_write(gctx->card, idx, val);
467 break;
468 case ATOM_IO_PCI:
469 pr_info("PCI registers are not implemented\n");
470 return;
471 case ATOM_IO_SYSIO:
472 pr_info("SYSIO registers are not implemented\n");
473 return;
474 default:
475 if (!(gctx->io_mode & 0x80)) {
476 pr_info("Bad IO mode\n");
477 return;
478 }
479 if (!gctx->iio[gctx->io_mode & 0xFF]) {
480 pr_info("Undefined indirect IO write method %d\n",
481 gctx->io_mode & 0x7F);
482 return;
483 }
484 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
485 idx, val);
486 }
487 break;
488 case ATOM_ARG_PS:
489 idx = U8(*ptr);
490 (*ptr)++;
491 DEBUG("PS[0x%02X]", idx);
492 ctx->ps[idx] = cpu_to_le32(val);
493 break;
494 case ATOM_ARG_WS:
495 idx = U8(*ptr);
496 (*ptr)++;
497 DEBUG("WS[0x%02X]", idx);
498 switch (idx) {
499 case ATOM_WS_QUOTIENT:
500 gctx->divmul[0] = val;
501 break;
502 case ATOM_WS_REMAINDER:
503 gctx->divmul[1] = val;
504 break;
505 case ATOM_WS_DATAPTR:
506 gctx->data_block = val;
507 break;
508 case ATOM_WS_SHIFT:
509 gctx->shift = val;
510 break;
511 case ATOM_WS_OR_MASK:
512 case ATOM_WS_AND_MASK:
513 break;
514 case ATOM_WS_FB_WINDOW:
515 gctx->fb_base = val;
516 break;
517 case ATOM_WS_ATTRIBUTES:
518 gctx->io_attr = val;
519 break;
520 case ATOM_WS_REGPTR:
521 gctx->reg_block = val;
522 break;
523 default:
524 ctx->ws[idx] = val;
525 }
526 break;
527 case ATOM_ARG_FB:
528 idx = U8(*ptr);
529 (*ptr)++;
530 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
531 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
532 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
533 } else
534 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
535 DEBUG("FB[0x%02X]", idx);
536 break;
537 case ATOM_ARG_PLL:
538 idx = U8(*ptr);
539 (*ptr)++;
540 DEBUG("PLL[0x%02X]", idx);
541 gctx->card->pll_write(gctx->card, idx, val);
542 break;
543 case ATOM_ARG_MC:
544 idx = U8(*ptr);
545 (*ptr)++;
546 DEBUG("MC[0x%02X]", idx);
547 gctx->card->mc_write(gctx->card, idx, val);
548 return;
549 }
550 switch (align) {
551 case ATOM_SRC_DWORD:
552 DEBUG(".[31:0] <- 0x%08X\n", old_val);
553 break;
554 case ATOM_SRC_WORD0:
555 DEBUG(".[15:0] <- 0x%04X\n", old_val);
556 break;
557 case ATOM_SRC_WORD8:
558 DEBUG(".[23:8] <- 0x%04X\n", old_val);
559 break;
560 case ATOM_SRC_WORD16:
561 DEBUG(".[31:16] <- 0x%04X\n", old_val);
562 break;
563 case ATOM_SRC_BYTE0:
564 DEBUG(".[7:0] <- 0x%02X\n", old_val);
565 break;
566 case ATOM_SRC_BYTE8:
567 DEBUG(".[15:8] <- 0x%02X\n", old_val);
568 break;
569 case ATOM_SRC_BYTE16:
570 DEBUG(".[23:16] <- 0x%02X\n", old_val);
571 break;
572 case ATOM_SRC_BYTE24:
573 DEBUG(".[31:24] <- 0x%02X\n", old_val);
574 break;
575 }
576}
577
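/*
 * Opcode handlers.  Each one decodes its operands at *ptr, performs the
 * operation and leaves *ptr pointing at the next opcode.
 */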
578static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
579{
580 uint8_t attr = U8((*ptr)++);
581 uint32_t dst, src, saved;
582 int dptr = *ptr;
583 SDEBUG(" dst: ");
584 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
585 SDEBUG(" src: ");
586 src = atom_get_src(ctx, attr, ptr);
587 dst += src;
588 SDEBUG(" dst: ");
589 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
590}
591
592static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
593{
594 uint8_t attr = U8((*ptr)++);
595 uint32_t dst, src, saved;
596 int dptr = *ptr;
597 SDEBUG(" dst: ");
598 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
599 SDEBUG(" src: ");
600 src = atom_get_src(ctx, attr, ptr);
601 dst &= src;
602 SDEBUG(" dst: ");
603 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
604}
605
606static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
607{
608 printk("ATOM BIOS beeped!\n");
609}
610
611static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
612{
613 int idx = U8((*ptr)++);
614 int r = 0;
615
616 if (idx < ATOM_TABLE_NAMES_CNT)
617 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
618 else
619 SDEBUG(" table: %d\n", idx);
620 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
621 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
622 if (r) {
623 ctx->abort = true;
624 }
625}
626
627static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
628{
629 uint8_t attr = U8((*ptr)++);
630 uint32_t saved;
631 int dptr = *ptr;
632 attr &= 0x38;
633 attr |= atom_def_dst[attr >> 3] << 6;
634 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
635 SDEBUG(" dst: ");
636 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
637}
638
639static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
640{
641 uint8_t attr = U8((*ptr)++);
642 uint32_t dst, src;
643 SDEBUG(" src1: ");
644 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
645 SDEBUG(" src2: ");
646 src = atom_get_src(ctx, attr, ptr);
647 ctx->ctx->cs_equal = (dst == src);
648 ctx->ctx->cs_above = (dst > src);
649 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
650 ctx->ctx->cs_above ? "GT" : "LE");
651}
652
653static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
654{
655 unsigned count = U8((*ptr)++);
656 SDEBUG(" count: %d\n", count);
657 if (arg == ATOM_UNIT_MICROSEC)
658 udelay(count);
659 else if (!drm_can_sleep())
660 mdelay(count);
661 else
662 msleep(count);
663}
664
665static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
666{
667 uint8_t attr = U8((*ptr)++);
668 uint32_t dst, src;
669 SDEBUG(" src1: ");
670 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
671 SDEBUG(" src2: ");
672 src = atom_get_src(ctx, attr, ptr);
673 if (src != 0) {
674 ctx->ctx->divmul[0] = dst / src;
675 ctx->ctx->divmul[1] = dst % src;
676 } else {
677 ctx->ctx->divmul[0] = 0;
678 ctx->ctx->divmul[1] = 0;
679 }
680}
681
682static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
683{
684 uint64_t val64;
685 uint8_t attr = U8((*ptr)++);
686 uint32_t dst, src;
687 SDEBUG(" src1: ");
688 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
689 SDEBUG(" src2: ");
690 src = atom_get_src(ctx, attr, ptr);
691 if (src != 0) {
692 val64 = dst;
693 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
694 do_div(val64, src);
695 ctx->ctx->divmul[0] = lower_32_bits(val64);
696 ctx->ctx->divmul[1] = upper_32_bits(val64);
697 } else {
698 ctx->ctx->divmul[0] = 0;
699 ctx->ctx->divmul[1] = 0;
700 }
701}
702
703static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
704{
705 /* functionally, a nop */
706}
707
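/*
 * Conditional jump.  Repeated jumps to the same target are timed with
 * jiffies so a table stuck spinning on one branch is aborted after about
 * five seconds instead of hanging the caller.
 */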
708static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
709{
710 int execute = 0, target = U16(*ptr);
711 unsigned long cjiffies;
712
713 (*ptr) += 2;
714 switch (arg) {
715 case ATOM_COND_ABOVE:
716 execute = ctx->ctx->cs_above;
717 break;
718 case ATOM_COND_ABOVEOREQUAL:
719 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
720 break;
721 case ATOM_COND_ALWAYS:
722 execute = 1;
723 break;
724 case ATOM_COND_BELOW:
725 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
726 break;
727 case ATOM_COND_BELOWOREQUAL:
728 execute = !ctx->ctx->cs_above;
729 break;
730 case ATOM_COND_EQUAL:
731 execute = ctx->ctx->cs_equal;
732 break;
733 case ATOM_COND_NOTEQUAL:
734 execute = !ctx->ctx->cs_equal;
735 break;
736 }
737 if (arg != ATOM_COND_ALWAYS)
738 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
739 SDEBUG(" target: 0x%04X\n", target);
740 if (execute) {
741 if (ctx->last_jump == (ctx->start + target)) {
742 cjiffies = jiffies;
743 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
744 cjiffies -= ctx->last_jump_jiffies;
745 if ((jiffies_to_msecs(cjiffies) > 5000)) {
746 DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
747 ctx->abort = true;
748 }
749 } else {
750 /* jiffies wrapped around, just wait a little longer */
751 ctx->last_jump_jiffies = jiffies;
752 }
753 } else {
754 ctx->last_jump = ctx->start + target;
755 ctx->last_jump_jiffies = jiffies;
756 }
757 *ptr = ctx->start + target;
758 }
759}
760
761static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
762{
763 uint8_t attr = U8((*ptr)++);
764 uint32_t dst, mask, src, saved;
765 int dptr = *ptr;
766 SDEBUG(" dst: ");
767 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
768 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
769 SDEBUG(" mask: 0x%08x", mask);
770 SDEBUG(" src: ");
771 src = atom_get_src(ctx, attr, ptr);
772 dst &= mask;
773 dst |= src;
774 SDEBUG(" dst: ");
775 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
776}
777
778static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
779{
780 uint8_t attr = U8((*ptr)++);
781 uint32_t src, saved;
782 int dptr = *ptr;
783 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
784 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
785 else {
786 atom_skip_dst(ctx, arg, attr, ptr);
787 saved = 0xCDCDCDCD;
788 }
789 SDEBUG(" src: ");
790 src = atom_get_src(ctx, attr, ptr);
791 SDEBUG(" dst: ");
792 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
793}
794
795static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
796{
797 uint8_t attr = U8((*ptr)++);
798 uint32_t dst, src;
799 SDEBUG(" src1: ");
800 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
801 SDEBUG(" src2: ");
802 src = atom_get_src(ctx, attr, ptr);
803 ctx->ctx->divmul[0] = dst * src;
804}
805
806static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
807{
808 uint64_t val64;
809 uint8_t attr = U8((*ptr)++);
810 uint32_t dst, src;
811 SDEBUG(" src1: ");
812 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
813 SDEBUG(" src2: ");
814 src = atom_get_src(ctx, attr, ptr);
815 val64 = (uint64_t)dst * (uint64_t)src;
816 ctx->ctx->divmul[0] = lower_32_bits(val64);
817 ctx->ctx->divmul[1] = upper_32_bits(val64);
818}
819
820static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
821{
822 /* nothing */
823}
824
825static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
826{
827 uint8_t attr = U8((*ptr)++);
828 uint32_t dst, src, saved;
829 int dptr = *ptr;
830 SDEBUG(" dst: ");
831 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
832 SDEBUG(" src: ");
833 src = atom_get_src(ctx, attr, ptr);
834 dst |= src;
835 SDEBUG(" dst: ");
836 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
837}
838
839static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
840{
841 uint8_t val = U8((*ptr)++);
842 SDEBUG("POST card output: 0x%02X\n", val);
843}
844
845static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
846{
847 pr_info("unimplemented!\n");
848}
849
850static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
851{
852 pr_info("unimplemented!\n");
853}
854
855static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
856{
857 pr_info("unimplemented!\n");
858}
859
860static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
861{
862 int idx = U8(*ptr);
863 (*ptr)++;
864 SDEBUG(" block: %d\n", idx);
865 if (!idx)
866 ctx->ctx->data_block = 0;
867 else if (idx == 255)
868 ctx->ctx->data_block = ctx->start;
869 else
870 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
871 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
872}
873
874static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
875{
876 uint8_t attr = U8((*ptr)++);
877 SDEBUG(" fb_base: ");
878 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
879}
880
881static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
882{
883 int port;
884 switch (arg) {
885 case ATOM_PORT_ATI:
886 port = U16(*ptr);
887 if (port < ATOM_IO_NAMES_CNT)
888 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
889 else
890 SDEBUG(" port: %d\n", port);
891 if (!port)
892 ctx->ctx->io_mode = ATOM_IO_MM;
893 else
894 ctx->ctx->io_mode = ATOM_IO_IIO | port;
895 (*ptr) += 2;
896 break;
897 case ATOM_PORT_PCI:
898 ctx->ctx->io_mode = ATOM_IO_PCI;
899 (*ptr)++;
900 break;
901 case ATOM_PORT_SYSIO:
902 ctx->ctx->io_mode = ATOM_IO_SYSIO;
903 (*ptr)++;
904 break;
905 }
906}
907
908static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
909{
910 ctx->ctx->reg_block = U16(*ptr);
911 (*ptr) += 2;
912 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
913}
914
915static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
916{
917 uint8_t attr = U8((*ptr)++), shift;
918 uint32_t saved, dst;
919 int dptr = *ptr;
920 attr &= 0x38;
921 attr |= atom_def_dst[attr >> 3] << 6;
922 SDEBUG(" dst: ");
923 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
924 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
925 SDEBUG(" shift: %d\n", shift);
926 dst <<= shift;
927 SDEBUG(" dst: ");
928 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
929}
930
931static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
932{
933 uint8_t attr = U8((*ptr)++), shift;
934 uint32_t saved, dst;
935 int dptr = *ptr;
936 attr &= 0x38;
937 attr |= atom_def_dst[attr >> 3] << 6;
938 SDEBUG(" dst: ");
939 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
940 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
941 SDEBUG(" shift: %d\n", shift);
942 dst >>= shift;
943 SDEBUG(" dst: ");
944 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
945}
946
947static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
948{
949 uint8_t attr = U8((*ptr)++), shift;
950 uint32_t saved, dst;
951 int dptr = *ptr;
952 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
953 SDEBUG(" dst: ");
954 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
955 /* op needs the full dst value */
956 dst = saved;
957 shift = atom_get_src(ctx, attr, ptr);
958 SDEBUG(" shift: %d\n", shift);
959 dst <<= shift;
960 dst &= atom_arg_mask[dst_align];
961 dst >>= atom_arg_shift[dst_align];
962 SDEBUG(" dst: ");
963 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
964}
965
966static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
967{
968 uint8_t attr = U8((*ptr)++), shift;
969 uint32_t saved, dst;
970 int dptr = *ptr;
971 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
972 SDEBUG(" dst: ");
973 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
974 /* op needs the full dst value */
975 dst = saved;
976 shift = atom_get_src(ctx, attr, ptr);
977 SDEBUG(" shift: %d\n", shift);
978 dst >>= shift;
979 dst &= atom_arg_mask[dst_align];
980 dst >>= atom_arg_shift[dst_align];
981 SDEBUG(" dst: ");
982 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
983}
984
985static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
986{
987 uint8_t attr = U8((*ptr)++);
988 uint32_t dst, src, saved;
989 int dptr = *ptr;
990 SDEBUG(" dst: ");
991 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
992 SDEBUG(" src: ");
993 src = atom_get_src(ctx, attr, ptr);
994 dst -= src;
995 SDEBUG(" dst: ");
996 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
997}
998
999static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1000{
1001 uint8_t attr = U8((*ptr)++);
1002 uint32_t src, val, target;
1003 SDEBUG(" switch: ");
1004 src = atom_get_src(ctx, attr, ptr);
1005 while (U16(*ptr) != ATOM_CASE_END)
1006 if (U8(*ptr) == ATOM_CASE_MAGIC) {
1007 (*ptr)++;
1008 SDEBUG(" case: ");
1009 val =
1010 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1011 ptr);
1012 target = U16(*ptr);
1013 if (val == src) {
1014 SDEBUG(" target: %04X\n", target);
1015 *ptr = ctx->start + target;
1016 return;
1017 }
1018 (*ptr) += 2;
1019 } else {
1020 pr_info("Bad case\n");
1021 return;
1022 }
1023 (*ptr) += 2;
1024}
1025
1026static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1027{
1028 uint8_t attr = U8((*ptr)++);
1029 uint32_t dst, src;
1030 SDEBUG(" src1: ");
1031 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1032 SDEBUG(" src2: ");
1033 src = atom_get_src(ctx, attr, ptr);
1034 ctx->ctx->cs_equal = ((dst & src) == 0);
1035 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1036}
1037
1038static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1039{
1040 uint8_t attr = U8((*ptr)++);
1041 uint32_t dst, src, saved;
1042 int dptr = *ptr;
1043 SDEBUG(" dst: ");
1044 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1045 SDEBUG(" src: ");
1046 src = atom_get_src(ctx, attr, ptr);
1047 dst ^= src;
1048 SDEBUG(" dst: ");
1049 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1050}
1051
1052static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1053{
1054 uint8_t val = U8((*ptr)++);
1055 SDEBUG("DEBUG output: 0x%02X\n", val);
1056}
1057
1058static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1059{
1060 uint16_t val = U16(*ptr);
1061 (*ptr) += val + 2;
1062 SDEBUG("PROCESSDS output: 0x%02X\n", val);
1063}
1064
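/*
 * Opcode dispatch table: handler plus the argument (operand space,
 * condition, port or delay unit) it is invoked with, indexed by opcode.
 */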
1065static struct {
1066 void (*func) (atom_exec_context *, int *, int);
1067 int arg;
1068} opcode_table[ATOM_OP_CNT] = {
1069 {
1070 NULL, 0}, {
1071 atom_op_move, ATOM_ARG_REG}, {
1072 atom_op_move, ATOM_ARG_PS}, {
1073 atom_op_move, ATOM_ARG_WS}, {
1074 atom_op_move, ATOM_ARG_FB}, {
1075 atom_op_move, ATOM_ARG_PLL}, {
1076 atom_op_move, ATOM_ARG_MC}, {
1077 atom_op_and, ATOM_ARG_REG}, {
1078 atom_op_and, ATOM_ARG_PS}, {
1079 atom_op_and, ATOM_ARG_WS}, {
1080 atom_op_and, ATOM_ARG_FB}, {
1081 atom_op_and, ATOM_ARG_PLL}, {
1082 atom_op_and, ATOM_ARG_MC}, {
1083 atom_op_or, ATOM_ARG_REG}, {
1084 atom_op_or, ATOM_ARG_PS}, {
1085 atom_op_or, ATOM_ARG_WS}, {
1086 atom_op_or, ATOM_ARG_FB}, {
1087 atom_op_or, ATOM_ARG_PLL}, {
1088 atom_op_or, ATOM_ARG_MC}, {
1089 atom_op_shift_left, ATOM_ARG_REG}, {
1090 atom_op_shift_left, ATOM_ARG_PS}, {
1091 atom_op_shift_left, ATOM_ARG_WS}, {
1092 atom_op_shift_left, ATOM_ARG_FB}, {
1093 atom_op_shift_left, ATOM_ARG_PLL}, {
1094 atom_op_shift_left, ATOM_ARG_MC}, {
1095 atom_op_shift_right, ATOM_ARG_REG}, {
1096 atom_op_shift_right, ATOM_ARG_PS}, {
1097 atom_op_shift_right, ATOM_ARG_WS}, {
1098 atom_op_shift_right, ATOM_ARG_FB}, {
1099 atom_op_shift_right, ATOM_ARG_PLL}, {
1100 atom_op_shift_right, ATOM_ARG_MC}, {
1101 atom_op_mul, ATOM_ARG_REG}, {
1102 atom_op_mul, ATOM_ARG_PS}, {
1103 atom_op_mul, ATOM_ARG_WS}, {
1104 atom_op_mul, ATOM_ARG_FB}, {
1105 atom_op_mul, ATOM_ARG_PLL}, {
1106 atom_op_mul, ATOM_ARG_MC}, {
1107 atom_op_div, ATOM_ARG_REG}, {
1108 atom_op_div, ATOM_ARG_PS}, {
1109 atom_op_div, ATOM_ARG_WS}, {
1110 atom_op_div, ATOM_ARG_FB}, {
1111 atom_op_div, ATOM_ARG_PLL}, {
1112 atom_op_div, ATOM_ARG_MC}, {
1113 atom_op_add, ATOM_ARG_REG}, {
1114 atom_op_add, ATOM_ARG_PS}, {
1115 atom_op_add, ATOM_ARG_WS}, {
1116 atom_op_add, ATOM_ARG_FB}, {
1117 atom_op_add, ATOM_ARG_PLL}, {
1118 atom_op_add, ATOM_ARG_MC}, {
1119 atom_op_sub, ATOM_ARG_REG}, {
1120 atom_op_sub, ATOM_ARG_PS}, {
1121 atom_op_sub, ATOM_ARG_WS}, {
1122 atom_op_sub, ATOM_ARG_FB}, {
1123 atom_op_sub, ATOM_ARG_PLL}, {
1124 atom_op_sub, ATOM_ARG_MC}, {
1125 atom_op_setport, ATOM_PORT_ATI}, {
1126 atom_op_setport, ATOM_PORT_PCI}, {
1127 atom_op_setport, ATOM_PORT_SYSIO}, {
1128 atom_op_setregblock, 0}, {
1129 atom_op_setfbbase, 0}, {
1130 atom_op_compare, ATOM_ARG_REG}, {
1131 atom_op_compare, ATOM_ARG_PS}, {
1132 atom_op_compare, ATOM_ARG_WS}, {
1133 atom_op_compare, ATOM_ARG_FB}, {
1134 atom_op_compare, ATOM_ARG_PLL}, {
1135 atom_op_compare, ATOM_ARG_MC}, {
1136 atom_op_switch, 0}, {
1137 atom_op_jump, ATOM_COND_ALWAYS}, {
1138 atom_op_jump, ATOM_COND_EQUAL}, {
1139 atom_op_jump, ATOM_COND_BELOW}, {
1140 atom_op_jump, ATOM_COND_ABOVE}, {
1141 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1142 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1143 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1144 atom_op_test, ATOM_ARG_REG}, {
1145 atom_op_test, ATOM_ARG_PS}, {
1146 atom_op_test, ATOM_ARG_WS}, {
1147 atom_op_test, ATOM_ARG_FB}, {
1148 atom_op_test, ATOM_ARG_PLL}, {
1149 atom_op_test, ATOM_ARG_MC}, {
1150 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1151 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1152 atom_op_calltable, 0}, {
1153 atom_op_repeat, 0}, {
1154 atom_op_clear, ATOM_ARG_REG}, {
1155 atom_op_clear, ATOM_ARG_PS}, {
1156 atom_op_clear, ATOM_ARG_WS}, {
1157 atom_op_clear, ATOM_ARG_FB}, {
1158 atom_op_clear, ATOM_ARG_PLL}, {
1159 atom_op_clear, ATOM_ARG_MC}, {
1160 atom_op_nop, 0}, {
1161 atom_op_eot, 0}, {
1162 atom_op_mask, ATOM_ARG_REG}, {
1163 atom_op_mask, ATOM_ARG_PS}, {
1164 atom_op_mask, ATOM_ARG_WS}, {
1165 atom_op_mask, ATOM_ARG_FB}, {
1166 atom_op_mask, ATOM_ARG_PLL}, {
1167 atom_op_mask, ATOM_ARG_MC}, {
1168 atom_op_postcard, 0}, {
1169 atom_op_beep, 0}, {
1170 atom_op_savereg, 0}, {
1171 atom_op_restorereg, 0}, {
1172 atom_op_setdatablock, 0}, {
1173 atom_op_xor, ATOM_ARG_REG}, {
1174 atom_op_xor, ATOM_ARG_PS}, {
1175 atom_op_xor, ATOM_ARG_WS}, {
1176 atom_op_xor, ATOM_ARG_FB}, {
1177 atom_op_xor, ATOM_ARG_PLL}, {
1178 atom_op_xor, ATOM_ARG_MC}, {
1179 atom_op_shl, ATOM_ARG_REG}, {
1180 atom_op_shl, ATOM_ARG_PS}, {
1181 atom_op_shl, ATOM_ARG_WS}, {
1182 atom_op_shl, ATOM_ARG_FB}, {
1183 atom_op_shl, ATOM_ARG_PLL}, {
1184 atom_op_shl, ATOM_ARG_MC}, {
1185 atom_op_shr, ATOM_ARG_REG}, {
1186 atom_op_shr, ATOM_ARG_PS}, {
1187 atom_op_shr, ATOM_ARG_WS}, {
1188 atom_op_shr, ATOM_ARG_FB}, {
1189 atom_op_shr, ATOM_ARG_PLL}, {
1190 atom_op_shr, ATOM_ARG_MC}, {
1191 atom_op_debug, 0}, {
1192 atom_op_processds, 0}, {
1193 atom_op_mul32, ATOM_ARG_PS}, {
1194 atom_op_mul32, ATOM_ARG_WS}, {
1195 atom_op_div32, ATOM_ARG_PS}, {
1196 atom_op_div32, ATOM_ARG_WS},
1197};
1198
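/*
 * Execute one command table with ctx->mutex already held: look up the
 * table offset, allocate its workspace and step through the opcodes until
 * EOT, an unknown opcode or an abort request from a handler.
 */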
1199static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
1200{
1201 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1202 int len, ws, ps, ptr;
1203 unsigned char op;
1204 atom_exec_context ectx;
1205 int ret = 0;
1206
1207 if (!base)
1208 return -EINVAL;
1209
1210 len = CU16(base + ATOM_CT_SIZE_PTR);
1211 ws = CU8(base + ATOM_CT_WS_PTR);
1212 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1213 ptr = base + ATOM_CT_CODE_PTR;
1214
1215 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1216
1217 ectx.ctx = ctx;
1218 ectx.ps_shift = ps / 4;
1219 ectx.start = base;
1220 ectx.ps = params;
1221 ectx.abort = false;
1222 ectx.last_jump = 0;
1223 if (ws) {
1224 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
 if (!ectx.ws) {
 ret = -ENOMEM;
 goto free;
 }
1225 } else
1226 ectx.ws = NULL;
1227
1228 debug_depth++;
1229 while (1) {
1230 op = CU8(ptr++);
1231 if (op < ATOM_OP_NAMES_CNT)
1232 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1233 else
1234 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1235 if (ectx.abort) {
1236 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1237 base, len, ws, ps, ptr - 1);
1238 ret = -EINVAL;
1239 goto free;
1240 }
1241
1242 if (op < ATOM_OP_CNT && op > 0)
1243 opcode_table[op].func(&ectx, &ptr,
1244 opcode_table[op].arg);
1245 else
1246 break;
1247
1248 if (op == ATOM_OP_EOT)
1249 break;
1250 }
1251 debug_depth--;
1252 SDEBUG("<<\n");
1253
1254free:
1255 if (ws)
1256 kfree(ectx.ws);
1257 return ret;
1258}
1259
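/*
 * Public entry point: serialize on ctx->mutex and reset the interpreter's
 * global state (data/reg block, fb window, IO mode, divmul) before running
 * the table.
 *
 * Minimal usage sketch -- the index and parameter layout below are
 * illustrative only and depend on the table being called:
 *
 *	uint32_t ps[16] = { 0 };
 *
 *	ps[0] = cpu_to_le32(arg0);
 *	if (amdgpu_atom_execute_table(ctx, index, ps))
 *		DRM_ERROR("ATOM table %d failed\n", index);
 */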
1260int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
1261{
1262 int r;
1263
1264 mutex_lock(&ctx->mutex);
1265 /* reset data block */
1266 ctx->data_block = 0;
1267 /* reset reg block */
1268 ctx->reg_block = 0;
1269 /* reset fb window */
1270 ctx->fb_base = 0;
1271 /* reset io mode */
1272 ctx->io_mode = ATOM_IO_MM;
1273 /* reset divmul */
1274 ctx->divmul[0] = 0;
1275 ctx->divmul[1] = 0;
1276 r = amdgpu_atom_execute_table_locked(ctx, index, params);
1277 mutex_unlock(&ctx->mutex);
1278 return r;
1279}
1280
1281static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1282
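/*
 * Scan the IIO data block and record the program offset for each IIO id
 * so atom_iio_execute() can look it up; atom_iio_len gives the byte length
 * of each IIO opcode and is used to skip over the programs.
 */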
1283static void atom_index_iio(struct atom_context *ctx, int base)
1284{
1285 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1286 if (!ctx->iio)
1287 return;
1288 while (CU8(base) == ATOM_IIO_START) {
1289 ctx->iio[CU8(base + 1)] = base + 2;
1290 base += 2;
1291 while (CU8(base) != ATOM_IIO_END)
1292 base += atom_iio_len[CU8(base)];
1293 base += 3;
1294 }
1295}
1296
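/*
 * Validate the BIOS image (AtomBIOS and ATI/ATOM magic strings), record
 * the command and data table offsets, index the indirect IO programs and
 * log the VBIOS identification string.  Returns a new context or NULL on
 * failure.
 */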
1297struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1298{
1299 int base;
1300 struct atom_context *ctx =
1301 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1302 char *str;
1303 u16 idx;
1304
1305 if (!ctx)
1306 return NULL;
1307
1308 ctx->card = card;
1309 ctx->bios = bios;
1310
1311 if (CU16(0) != ATOM_BIOS_MAGIC) {
1312 pr_info("Invalid BIOS magic\n");
1313 kfree(ctx);
1314 return NULL;
1315 }
1316 if (strncmp
1317 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1318 strlen(ATOM_ATI_MAGIC))) {
1319 pr_info("Invalid ATI magic\n");
1320 kfree(ctx);
1321 return NULL;
1322 }
1323
1324 base = CU16(ATOM_ROM_TABLE_PTR);
1325 if (strncmp
1326 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1327 strlen(ATOM_ROM_MAGIC))) {
1328 pr_info("Invalid ATOM magic\n");
1329 kfree(ctx);
1330 return NULL;
1331 }
1332
1333 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1334 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1335 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1336 if (!ctx->iio) {
1337 amdgpu_atom_destroy(ctx);
1338 return NULL;
1339 }
1340
1341 idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1342 if (idx == 0)
1343 idx = 0x80;
1344
1345 str = CSTR(idx);
1346 if (*str != '\0') {
1347 pr_info("ATOM BIOS: %s\n", str);
1348 strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1349 }
1350
1351
1352 return ctx;
1353}
1354
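/*
 * Run the ASIC_Init command table with the default SCLK/MCLK taken from
 * the firmware info table.  Returns 1 if either clock or the init table
 * is missing, otherwise the result of table execution.
 */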
1355int amdgpu_atom_asic_init(struct atom_context *ctx)
1356{
1357 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1358 uint32_t ps[16];
1359 int ret;
1360
1361 memset(ps, 0, sizeof(ps));
1362
1363 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1364 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1365 if (!ps[0] || !ps[1])
1366 return 1;
1367
1368 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1369 return 1;
1370 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1371 if (ret)
1372 return ret;
1373
1374 memset(ps, 0, sizeof(ps));
1375
1376 return ret;
1377}
1378
1379void amdgpu_atom_destroy(struct atom_context *ctx)
1380{
1381 kfree(ctx->iio);
1382 kfree(ctx);
1383}
1384
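/*
 * Look up a data table by index and report its size, format revision and
 * content revision; returns false if the table is absent from this BIOS.
 *
 * Typical use (names illustrative):
 *
 *	uint16_t size, data_offset;
 *	uint8_t frev, crev;
 *
 *	if (amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
 *					  &data_offset))
 *		ptr = ctx->bios + data_offset;
 */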
1385bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1386 uint16_t *size, uint8_t *frev, uint8_t *crev,
1387 uint16_t *data_start)
1388{
1389 int offset = index * 2 + 4;
1390 int idx = CU16(ctx->data_table + offset);
1391 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1392
1393 if (!mdt[index])
1394 return false;
1395
1396 if (size)
1397 *size = CU16(idx);
1398 if (frev)
1399 *frev = CU8(idx + 2);
1400 if (crev)
1401 *crev = CU8(idx + 3);
1402 *data_start = idx;
1403 return true;
1404}
1405
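/*
 * Same lookup for a command table: only the format and content revision
 * bytes are reported.
 */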
1406bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
1407 uint8_t *crev)
1408{
1409 int offset = index * 2 + 4;
1410 int idx = CU16(ctx->cmd_table + offset);
1411 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1412
1413 if (!mct[index])
1414 return false;
1415
1416 if (frev)
1417 *frev = CU8(idx + 2);
1418 if (crev)
1419 *crev = CU8(idx + 3);
1420 return true;
1421}
1422
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <asm/unaligned.h>
29
30#include <drm/drm_util.h>
31
32#define ATOM_DEBUG
33
34#include "atomfirmware.h"
35#include "atom.h"
36#include "atom-names.h"
37#include "atom-bits.h"
38#include "amdgpu.h"
39
40#define ATOM_COND_ABOVE 0
41#define ATOM_COND_ABOVEOREQUAL 1
42#define ATOM_COND_ALWAYS 2
43#define ATOM_COND_BELOW 3
44#define ATOM_COND_BELOWOREQUAL 4
45#define ATOM_COND_EQUAL 5
46#define ATOM_COND_NOTEQUAL 6
47
48#define ATOM_PORT_ATI 0
49#define ATOM_PORT_PCI 1
50#define ATOM_PORT_SYSIO 2
51
52#define ATOM_UNIT_MICROSEC 0
53#define ATOM_UNIT_MILLISEC 1
54
55#define PLL_INDEX 2
56#define PLL_DATA 3
57
58#define ATOM_CMD_TIMEOUT_SEC 20
59
60typedef struct {
61 struct atom_context *ctx;
62 uint32_t *ps, *ws;
63 int ps_shift;
64 uint16_t start;
65 unsigned last_jump;
66 unsigned long last_jump_jiffies;
67 bool abort;
68} atom_exec_context;
69
70int amdgpu_atom_debug;
71static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
72int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
73
74static uint32_t atom_arg_mask[8] =
75 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
76 0xFF000000 };
77static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
78
79static int atom_dst_to_src[8][4] = {
80 /* translate destination alignment field to the source alignment encoding */
81 {0, 0, 0, 0},
82 {1, 2, 3, 0},
83 {1, 2, 3, 0},
84 {1, 2, 3, 0},
85 {4, 5, 6, 7},
86 {4, 5, 6, 7},
87 {4, 5, 6, 7},
88 {4, 5, 6, 7},
89};
90static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
91
92static int debug_depth;
93#ifdef ATOM_DEBUG
94static void debug_print_spaces(int n)
95{
96 while (n--)
97 printk(" ");
98}
99
100#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
101#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
102#else
103#define DEBUG(...) do { } while (0)
104#define SDEBUG(...) do { } while (0)
105#endif
106
107static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
108 uint32_t index, uint32_t data)
109{
110 uint32_t temp = 0xCDCDCDCD;
111
112 while (1)
113 switch (CU8(base)) {
114 case ATOM_IIO_NOP:
115 base++;
116 break;
117 case ATOM_IIO_READ:
118 temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
119 base += 3;
120 break;
121 case ATOM_IIO_WRITE:
122 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
123 base += 3;
124 break;
125 case ATOM_IIO_CLEAR:
126 temp &=
127 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
128 CU8(base + 2));
129 base += 3;
130 break;
131 case ATOM_IIO_SET:
132 temp |=
133 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
134 2);
135 base += 3;
136 break;
137 case ATOM_IIO_MOVE_INDEX:
138 temp &=
139 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
140 CU8(base + 3));
141 temp |=
142 ((index >> CU8(base + 2)) &
143 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
144 3);
145 base += 4;
146 break;
147 case ATOM_IIO_MOVE_DATA:
148 temp &=
149 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
150 CU8(base + 3));
151 temp |=
152 ((data >> CU8(base + 2)) &
153 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
154 3);
155 base += 4;
156 break;
157 case ATOM_IIO_MOVE_ATTR:
158 temp &=
159 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
160 CU8(base + 3));
161 temp |=
162 ((ctx->
163 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
164 CU8
165 (base
166 +
167 1))))
168 << CU8(base + 3);
169 base += 4;
170 break;
171 case ATOM_IIO_END:
172 return temp;
173 default:
174 pr_info("Unknown IIO opcode\n");
175 return 0;
176 }
177}
178
179static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
180 int *ptr, uint32_t *saved, int print)
181{
182 uint32_t idx, val = 0xCDCDCDCD, align, arg;
183 struct atom_context *gctx = ctx->ctx;
184 arg = attr & 7;
185 align = (attr >> 3) & 7;
186 switch (arg) {
187 case ATOM_ARG_REG:
188 idx = U16(*ptr);
189 (*ptr) += 2;
190 if (print)
191 DEBUG("REG[0x%04X]", idx);
192 idx += gctx->reg_block;
193 switch (gctx->io_mode) {
194 case ATOM_IO_MM:
195 val = gctx->card->reg_read(gctx->card, idx);
196 break;
197 case ATOM_IO_PCI:
198 pr_info("PCI registers are not implemented\n");
199 return 0;
200 case ATOM_IO_SYSIO:
201 pr_info("SYSIO registers are not implemented\n");
202 return 0;
203 default:
204 if (!(gctx->io_mode & 0x80)) {
205 pr_info("Bad IO mode\n");
206 return 0;
207 }
208 if (!gctx->iio[gctx->io_mode & 0x7F]) {
209 pr_info("Undefined indirect IO read method %d\n",
210 gctx->io_mode & 0x7F);
211 return 0;
212 }
213 val =
214 atom_iio_execute(gctx,
215 gctx->iio[gctx->io_mode & 0x7F],
216 idx, 0);
217 }
218 break;
219 case ATOM_ARG_PS:
220 idx = U8(*ptr);
221 (*ptr)++;
222 /* get_unaligned_le32 avoids unaligned accesses from atombios
223 * tables, noticed on a DEC Alpha. */
224 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
225 if (print)
226 DEBUG("PS[0x%02X,0x%04X]", idx, val);
227 break;
228 case ATOM_ARG_WS:
229 idx = U8(*ptr);
230 (*ptr)++;
231 if (print)
232 DEBUG("WS[0x%02X]", idx);
233 switch (idx) {
234 case ATOM_WS_QUOTIENT:
235 val = gctx->divmul[0];
236 break;
237 case ATOM_WS_REMAINDER:
238 val = gctx->divmul[1];
239 break;
240 case ATOM_WS_DATAPTR:
241 val = gctx->data_block;
242 break;
243 case ATOM_WS_SHIFT:
244 val = gctx->shift;
245 break;
246 case ATOM_WS_OR_MASK:
247 val = 1 << gctx->shift;
248 break;
249 case ATOM_WS_AND_MASK:
250 val = ~(1 << gctx->shift);
251 break;
252 case ATOM_WS_FB_WINDOW:
253 val = gctx->fb_base;
254 break;
255 case ATOM_WS_ATTRIBUTES:
256 val = gctx->io_attr;
257 break;
258 case ATOM_WS_REGPTR:
259 val = gctx->reg_block;
260 break;
261 default:
262 val = ctx->ws[idx];
263 }
264 break;
265 case ATOM_ARG_ID:
266 idx = U16(*ptr);
267 (*ptr) += 2;
268 if (print) {
269 if (gctx->data_block)
270 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
271 else
272 DEBUG("ID[0x%04X]", idx);
273 }
274 val = U32(idx + gctx->data_block);
275 break;
276 case ATOM_ARG_FB:
277 idx = U8(*ptr);
278 (*ptr)++;
279 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
280 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
281 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
282 val = 0;
283 } else
284 val = gctx->scratch[(gctx->fb_base / 4) + idx];
285 if (print)
286 DEBUG("FB[0x%02X]", idx);
287 break;
288 case ATOM_ARG_IMM:
289 switch (align) {
290 case ATOM_SRC_DWORD:
291 val = U32(*ptr);
292 (*ptr) += 4;
293 if (print)
294 DEBUG("IMM 0x%08X\n", val);
295 return val;
296 case ATOM_SRC_WORD0:
297 case ATOM_SRC_WORD8:
298 case ATOM_SRC_WORD16:
299 val = U16(*ptr);
300 (*ptr) += 2;
301 if (print)
302 DEBUG("IMM 0x%04X\n", val);
303 return val;
304 case ATOM_SRC_BYTE0:
305 case ATOM_SRC_BYTE8:
306 case ATOM_SRC_BYTE16:
307 case ATOM_SRC_BYTE24:
308 val = U8(*ptr);
309 (*ptr)++;
310 if (print)
311 DEBUG("IMM 0x%02X\n", val);
312 return val;
313 }
314 return 0;
315 case ATOM_ARG_PLL:
316 idx = U8(*ptr);
317 (*ptr)++;
318 if (print)
319 DEBUG("PLL[0x%02X]", idx);
320 val = gctx->card->pll_read(gctx->card, idx);
321 break;
322 case ATOM_ARG_MC:
323 idx = U8(*ptr);
324 (*ptr)++;
325 if (print)
326 DEBUG("MC[0x%02X]", idx);
327 val = gctx->card->mc_read(gctx->card, idx);
328 break;
329 }
330 if (saved)
331 *saved = val;
332 val &= atom_arg_mask[align];
333 val >>= atom_arg_shift[align];
334 if (print)
335 switch (align) {
336 case ATOM_SRC_DWORD:
337 DEBUG(".[31:0] -> 0x%08X\n", val);
338 break;
339 case ATOM_SRC_WORD0:
340 DEBUG(".[15:0] -> 0x%04X\n", val);
341 break;
342 case ATOM_SRC_WORD8:
343 DEBUG(".[23:8] -> 0x%04X\n", val);
344 break;
345 case ATOM_SRC_WORD16:
346 DEBUG(".[31:16] -> 0x%04X\n", val);
347 break;
348 case ATOM_SRC_BYTE0:
349 DEBUG(".[7:0] -> 0x%02X\n", val);
350 break;
351 case ATOM_SRC_BYTE8:
352 DEBUG(".[15:8] -> 0x%02X\n", val);
353 break;
354 case ATOM_SRC_BYTE16:
355 DEBUG(".[23:16] -> 0x%02X\n", val);
356 break;
357 case ATOM_SRC_BYTE24:
358 DEBUG(".[31:24] -> 0x%02X\n", val);
359 break;
360 }
361 return val;
362}
363
364static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
365{
366 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
367 switch (arg) {
368 case ATOM_ARG_REG:
369 case ATOM_ARG_ID:
370 (*ptr) += 2;
371 break;
372 case ATOM_ARG_PLL:
373 case ATOM_ARG_MC:
374 case ATOM_ARG_PS:
375 case ATOM_ARG_WS:
376 case ATOM_ARG_FB:
377 (*ptr)++;
378 break;
379 case ATOM_ARG_IMM:
380 switch (align) {
381 case ATOM_SRC_DWORD:
382 (*ptr) += 4;
383 return;
384 case ATOM_SRC_WORD0:
385 case ATOM_SRC_WORD8:
386 case ATOM_SRC_WORD16:
387 (*ptr) += 2;
388 return;
389 case ATOM_SRC_BYTE0:
390 case ATOM_SRC_BYTE8:
391 case ATOM_SRC_BYTE16:
392 case ATOM_SRC_BYTE24:
393 (*ptr)++;
394 return;
395 }
396 return;
397 }
398}
399
400static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
401{
402 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
403}
404
405static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
406{
407 uint32_t val = 0xCDCDCDCD;
408
409 switch (align) {
410 case ATOM_SRC_DWORD:
411 val = U32(*ptr);
412 (*ptr) += 4;
413 break;
414 case ATOM_SRC_WORD0:
415 case ATOM_SRC_WORD8:
416 case ATOM_SRC_WORD16:
417 val = U16(*ptr);
418 (*ptr) += 2;
419 break;
420 case ATOM_SRC_BYTE0:
421 case ATOM_SRC_BYTE8:
422 case ATOM_SRC_BYTE16:
423 case ATOM_SRC_BYTE24:
424 val = U8(*ptr);
425 (*ptr)++;
426 break;
427 }
428 return val;
429}
430
431static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
432 int *ptr, uint32_t *saved, int print)
433{
434 return atom_get_src_int(ctx,
435 arg | atom_dst_to_src[(attr >> 3) &
436 7][(attr >> 6) & 3] << 3,
437 ptr, saved, print);
438}
439
440static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
441{
442 atom_skip_src_int(ctx,
443 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
444 3] << 3, ptr);
445}
446
447static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
448 int *ptr, uint32_t val, uint32_t saved)
449{
450 uint32_t align =
451 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
452 val, idx;
453 struct atom_context *gctx = ctx->ctx;
454 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
455 val <<= atom_arg_shift[align];
456 val &= atom_arg_mask[align];
457 saved &= ~atom_arg_mask[align];
458 val |= saved;
459 switch (arg) {
460 case ATOM_ARG_REG:
461 idx = U16(*ptr);
462 (*ptr) += 2;
463 DEBUG("REG[0x%04X]", idx);
464 idx += gctx->reg_block;
465 switch (gctx->io_mode) {
466 case ATOM_IO_MM:
467 if (idx == 0)
468 gctx->card->reg_write(gctx->card, idx,
469 val << 2);
470 else
471 gctx->card->reg_write(gctx->card, idx, val);
472 break;
473 case ATOM_IO_PCI:
474 pr_info("PCI registers are not implemented\n");
475 return;
476 case ATOM_IO_SYSIO:
477 pr_info("SYSIO registers are not implemented\n");
478 return;
479 default:
480 if (!(gctx->io_mode & 0x80)) {
481 pr_info("Bad IO mode\n");
482 return;
483 }
484 if (!gctx->iio[gctx->io_mode & 0xFF]) {
485 pr_info("Undefined indirect IO write method %d\n",
486 gctx->io_mode & 0x7F);
487 return;
488 }
489 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
490 idx, val);
491 }
492 break;
493 case ATOM_ARG_PS:
494 idx = U8(*ptr);
495 (*ptr)++;
496 DEBUG("PS[0x%02X]", idx);
497 ctx->ps[idx] = cpu_to_le32(val);
498 break;
499 case ATOM_ARG_WS:
500 idx = U8(*ptr);
501 (*ptr)++;
502 DEBUG("WS[0x%02X]", idx);
503 switch (idx) {
504 case ATOM_WS_QUOTIENT:
505 gctx->divmul[0] = val;
506 break;
507 case ATOM_WS_REMAINDER:
508 gctx->divmul[1] = val;
509 break;
510 case ATOM_WS_DATAPTR:
511 gctx->data_block = val;
512 break;
513 case ATOM_WS_SHIFT:
514 gctx->shift = val;
515 break;
516 case ATOM_WS_OR_MASK:
517 case ATOM_WS_AND_MASK:
518 break;
519 case ATOM_WS_FB_WINDOW:
520 gctx->fb_base = val;
521 break;
522 case ATOM_WS_ATTRIBUTES:
523 gctx->io_attr = val;
524 break;
525 case ATOM_WS_REGPTR:
526 gctx->reg_block = val;
527 break;
528 default:
529 ctx->ws[idx] = val;
530 }
531 break;
532 case ATOM_ARG_FB:
533 idx = U8(*ptr);
534 (*ptr)++;
535 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
536 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
537 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
538 } else
539 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
540 DEBUG("FB[0x%02X]", idx);
541 break;
542 case ATOM_ARG_PLL:
543 idx = U8(*ptr);
544 (*ptr)++;
545 DEBUG("PLL[0x%02X]", idx);
546 gctx->card->pll_write(gctx->card, idx, val);
547 break;
548 case ATOM_ARG_MC:
549 idx = U8(*ptr);
550 (*ptr)++;
551 DEBUG("MC[0x%02X]", idx);
552 gctx->card->mc_write(gctx->card, idx, val);
553 return;
554 }
555 switch (align) {
556 case ATOM_SRC_DWORD:
557 DEBUG(".[31:0] <- 0x%08X\n", old_val);
558 break;
559 case ATOM_SRC_WORD0:
560 DEBUG(".[15:0] <- 0x%04X\n", old_val);
561 break;
562 case ATOM_SRC_WORD8:
563 DEBUG(".[23:8] <- 0x%04X\n", old_val);
564 break;
565 case ATOM_SRC_WORD16:
566 DEBUG(".[31:16] <- 0x%04X\n", old_val);
567 break;
568 case ATOM_SRC_BYTE0:
569 DEBUG(".[7:0] <- 0x%02X\n", old_val);
570 break;
571 case ATOM_SRC_BYTE8:
572 DEBUG(".[15:8] <- 0x%02X\n", old_val);
573 break;
574 case ATOM_SRC_BYTE16:
575 DEBUG(".[23:16] <- 0x%02X\n", old_val);
576 break;
577 case ATOM_SRC_BYTE24:
578 DEBUG(".[31:24] <- 0x%02X\n", old_val);
579 break;
580 }
581}
582
583static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
584{
585 uint8_t attr = U8((*ptr)++);
586 uint32_t dst, src, saved;
587 int dptr = *ptr;
588 SDEBUG(" dst: ");
589 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
590 SDEBUG(" src: ");
591 src = atom_get_src(ctx, attr, ptr);
592 dst += src;
593 SDEBUG(" dst: ");
594 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
595}
596
597static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
598{
599 uint8_t attr = U8((*ptr)++);
600 uint32_t dst, src, saved;
601 int dptr = *ptr;
602 SDEBUG(" dst: ");
603 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
604 SDEBUG(" src: ");
605 src = atom_get_src(ctx, attr, ptr);
606 dst &= src;
607 SDEBUG(" dst: ");
608 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
609}
610
611static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
612{
613 printk("ATOM BIOS beeped!\n");
614}
615
616static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
617{
618 int idx = U8((*ptr)++);
619 int r = 0;
620
621 if (idx < ATOM_TABLE_NAMES_CNT)
622 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
623 else
624 SDEBUG(" table: %d\n", idx);
625 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
626 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
627 if (r) {
628 ctx->abort = true;
629 }
630}
631
632static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
633{
634 uint8_t attr = U8((*ptr)++);
635 uint32_t saved;
636 int dptr = *ptr;
637 attr &= 0x38;
638 attr |= atom_def_dst[attr >> 3] << 6;
639 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
640 SDEBUG(" dst: ");
641 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
642}
643
644static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
645{
646 uint8_t attr = U8((*ptr)++);
647 uint32_t dst, src;
648 SDEBUG(" src1: ");
649 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
650 SDEBUG(" src2: ");
651 src = atom_get_src(ctx, attr, ptr);
652 ctx->ctx->cs_equal = (dst == src);
653 ctx->ctx->cs_above = (dst > src);
654 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
655 ctx->ctx->cs_above ? "GT" : "LE");
656}
657
658static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
659{
660 unsigned count = U8((*ptr)++);
661 SDEBUG(" count: %d\n", count);
662 if (arg == ATOM_UNIT_MICROSEC)
663 udelay(count);
664 else if (!drm_can_sleep())
665 mdelay(count);
666 else
667 msleep(count);
668}
669
670static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
671{
672 uint8_t attr = U8((*ptr)++);
673 uint32_t dst, src;
674 SDEBUG(" src1: ");
675 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
676 SDEBUG(" src2: ");
677 src = atom_get_src(ctx, attr, ptr);
678 if (src != 0) {
679 ctx->ctx->divmul[0] = dst / src;
680 ctx->ctx->divmul[1] = dst % src;
681 } else {
682 ctx->ctx->divmul[0] = 0;
683 ctx->ctx->divmul[1] = 0;
684 }
685}
686
687static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
688{
689 uint64_t val64;
690 uint8_t attr = U8((*ptr)++);
691 uint32_t dst, src;
692 SDEBUG(" src1: ");
693 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
694 SDEBUG(" src2: ");
695 src = atom_get_src(ctx, attr, ptr);
696 if (src != 0) {
697 val64 = dst;
698 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
699 do_div(val64, src);
700 ctx->ctx->divmul[0] = lower_32_bits(val64);
701 ctx->ctx->divmul[1] = upper_32_bits(val64);
702 } else {
703 ctx->ctx->divmul[0] = 0;
704 ctx->ctx->divmul[1] = 0;
705 }
706}
707
708static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
709{
710 /* functionally, a nop */
711}
712
713static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
714{
715 int execute = 0, target = U16(*ptr);
716 unsigned long cjiffies;
717
718 (*ptr) += 2;
719 switch (arg) {
720 case ATOM_COND_ABOVE:
721 execute = ctx->ctx->cs_above;
722 break;
723 case ATOM_COND_ABOVEOREQUAL:
724 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
725 break;
726 case ATOM_COND_ALWAYS:
727 execute = 1;
728 break;
729 case ATOM_COND_BELOW:
730 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
731 break;
732 case ATOM_COND_BELOWOREQUAL:
733 execute = !ctx->ctx->cs_above;
734 break;
735 case ATOM_COND_EQUAL:
736 execute = ctx->ctx->cs_equal;
737 break;
738 case ATOM_COND_NOTEQUAL:
739 execute = !ctx->ctx->cs_equal;
740 break;
741 }
742 if (arg != ATOM_COND_ALWAYS)
743 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
744 SDEBUG(" target: 0x%04X\n", target);
745 if (execute) {
746 if (ctx->last_jump == (ctx->start + target)) {
747 cjiffies = jiffies;
748 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
749 cjiffies -= ctx->last_jump_jiffies;
750 if (jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000) {
751 DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
752 ATOM_CMD_TIMEOUT_SEC);
753 ctx->abort = true;
754 }
755 } else {
756 /* jiffies wrapped around, we will just wait a little longer */
757 ctx->last_jump_jiffies = jiffies;
758 }
759 } else {
760 ctx->last_jump = ctx->start + target;
761 ctx->last_jump_jiffies = jiffies;
762 }
763 *ptr = ctx->start + target;
764 }
765}
766
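/*
 * MASK: read-modify-write, dst = (dst & mask) | src.  The mask is an
 * immediate operand taken from the instruction stream.
 */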
767static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
768{
769 uint8_t attr = U8((*ptr)++);
770 uint32_t dst, mask, src, saved;
771 int dptr = *ptr;
772 SDEBUG(" dst: ");
773 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
774 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
775 SDEBUG(" mask: 0x%08x\n", mask);
776 SDEBUG(" src: ");
777 src = atom_get_src(ctx, attr, ptr);
778 dst &= mask;
779 dst |= src;
780 SDEBUG(" dst: ");
781 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
782}
783
784static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
785{
786 uint8_t attr = U8((*ptr)++);
787 uint32_t src, saved;
788 int dptr = *ptr;
789 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
790 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
791 else {
792 atom_skip_dst(ctx, arg, attr, ptr);
793 saved = 0xCDCDCDCD;
794 }
795 SDEBUG(" src: ");
796 src = atom_get_src(ctx, attr, ptr);
797 SDEBUG(" dst: ");
798 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
799}
800
801static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
802{
803 uint8_t attr = U8((*ptr)++);
804 uint32_t dst, src;
805 SDEBUG(" src1: ");
806 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
807 SDEBUG(" src2: ");
808 src = atom_get_src(ctx, attr, ptr);
809 ctx->ctx->divmul[0] = dst * src;
810}
811
812static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
813{
814 uint64_t val64;
815 uint8_t attr = U8((*ptr)++);
816 uint32_t dst, src;
817 SDEBUG(" src1: ");
818 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
819 SDEBUG(" src2: ");
820 src = atom_get_src(ctx, attr, ptr);
821 val64 = (uint64_t)dst * (uint64_t)src;
822 ctx->ctx->divmul[0] = lower_32_bits(val64);
823 ctx->ctx->divmul[1] = upper_32_bits(val64);
824}
825
826static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
827{
828 /* nothing */
829}
830
831static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
832{
833 uint8_t attr = U8((*ptr)++);
834 uint32_t dst, src, saved;
835 int dptr = *ptr;
836 SDEBUG(" dst: ");
837 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
838 SDEBUG(" src: ");
839 src = atom_get_src(ctx, attr, ptr);
840 dst |= src;
841 SDEBUG(" dst: ");
842 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
843}
844
845static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
846{
847 uint8_t val = U8((*ptr)++);
848 SDEBUG("POST card output: 0x%02X\n", val);
849}
850
851static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
852{
853 pr_info("%s: unimplemented\n", __func__);
854}
855
856static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
857{
858 pr_info("%s: unimplemented\n", __func__);
859}
860
861static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
862{
863 pr_info("%s: unimplemented\n", __func__);
864}
865
866static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
867{
868 int idx = U8(*ptr);
869 (*ptr)++;
870 SDEBUG(" block: %d\n", idx);
871 if (!idx)
872 ctx->ctx->data_block = 0;
873 else if (idx == 255)
874 ctx->ctx->data_block = ctx->start;
875 else
876 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
877 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
878}
879
880static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
881{
882 uint8_t attr = U8((*ptr)++);
883 SDEBUG(" fb_base: ");
884 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
885}
886
887static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
888{
889 int port;
890 switch (arg) {
891 case ATOM_PORT_ATI:
892 port = U16(*ptr);
893 if (port < ATOM_IO_NAMES_CNT)
894 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
895 else
896 SDEBUG(" port: %d\n", port);
897 if (!port)
898 ctx->ctx->io_mode = ATOM_IO_MM;
899 else
900 ctx->ctx->io_mode = ATOM_IO_IIO | port;
901 (*ptr) += 2;
902 break;
903 case ATOM_PORT_PCI:
904 ctx->ctx->io_mode = ATOM_IO_PCI;
905 (*ptr)++;
906 break;
907 case ATOM_PORT_SYSIO:
908 ctx->ctx->io_mode = ATOM_IO_SYSIO;
909 (*ptr)++;
910 break;
911 }
912}
913
914static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
915{
916 ctx->ctx->reg_block = U16(*ptr);
917 (*ptr) += 2;
918 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
919}
920
921static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
922{
923 uint8_t attr = U8((*ptr)++), shift;
924 uint32_t saved, dst;
925 int dptr = *ptr;
926 attr &= 0x38;
927 attr |= atom_def_dst[attr >> 3] << 6;
928 SDEBUG(" dst: ");
929 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
930 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
931 SDEBUG(" shift: %d\n", shift);
932 dst <<= shift;
933 SDEBUG(" dst: ");
934 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
935}
936
937static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
938{
939 uint8_t attr = U8((*ptr)++), shift;
940 uint32_t saved, dst;
941 int dptr = *ptr;
942 attr &= 0x38;
943 attr |= atom_def_dst[attr >> 3] << 6;
944 SDEBUG(" dst: ");
945 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
946 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
947 SDEBUG(" shift: %d\n", shift);
948 dst >>= shift;
949 SDEBUG(" dst: ");
950 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
951}
952
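/*
 * SHL/SHR operate on the full saved destination dword (not just the
 * selected byte/word field): the whole value is shifted, then the
 * destination field is extracted from the result and written back.
 * SHIFT_LEFT/SHIFT_RIGHT above shift only the extracted field itself.
 */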
953static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
954{
955 uint8_t attr = U8((*ptr)++), shift;
956 uint32_t saved, dst;
957 int dptr = *ptr;
958 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
959 SDEBUG(" dst: ");
960 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
961 /* op needs the full dst value */
962 dst = saved;
963 shift = atom_get_src(ctx, attr, ptr);
964 SDEBUG(" shift: %d\n", shift);
965 dst <<= shift;
966 dst &= atom_arg_mask[dst_align];
967 dst >>= atom_arg_shift[dst_align];
968 SDEBUG(" dst: ");
969 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
970}
971
972static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
973{
974 uint8_t attr = U8((*ptr)++), shift;
975 uint32_t saved, dst;
976 int dptr = *ptr;
977 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
978 SDEBUG(" dst: ");
979 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
980 /* op needs the full dst value */
981 dst = saved;
982 shift = atom_get_src(ctx, attr, ptr);
983 SDEBUG(" shift: %d\n", shift);
984 dst >>= shift;
985 dst &= atom_arg_mask[dst_align];
986 dst >>= atom_arg_shift[dst_align];
987 SDEBUG(" dst: ");
988 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
989}
990
991static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
992{
993 uint8_t attr = U8((*ptr)++);
994 uint32_t dst, src, saved;
995 int dptr = *ptr;
996 SDEBUG(" dst: ");
997 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
998 SDEBUG(" src: ");
999 src = atom_get_src(ctx, attr, ptr);
1000 dst -= src;
1001 SDEBUG(" dst: ");
1002 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1003}
1004
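/*
 * SWITCH: compare src against a list of cases.  Each case starts with
 * ATOM_CASE_MAGIC, followed by an immediate value and a 16-bit jump target;
 * the list is terminated by ATOM_CASE_END.  On a match execution continues
 * at start + target, otherwise it falls through past the list.
 */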
1005static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1006{
1007 uint8_t attr = U8((*ptr)++);
1008 uint32_t src, val, target;
1009 SDEBUG(" switch: ");
1010 src = atom_get_src(ctx, attr, ptr);
1011 while (U16(*ptr) != ATOM_CASE_END)
1012 if (U8(*ptr) == ATOM_CASE_MAGIC) {
1013 (*ptr)++;
1014 SDEBUG(" case: ");
1015 val =
1016 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1017 ptr);
1018 target = U16(*ptr);
1019 if (val == src) {
1020 SDEBUG(" target: %04X\n", target);
1021 *ptr = ctx->start + target;
1022 return;
1023 }
1024 (*ptr) += 2;
1025 } else {
1026 pr_info("Bad case\n");
1027 return;
1028 }
1029 (*ptr) += 2;
1030}
1031
1032static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1033{
1034 uint8_t attr = U8((*ptr)++);
1035 uint32_t dst, src;
1036 SDEBUG(" src1: ");
1037 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1038 SDEBUG(" src2: ");
1039 src = atom_get_src(ctx, attr, ptr);
1040 ctx->ctx->cs_equal = ((dst & src) == 0);
1041 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1042}
1043
1044static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1045{
1046 uint8_t attr = U8((*ptr)++);
1047 uint32_t dst, src, saved;
1048 int dptr = *ptr;
1049 SDEBUG(" dst: ");
1050 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1051 SDEBUG(" src: ");
1052 src = atom_get_src(ctx, attr, ptr);
1053 dst ^= src;
1054 SDEBUG(" dst: ");
1055 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1056}
1057
1058static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1059{
1060 uint8_t val = U8((*ptr)++);
1061 SDEBUG("DEBUG output: 0x%02X\n", val);
1062}
1063
1064static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1065{
1066 uint16_t val = U16(*ptr);
1067 (*ptr) += val + 2;
1068 SDEBUG("PROCESSDS output: 0x%02X\n", val);
1069}
1070
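/*
 * Opcode dispatch table, indexed by the opcode byte fetched by the
 * execution loop below.  Each entry pairs a handler with a fixed argument
 * (operand address space for ALU ops, condition code for jumps, unit for
 * delays, port kind for SETPORT).  Entry 0 is unused.
 */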
1071static struct {
1072 void (*func) (atom_exec_context *, int *, int);
1073 int arg;
1074} opcode_table[ATOM_OP_CNT] = {
1075 { NULL, 0 },
1076 { atom_op_move, ATOM_ARG_REG },
1077 { atom_op_move, ATOM_ARG_PS },
1078 { atom_op_move, ATOM_ARG_WS },
1079 { atom_op_move, ATOM_ARG_FB },
1080 { atom_op_move, ATOM_ARG_PLL },
1081 { atom_op_move, ATOM_ARG_MC },
1082 { atom_op_and, ATOM_ARG_REG },
1083 { atom_op_and, ATOM_ARG_PS },
1084 { atom_op_and, ATOM_ARG_WS },
1085 { atom_op_and, ATOM_ARG_FB },
1086 { atom_op_and, ATOM_ARG_PLL },
1087 { atom_op_and, ATOM_ARG_MC },
1088 { atom_op_or, ATOM_ARG_REG },
1089 { atom_op_or, ATOM_ARG_PS },
1090 { atom_op_or, ATOM_ARG_WS },
1091 { atom_op_or, ATOM_ARG_FB },
1092 { atom_op_or, ATOM_ARG_PLL },
1093 { atom_op_or, ATOM_ARG_MC },
1094 { atom_op_shift_left, ATOM_ARG_REG },
1095 { atom_op_shift_left, ATOM_ARG_PS },
1096 { atom_op_shift_left, ATOM_ARG_WS },
1097 { atom_op_shift_left, ATOM_ARG_FB },
1098 { atom_op_shift_left, ATOM_ARG_PLL },
1099 { atom_op_shift_left, ATOM_ARG_MC },
1100 { atom_op_shift_right, ATOM_ARG_REG },
1101 { atom_op_shift_right, ATOM_ARG_PS },
1102 { atom_op_shift_right, ATOM_ARG_WS },
1103 { atom_op_shift_right, ATOM_ARG_FB },
1104 { atom_op_shift_right, ATOM_ARG_PLL },
1105 { atom_op_shift_right, ATOM_ARG_MC },
1106 { atom_op_mul, ATOM_ARG_REG },
1107 { atom_op_mul, ATOM_ARG_PS },
1108 { atom_op_mul, ATOM_ARG_WS },
1109 { atom_op_mul, ATOM_ARG_FB },
1110 { atom_op_mul, ATOM_ARG_PLL },
1111 { atom_op_mul, ATOM_ARG_MC },
1112 { atom_op_div, ATOM_ARG_REG },
1113 { atom_op_div, ATOM_ARG_PS },
1114 { atom_op_div, ATOM_ARG_WS },
1115 { atom_op_div, ATOM_ARG_FB },
1116 { atom_op_div, ATOM_ARG_PLL },
1117 { atom_op_div, ATOM_ARG_MC },
1118 { atom_op_add, ATOM_ARG_REG },
1119 { atom_op_add, ATOM_ARG_PS },
1120 { atom_op_add, ATOM_ARG_WS },
1121 { atom_op_add, ATOM_ARG_FB },
1122 { atom_op_add, ATOM_ARG_PLL },
1123 { atom_op_add, ATOM_ARG_MC },
1124 { atom_op_sub, ATOM_ARG_REG },
1125 { atom_op_sub, ATOM_ARG_PS },
1126 { atom_op_sub, ATOM_ARG_WS },
1127 { atom_op_sub, ATOM_ARG_FB },
1128 { atom_op_sub, ATOM_ARG_PLL },
1129 { atom_op_sub, ATOM_ARG_MC },
1130 { atom_op_setport, ATOM_PORT_ATI },
1131 { atom_op_setport, ATOM_PORT_PCI },
1132 { atom_op_setport, ATOM_PORT_SYSIO },
1133 { atom_op_setregblock, 0 },
1134 { atom_op_setfbbase, 0 },
1135 { atom_op_compare, ATOM_ARG_REG },
1136 { atom_op_compare, ATOM_ARG_PS },
1137 { atom_op_compare, ATOM_ARG_WS },
1138 { atom_op_compare, ATOM_ARG_FB },
1139 { atom_op_compare, ATOM_ARG_PLL },
1140 { atom_op_compare, ATOM_ARG_MC },
1141 { atom_op_switch, 0 },
1142 { atom_op_jump, ATOM_COND_ALWAYS },
1143 { atom_op_jump, ATOM_COND_EQUAL },
1144 { atom_op_jump, ATOM_COND_BELOW },
1145 { atom_op_jump, ATOM_COND_ABOVE },
1146 { atom_op_jump, ATOM_COND_BELOWOREQUAL },
1147 { atom_op_jump, ATOM_COND_ABOVEOREQUAL },
1148 { atom_op_jump, ATOM_COND_NOTEQUAL },
1149 { atom_op_test, ATOM_ARG_REG },
1150 { atom_op_test, ATOM_ARG_PS },
1151 { atom_op_test, ATOM_ARG_WS },
1152 { atom_op_test, ATOM_ARG_FB },
1153 { atom_op_test, ATOM_ARG_PLL },
1154 { atom_op_test, ATOM_ARG_MC },
1155 { atom_op_delay, ATOM_UNIT_MILLISEC },
1156 { atom_op_delay, ATOM_UNIT_MICROSEC },
1157 { atom_op_calltable, 0 },
1158 { atom_op_repeat, 0 },
1159 { atom_op_clear, ATOM_ARG_REG },
1160 { atom_op_clear, ATOM_ARG_PS },
1161 { atom_op_clear, ATOM_ARG_WS },
1162 { atom_op_clear, ATOM_ARG_FB },
1163 { atom_op_clear, ATOM_ARG_PLL },
1164 { atom_op_clear, ATOM_ARG_MC },
1165 { atom_op_nop, 0 },
1166 { atom_op_eot, 0 },
1167 { atom_op_mask, ATOM_ARG_REG },
1168 { atom_op_mask, ATOM_ARG_PS },
1169 { atom_op_mask, ATOM_ARG_WS },
1170 { atom_op_mask, ATOM_ARG_FB },
1171 { atom_op_mask, ATOM_ARG_PLL },
1172 { atom_op_mask, ATOM_ARG_MC },
1173 { atom_op_postcard, 0 },
1174 { atom_op_beep, 0 },
1175 { atom_op_savereg, 0 },
1176 { atom_op_restorereg, 0 },
1177 { atom_op_setdatablock, 0 },
1178 { atom_op_xor, ATOM_ARG_REG },
1179 { atom_op_xor, ATOM_ARG_PS },
1180 { atom_op_xor, ATOM_ARG_WS },
1181 { atom_op_xor, ATOM_ARG_FB },
1182 { atom_op_xor, ATOM_ARG_PLL },
1183 { atom_op_xor, ATOM_ARG_MC },
1184 { atom_op_shl, ATOM_ARG_REG },
1185 { atom_op_shl, ATOM_ARG_PS },
1186 { atom_op_shl, ATOM_ARG_WS },
1187 { atom_op_shl, ATOM_ARG_FB },
1188 { atom_op_shl, ATOM_ARG_PLL },
1189 { atom_op_shl, ATOM_ARG_MC },
1190 { atom_op_shr, ATOM_ARG_REG },
1191 { atom_op_shr, ATOM_ARG_PS },
1192 { atom_op_shr, ATOM_ARG_WS },
1193 { atom_op_shr, ATOM_ARG_FB },
1194 { atom_op_shr, ATOM_ARG_PLL },
1195 { atom_op_shr, ATOM_ARG_MC },
1196 { atom_op_debug, 0 },
1197 { atom_op_processds, 0 },
1198 { atom_op_mul32, ATOM_ARG_PS },
1199 { atom_op_mul32, ATOM_ARG_WS },
1200 { atom_op_div32, ATOM_ARG_PS },
1201 { atom_op_div32, ATOM_ARG_WS },
1202 };
1203
1204
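/*
 * Core interpreter loop.  Looks up the command table at 'index', reads its
 * header (total length, workspace and parameter space sizes), allocates a
 * scratch workspace, then fetches and dispatches opcodes through
 * opcode_table[] until EOT is reached or a handler requests an abort.
 */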
1205static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
1206{
1207 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1208 int len, ws, ps, ptr;
1209 unsigned char op;
1210 atom_exec_context ectx;
1211 int ret = 0;
1212
1213 if (!base)
1214 return -EINVAL;
1215
1216 len = CU16(base + ATOM_CT_SIZE_PTR);
1217 ws = CU8(base + ATOM_CT_WS_PTR);
1218 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1219 ptr = base + ATOM_CT_CODE_PTR;
1220
1221 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1222
1223 ectx.ctx = ctx;
1224 ectx.ps_shift = ps / 4;
1225 ectx.start = base;
1226 ectx.ps = params;
1227 ectx.abort = false;
1228 ectx.last_jump = 0;
1229 if (ws)
1230 ectx.ws = kcalloc(4, ws, GFP_KERNEL);
1231 else
1232 ectx.ws = NULL;
1233
1234 debug_depth++;
1235 while (1) {
1236 op = CU8(ptr++);
1237 if (op < ATOM_OP_NAMES_CNT)
1238 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1239 else
1240 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1241 if (ectx.abort) {
1242 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1243 base, len, ws, ps, ptr - 1);
1244 ret = -EINVAL;
1245 goto free;
1246 }
1247
1248 if (op < ATOM_OP_CNT && op > 0)
1249 opcode_table[op].func(&ectx, &ptr,
1250 opcode_table[op].arg);
1251 else
1252 break;
1253
1254 if (op == ATOM_OP_EOT)
1255 break;
1256 }
1257 debug_depth--;
1258 SDEBUG("<<\n");
1259
1260free:
1261 if (ws)
1262 kfree(ectx.ws);
1263 return ret;
1264}
1265
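/*
 * Public entry point: serializes BIOS table execution with ctx->mutex and
 * resets the per-call interpreter state (data/reg blocks, FB window,
 * I/O mode, divmul results) before running the table.
 */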
1266int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
1267{
1268 int r;
1269
1270 mutex_lock(&ctx->mutex);
1271 /* reset data block */
1272 ctx->data_block = 0;
1273 /* reset reg block */
1274 ctx->reg_block = 0;
1275 /* reset fb window */
1276 ctx->fb_base = 0;
1277 /* reset io mode */
1278 ctx->io_mode = ATOM_IO_MM;
1279 /* reset divmul */
1280 ctx->divmul[0] = 0;
1281 ctx->divmul[1] = 0;
1282 r = amdgpu_atom_execute_table_locked(ctx, index, params);
1283 mutex_unlock(&ctx->mutex);
1284 return r;
1285}
1286
1287static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1288
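/*
 * Build the indirect I/O (IIO) index: a 256-entry table of offsets into the
 * BIOS image, keyed by IIO port id.  Each program starts with
 * ATOM_IIO_START <id> and is skipped using atom_iio_len[] until
 * ATOM_IIO_END, so that atom_iio_execute() can later jump straight to it.
 */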
1289static void atom_index_iio(struct atom_context *ctx, int base)
1290{
1291 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1292 if (!ctx->iio)
1293 return;
1294 while (CU8(base) == ATOM_IIO_START) {
1295 ctx->iio[CU8(base + 1)] = base + 2;
1296 base += 2;
1297 while (CU8(base) != ATOM_IIO_END)
1298 base += atom_iio_len[CU8(base)];
1299 base += 3;
1300 }
1301}
1302
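/*
 * Extract the human readable VBIOS name that follows the ATOM string block
 * into ctx->name, trimming trailing spaces; "--N/A--" is used when the
 * string block is absent.
 */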
1303static void atom_get_vbios_name(struct atom_context *ctx)
1304{
1305 unsigned char *p_rom;
1306 unsigned char str_num;
1307 unsigned short off_to_vbios_str;
1308 unsigned char *c_ptr;
1309 int name_size;
1310 int i;
1311
1312 const char *na = "--N/A--";
1313 char *back;
1314
1315 p_rom = ctx->bios;
1316
1317 str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
1318 if (str_num != 0) {
1319 off_to_vbios_str =
1320 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1321
1322 c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
1323 } else {
1324 /* do not know where to find name */
1325 memcpy(ctx->name, na, 7);
1326 ctx->name[7] = 0;
1327 return;
1328 }
1329
1330 /*
1331 * skip the atombios strings, usually 4
1332 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
1333 */
1334 for (i = 0; i < str_num; i++) {
1335 while (*c_ptr != 0)
1336 c_ptr++;
1337 c_ptr++;
1338 }
1339
1340 /* skip the following 2 chars: 0x0D 0x0A */
1341 c_ptr += 2;
1342
1343 name_size = strnlen(c_ptr, STRLEN_LONG - 1);
1344 memcpy(ctx->name, c_ptr, name_size);
1345 back = ctx->name + name_size;
1346 while ((*--back) == ' ')
1347 ;
1348 *(back + 1) = '\0';
1349}
1350
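/*
 * Convert the raw "MM/DD/YY HH:MM" date stored at OFFSET_TO_VBIOS_DATE
 * into "20YY/MM/DD HH:MM" in ctx->date.
 */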
1351static void atom_get_vbios_date(struct atom_context *ctx)
1352{
1353 unsigned char *p_rom;
1354 unsigned char *date_in_rom;
1355
1356 p_rom = ctx->bios;
1357
1358 date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
1359
1360 ctx->date[0] = '2';
1361 ctx->date[1] = '0';
1362 ctx->date[2] = date_in_rom[6];
1363 ctx->date[3] = date_in_rom[7];
1364 ctx->date[4] = '/';
1365 ctx->date[5] = date_in_rom[0];
1366 ctx->date[6] = date_in_rom[1];
1367 ctx->date[7] = '/';
1368 ctx->date[8] = date_in_rom[3];
1369 ctx->date[9] = date_in_rom[4];
1370 ctx->date[10] = ' ';
1371 ctx->date[11] = date_in_rom[9];
1372 ctx->date[12] = date_in_rom[10];
1373 ctx->date[13] = date_in_rom[11];
1374 ctx->date[14] = date_in_rom[12];
1375 ctx->date[15] = date_in_rom[13];
1376 ctx->date[16] = '\0';
1377}
1378
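/*
 * Naive substring search over the BIOS image between byte offsets 'start'
 * and 'end'; returns a pointer to the first match of 'str' (at most
 * 'maxlen' characters are compared) or NULL if it is not found.
 */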
1379static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
1380 int end, int maxlen)
1381{
1382 unsigned long str_off;
1383 unsigned char *p_rom;
1384 unsigned short str_len;
1385
1386 str_off = 0;
1387 str_len = strnlen(str, maxlen);
1388 p_rom = ctx->bios;
1389
1390 for (; start <= end; ++start) {
1391 for (str_off = 0; str_off < str_len; ++str_off) {
1392 if (str[str_off] != *(p_rom + start + str_off))
1393 break;
1394 }
1395
1396 if (str_off == str_len || str[str_off] == 0)
1397 return p_rom + start;
1398 }
1399 return NULL;
1400}
1401
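/*
 * Extract the VBIOS part number into ctx->vbios_pn: prefer the first ATOM
 * string, fall back to OFFSET_TO_VBIOS_PART_NUMBER, and as a last resort
 * search for the BIOS_ATOM_PREFIX anchor in the first 1KB of the image.
 * Copying stops at the first character outside the ' '..'z' range.
 */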
1402static void atom_get_vbios_pn(struct atom_context *ctx)
1403{
1404 unsigned char *p_rom;
1405 unsigned short off_to_vbios_str;
1406 unsigned char *vbios_str;
1407 int count;
1408
1409 off_to_vbios_str = 0;
1410 p_rom = ctx->bios;
1411
1412 if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
1413 off_to_vbios_str =
1414 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1415
1416 vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
1417 } else {
1418 vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
1419 }
1420
1421 if (*vbios_str == 0) {
1422 vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
1423 if (vbios_str != NULL) /* skip past the matched prefix */
1424 vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
1425 }
1426 if (vbios_str != NULL && *vbios_str == 0)
1427 vbios_str++;
1428
1429 if (vbios_str != NULL) {
1430 count = 0;
1431 while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
1432 vbios_str[count] <= 'z') {
1433 ctx->vbios_pn[count] = vbios_str[count];
1434 count++;
1435 }
1436
1437 ctx->vbios_pn[count] = 0;
1438 }
1439}
1440
1441static void atom_get_vbios_version(struct atom_context *ctx)
1442{
1443 unsigned char *vbios_ver;
1444
1445 /* find anchor ATOMBIOSBK-AMD */
1446 vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
1447 if (vbios_ver != NULL) {
1448 /* skip ATOMBIOSBK-AMD VER */
1449 vbios_ver += 18;
1450 memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
1451 } else {
1452 ctx->vbios_ver_str[0] = '\0';
1453 }
1454}
1455
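/*
 * Parse and validate an ATOM BIOS image: check the BIOS, ATI and ATOM ROM
 * magics, record the command and data master table offsets, index the IIO
 * programs and extract the VBIOS name, part number, date and version
 * strings.  Returns a freshly allocated context (free with
 * amdgpu_atom_destroy()) or NULL on failure.
 */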
1456struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1457{
1458 int base;
1459 struct atom_context *ctx =
1460 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1461 char *str;
1462 struct _ATOM_ROM_HEADER *atom_rom_header;
1463 struct _ATOM_MASTER_DATA_TABLE *master_table;
1464 struct _ATOM_FIRMWARE_INFO *atom_fw_info;
1465 u16 idx;
1466
1467 if (!ctx)
1468 return NULL;
1469
1470 ctx->card = card;
1471 ctx->bios = bios;
1472
1473 if (CU16(0) != ATOM_BIOS_MAGIC) {
1474 pr_info("Invalid BIOS magic\n");
1475 kfree(ctx);
1476 return NULL;
1477 }
1478 if (strncmp
1479 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1480 strlen(ATOM_ATI_MAGIC))) {
1481 pr_info("Invalid ATI magic\n");
1482 kfree(ctx);
1483 return NULL;
1484 }
1485
1486 base = CU16(ATOM_ROM_TABLE_PTR);
1487 if (strncmp
1488 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1489 strlen(ATOM_ROM_MAGIC))) {
1490 pr_info("Invalid ATOM magic\n");
1491 kfree(ctx);
1492 return NULL;
1493 }
1494
1495 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1496 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1497 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1498 if (!ctx->iio) {
1499 amdgpu_atom_destroy(ctx);
1500 return NULL;
1501 }
1502
1503 idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1504 if (idx == 0)
1505 idx = 0x80;
1506
1507 str = CSTR(idx);
1508 if (*str != '\0') {
1509 pr_info("ATOM BIOS: %s\n", str);
1510 strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1511 }
1512
1513 atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
1514 if (atom_rom_header->usMasterDataTableOffset != 0) {
1515 master_table = (struct _ATOM_MASTER_DATA_TABLE *)
1516 CSTR(atom_rom_header->usMasterDataTableOffset);
1517 if (master_table->ListOfDataTables.FirmwareInfo != 0) {
1518 atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
1519 CSTR(master_table->ListOfDataTables.FirmwareInfo);
1520 ctx->version = atom_fw_info->ulFirmwareRevision;
1521 }
1522 }
1523
1524 atom_get_vbios_name(ctx);
1525 atom_get_vbios_pn(ctx);
1526 atom_get_vbios_date(ctx);
1527 atom_get_vbios_version(ctx);
1528
1529 return ctx;
1530}
1531
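/*
 * Run the ASIC_INIT command table with the default engine and memory
 * clocks from the FirmwareInfo data table as parameters.  Returns non-zero
 * if the clocks or the init table are missing, or if the table fails.
 */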
1532int amdgpu_atom_asic_init(struct atom_context *ctx)
1533{
1534 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1535 uint32_t ps[16];
1536 int ret;
1537
1538 memset(ps, 0, sizeof(ps));
1539
1540 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1541 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1542 if (!ps[0] || !ps[1])
1543 return 1;
1544
1545 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1546 return 1;
1547 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1548 if (ret)
1549 return ret;
1550
1551 memset(ps, 0, sizeof(ps));
1552
1553 return ret;
1554}
1555
1556void amdgpu_atom_destroy(struct atom_context *ctx)
1557{
1558 kfree(ctx->iio);
1559 kfree(ctx);
1560}
1561
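/*
 * Look up a data table in the master data table and return its common
 * header fields: total size, format revision and content revision, plus
 * the table's start offset.  Returns false if the table is not present.
 */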
1562bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1563 uint16_t *size, uint8_t *frev, uint8_t *crev,
1564 uint16_t *data_start)
1565{
1566 int offset = index * 2 + 4;
1567 int idx = CU16(ctx->data_table + offset);
1568 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1569
1570 if (!mdt[index])
1571 return false;
1572
1573 if (size)
1574 *size = CU16(idx);
1575 if (frev)
1576 *frev = CU8(idx + 2);
1577 if (crev)
1578 *crev = CU8(idx + 3);
1579 *data_start = idx;
1580 return true;
1581}
1582
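/*
 * Same as amdgpu_atom_parse_data_header() but for a command table entry:
 * reports only the format and content revisions.  Returns false if the
 * command table is not present.
 */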
1583bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
1584 uint8_t *crev)
1585{
1586 int offset = index * 2 + 4;
1587 int idx = CU16(ctx->cmd_table + offset);
1588 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1589
1590 if (!mct[index])
1591 return false;
1592
1593 if (frev)
1594 *frev = CU8(idx + 2);
1595 if (crev)
1596 *crev = CU8(idx + 3);
1597 return true;
1598}
1599