v6.9.4
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Author: Stanislaw Skowronek
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/sched.h>
  27#include <linux/slab.h>
  28#include <linux/string_helpers.h>
  29
  30#include <asm/unaligned.h>
  31
  32#include <drm/drm_util.h>
  33
  34#define ATOM_DEBUG
  35
  36#include "atomfirmware.h"
  37#include "atom.h"
  38#include "atom-names.h"
  39#include "atom-bits.h"
  40#include "amdgpu.h"
  41
  42#define ATOM_COND_ABOVE		0
  43#define ATOM_COND_ABOVEOREQUAL	1
  44#define ATOM_COND_ALWAYS	2
  45#define ATOM_COND_BELOW		3
  46#define ATOM_COND_BELOWOREQUAL	4
  47#define ATOM_COND_EQUAL		5
  48#define ATOM_COND_NOTEQUAL	6
  49
  50#define ATOM_PORT_ATI	0
  51#define ATOM_PORT_PCI	1
  52#define ATOM_PORT_SYSIO	2
  53
  54#define ATOM_UNIT_MICROSEC	0
  55#define ATOM_UNIT_MILLISEC	1
  56
  57#define PLL_INDEX	2
  58#define PLL_DATA	3
  59
  60#define ATOM_CMD_TIMEOUT_SEC	20
  61
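     /* Per-table-invocation interpreter state: the parameter space (ps),
      * the work space (ws) and the bookkeeping used to detect stuck loops. */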
  62typedef struct {
  63	struct atom_context *ctx;
  64	uint32_t *ps, *ws;
  65	int ps_size, ws_size;
  66	int ps_shift;
  67	uint16_t start;
  68	unsigned last_jump;
  69	unsigned long last_jump_jiffies;
  70	bool abort;
  71} atom_exec_context;
  72
  73int amdgpu_atom_debug;
  74static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
  75int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
  76
  77static uint32_t atom_arg_mask[8] =
  78	{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
  79	  0xFF000000 };
  80static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
  81
  82static int atom_dst_to_src[8][4] = {
  83	/* translate destination alignment field to the source alignment encoding */
  84	{0, 0, 0, 0},
  85	{1, 2, 3, 0},
  86	{1, 2, 3, 0},
  87	{1, 2, 3, 0},
  88	{4, 5, 6, 7},
  89	{4, 5, 6, 7},
  90	{4, 5, 6, 7},
  91	{4, 5, 6, 7},
  92};
  93static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
  94
  95static int debug_depth;
  96#ifdef ATOM_DEBUG
  97static void debug_print_spaces(int n)
  98{
  99	while (n--)
 100		printk("   ");
 101}
 102
 103#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
 104#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
 105#else
 106#define DEBUG(...) do { } while (0)
 107#define SDEBUG(...) do { } while (0)
 108#endif
 109
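     /* Run an indirect IO (IIO) program from the BIOS image starting at
      * 'base' and return the value accumulated in the temp register. */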
 110static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
 111				 uint32_t index, uint32_t data)
 112{
 113	uint32_t temp = 0xCDCDCDCD;
 114
 115	while (1)
 116		switch (CU8(base)) {
 117		case ATOM_IIO_NOP:
 118			base++;
 119			break;
 120		case ATOM_IIO_READ:
 121			temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
 122			base += 3;
 123			break;
 124		case ATOM_IIO_WRITE:
 125			ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
 126			base += 3;
 127			break;
 128		case ATOM_IIO_CLEAR:
 129			temp &=
 130			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 131			      CU8(base + 2));
 132			base += 3;
 133			break;
 134		case ATOM_IIO_SET:
 135			temp |=
 136			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
 137									2);
 138			base += 3;
 139			break;
 140		case ATOM_IIO_MOVE_INDEX:
 141			temp &=
 142			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 143			      CU8(base + 3));
 144			temp |=
 145			    ((index >> CU8(base + 2)) &
 146			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
 147									  3);
 148			base += 4;
 149			break;
 150		case ATOM_IIO_MOVE_DATA:
 151			temp &=
 152			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 153			      CU8(base + 3));
 154			temp |=
 155			    ((data >> CU8(base + 2)) &
 156			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
 157									  3);
 158			base += 4;
 159			break;
 160		case ATOM_IIO_MOVE_ATTR:
 161			temp &=
 162			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 163			      CU8(base + 3));
 164			temp |=
 165			    ((ctx->
 166			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
 167									  CU8
 168									  (base
 169									   +
 170									   1))))
 171			    << CU8(base + 3);
 172			base += 4;
 173			break;
 174		case ATOM_IIO_END:
 175			return temp;
 176		default:
 177			pr_info("Unknown IIO opcode\n");
 178			return 0;
 179		}
 180}
 181
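     /* Fetch a source operand at *ptr: decode its argument type (register,
      * parameter space, work space, data table, frame-buffer scratch,
      * immediate, PLL or MC) and alignment, optionally storing the raw
      * value in *saved before masking and shifting. */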
 182static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 183				 int *ptr, uint32_t *saved, int print)
 184{
 185	uint32_t idx, val = 0xCDCDCDCD, align, arg;
 186	struct atom_context *gctx = ctx->ctx;
 187	arg = attr & 7;
 188	align = (attr >> 3) & 7;
 189	switch (arg) {
 190	case ATOM_ARG_REG:
 191		idx = U16(*ptr);
 192		(*ptr) += 2;
 193		if (print)
 194			DEBUG("REG[0x%04X]", idx);
 195		idx += gctx->reg_block;
 196		switch (gctx->io_mode) {
 197		case ATOM_IO_MM:
 198			val = gctx->card->reg_read(gctx->card, idx);
 199			break;
 200		case ATOM_IO_PCI:
 201			pr_info("PCI registers are not implemented\n");
 202			return 0;
 203		case ATOM_IO_SYSIO:
 204			pr_info("SYSIO registers are not implemented\n");
 205			return 0;
 206		default:
 207			if (!(gctx->io_mode & 0x80)) {
 208				pr_info("Bad IO mode\n");
 209				return 0;
 210			}
 211			if (!gctx->iio[gctx->io_mode & 0x7F]) {
 212				pr_info("Undefined indirect IO read method %d\n",
 213					gctx->io_mode & 0x7F);
 214				return 0;
 215			}
 216			val =
 217			    atom_iio_execute(gctx,
 218					     gctx->iio[gctx->io_mode & 0x7F],
 219					     idx, 0);
 220		}
 221		break;
 222	case ATOM_ARG_PS:
 223		idx = U8(*ptr);
 224		(*ptr)++;
 225		/* get_unaligned_le32 avoids unaligned accesses from atombios
 226		 * tables, noticed on a DEC Alpha. */
 227		if (idx < ctx->ps_size)
 228			val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
 229		else
 230			pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
 231		if (print)
 232			DEBUG("PS[0x%02X,0x%04X]", idx, val);
 233		break;
 234	case ATOM_ARG_WS:
 235		idx = U8(*ptr);
 236		(*ptr)++;
 237		if (print)
 238			DEBUG("WS[0x%02X]", idx);
 239		switch (idx) {
 240		case ATOM_WS_QUOTIENT:
 241			val = gctx->divmul[0];
 242			break;
 243		case ATOM_WS_REMAINDER:
 244			val = gctx->divmul[1];
 245			break;
 246		case ATOM_WS_DATAPTR:
 247			val = gctx->data_block;
 248			break;
 249		case ATOM_WS_SHIFT:
 250			val = gctx->shift;
 251			break;
 252		case ATOM_WS_OR_MASK:
 253			val = 1 << gctx->shift;
 254			break;
 255		case ATOM_WS_AND_MASK:
 256			val = ~(1 << gctx->shift);
 257			break;
 258		case ATOM_WS_FB_WINDOW:
 259			val = gctx->fb_base;
 260			break;
 261		case ATOM_WS_ATTRIBUTES:
 262			val = gctx->io_attr;
 263			break;
 264		case ATOM_WS_REGPTR:
 265			val = gctx->reg_block;
 266			break;
 267		default:
 268			if (idx < ctx->ws_size)
 269				val = ctx->ws[idx];
 270			else
 271				pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
 272		}
 273		break;
 274	case ATOM_ARG_ID:
 275		idx = U16(*ptr);
 276		(*ptr) += 2;
 277		if (print) {
 278			if (gctx->data_block)
 279				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
 280			else
 281				DEBUG("ID[0x%04X]", idx);
 282		}
 283		val = U32(idx + gctx->data_block);
 284		break;
 285	case ATOM_ARG_FB:
 286		idx = U8(*ptr);
 287		(*ptr)++;
 288		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
 289			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
 290				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
 291			val = 0;
 292		} else
 293			val = gctx->scratch[(gctx->fb_base / 4) + idx];
 294		if (print)
 295			DEBUG("FB[0x%02X]", idx);
 296		break;
 297	case ATOM_ARG_IMM:
 298		switch (align) {
 299		case ATOM_SRC_DWORD:
 300			val = U32(*ptr);
 301			(*ptr) += 4;
 302			if (print)
 303				DEBUG("IMM 0x%08X\n", val);
 304			return val;
 305		case ATOM_SRC_WORD0:
 306		case ATOM_SRC_WORD8:
 307		case ATOM_SRC_WORD16:
 308			val = U16(*ptr);
 309			(*ptr) += 2;
 310			if (print)
 311				DEBUG("IMM 0x%04X\n", val);
 312			return val;
 313		case ATOM_SRC_BYTE0:
 314		case ATOM_SRC_BYTE8:
 315		case ATOM_SRC_BYTE16:
 316		case ATOM_SRC_BYTE24:
 317			val = U8(*ptr);
 318			(*ptr)++;
 319			if (print)
 320				DEBUG("IMM 0x%02X\n", val);
 321			return val;
 322		}
 323		break;
 324	case ATOM_ARG_PLL:
 325		idx = U8(*ptr);
 326		(*ptr)++;
 327		if (print)
 328			DEBUG("PLL[0x%02X]", idx);
 329		val = gctx->card->pll_read(gctx->card, idx);
 330		break;
 331	case ATOM_ARG_MC:
 332		idx = U8(*ptr);
 333		(*ptr)++;
 334		if (print)
 335			DEBUG("MC[0x%02X]", idx);
 336		val = gctx->card->mc_read(gctx->card, idx);
 337		break;
 338	}
 339	if (saved)
 340		*saved = val;
 341	val &= atom_arg_mask[align];
 342	val >>= atom_arg_shift[align];
 343	if (print)
 344		switch (align) {
 345		case ATOM_SRC_DWORD:
 346			DEBUG(".[31:0] -> 0x%08X\n", val);
 347			break;
 348		case ATOM_SRC_WORD0:
 349			DEBUG(".[15:0] -> 0x%04X\n", val);
 350			break;
 351		case ATOM_SRC_WORD8:
 352			DEBUG(".[23:8] -> 0x%04X\n", val);
 353			break;
 354		case ATOM_SRC_WORD16:
 355			DEBUG(".[31:16] -> 0x%04X\n", val);
 356			break;
 357		case ATOM_SRC_BYTE0:
 358			DEBUG(".[7:0] -> 0x%02X\n", val);
 359			break;
 360		case ATOM_SRC_BYTE8:
 361			DEBUG(".[15:8] -> 0x%02X\n", val);
 362			break;
 363		case ATOM_SRC_BYTE16:
 364			DEBUG(".[23:16] -> 0x%02X\n", val);
 365			break;
 366		case ATOM_SRC_BYTE24:
 367			DEBUG(".[31:24] -> 0x%02X\n", val);
 368			break;
 369		}
 370	return val;
 371}
 372
 373static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
 374{
 375	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
 376	switch (arg) {
 377	case ATOM_ARG_REG:
 378	case ATOM_ARG_ID:
 379		(*ptr) += 2;
 380		break;
 381	case ATOM_ARG_PLL:
 382	case ATOM_ARG_MC:
 383	case ATOM_ARG_PS:
 384	case ATOM_ARG_WS:
 385	case ATOM_ARG_FB:
 386		(*ptr)++;
 387		break;
 388	case ATOM_ARG_IMM:
 389		switch (align) {
 390		case ATOM_SRC_DWORD:
 391			(*ptr) += 4;
 392			return;
 393		case ATOM_SRC_WORD0:
 394		case ATOM_SRC_WORD8:
 395		case ATOM_SRC_WORD16:
 396			(*ptr) += 2;
 397			return;
 398		case ATOM_SRC_BYTE0:
 399		case ATOM_SRC_BYTE8:
 400		case ATOM_SRC_BYTE16:
 401		case ATOM_SRC_BYTE24:
 402			(*ptr)++;
 403			return;
 404		}
 405	}
 406}
 407
 408static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
 409{
 410	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
 411}
 412
 413static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
 414{
 415	uint32_t val = 0xCDCDCDCD;
 416
 417	switch (align) {
 418	case ATOM_SRC_DWORD:
 419		val = U32(*ptr);
 420		(*ptr) += 4;
 421		break;
 422	case ATOM_SRC_WORD0:
 423	case ATOM_SRC_WORD8:
 424	case ATOM_SRC_WORD16:
 425		val = U16(*ptr);
 426		(*ptr) += 2;
 427		break;
 428	case ATOM_SRC_BYTE0:
 429	case ATOM_SRC_BYTE8:
 430	case ATOM_SRC_BYTE16:
 431	case ATOM_SRC_BYTE24:
 432		val = U8(*ptr);
 433		(*ptr)++;
 434		break;
 435	}
 436	return val;
 437}
 438
 439static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 440			     int *ptr, uint32_t *saved, int print)
 441{
 442	return atom_get_src_int(ctx,
 443				arg | atom_dst_to_src[(attr >> 3) &
 444						      7][(attr >> 6) & 3] << 3,
 445				ptr, saved, print);
 446}
 447
 448static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
 449{
 450	atom_skip_src_int(ctx,
 451			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
 452								 3] << 3, ptr);
 453}
 454
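     /* Store 'val' into a destination operand, merging it with the bits of
      * 'saved' that fall outside the destination's alignment mask. */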
 455static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 456			 int *ptr, uint32_t val, uint32_t saved)
 457{
 458	uint32_t align =
 459	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
 460	    val, idx;
 461	struct atom_context *gctx = ctx->ctx;
 462	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
 463	val <<= atom_arg_shift[align];
 464	val &= atom_arg_mask[align];
 465	saved &= ~atom_arg_mask[align];
 466	val |= saved;
 467	switch (arg) {
 468	case ATOM_ARG_REG:
 469		idx = U16(*ptr);
 470		(*ptr) += 2;
 471		DEBUG("REG[0x%04X]", idx);
 472		idx += gctx->reg_block;
 473		switch (gctx->io_mode) {
 474		case ATOM_IO_MM:
 475			if (idx == 0)
 476				gctx->card->reg_write(gctx->card, idx,
 477						      val << 2);
 478			else
 479				gctx->card->reg_write(gctx->card, idx, val);
 480			break;
 481		case ATOM_IO_PCI:
 482			pr_info("PCI registers are not implemented\n");
 483			return;
 484		case ATOM_IO_SYSIO:
 485			pr_info("SYSIO registers are not implemented\n");
 486			return;
 487		default:
 488			if (!(gctx->io_mode & 0x80)) {
 489				pr_info("Bad IO mode\n");
 490				return;
 491			}
 492			if (!gctx->iio[gctx->io_mode & 0xFF]) {
 493				pr_info("Undefined indirect IO write method %d\n",
 494					gctx->io_mode & 0x7F);
 495				return;
 496			}
 497			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
 498					 idx, val);
 499		}
 500		break;
 501	case ATOM_ARG_PS:
 502		idx = U8(*ptr);
 503		(*ptr)++;
 504		DEBUG("PS[0x%02X]", idx);
 505		if (idx >= ctx->ps_size) {
 506			pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
 507			return;
 508		}
 509		ctx->ps[idx] = cpu_to_le32(val);
 510		break;
 511	case ATOM_ARG_WS:
 512		idx = U8(*ptr);
 513		(*ptr)++;
 514		DEBUG("WS[0x%02X]", idx);
 515		switch (idx) {
 516		case ATOM_WS_QUOTIENT:
 517			gctx->divmul[0] = val;
 518			break;
 519		case ATOM_WS_REMAINDER:
 520			gctx->divmul[1] = val;
 521			break;
 522		case ATOM_WS_DATAPTR:
 523			gctx->data_block = val;
 524			break;
 525		case ATOM_WS_SHIFT:
 526			gctx->shift = val;
 527			break;
 528		case ATOM_WS_OR_MASK:
 529		case ATOM_WS_AND_MASK:
 530			break;
 531		case ATOM_WS_FB_WINDOW:
 532			gctx->fb_base = val;
 533			break;
 534		case ATOM_WS_ATTRIBUTES:
 535			gctx->io_attr = val;
 536			break;
 537		case ATOM_WS_REGPTR:
 538			gctx->reg_block = val;
 539			break;
 540		default:
 541			if (idx >= ctx->ws_size) {
 542				pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
 543				return;
 544			}
 545			ctx->ws[idx] = val;
 546		}
 547		break;
 548	case ATOM_ARG_FB:
 549		idx = U8(*ptr);
 550		(*ptr)++;
 551		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
 552			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
 553				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
 554		} else
 555			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
 556		DEBUG("FB[0x%02X]", idx);
 557		break;
 558	case ATOM_ARG_PLL:
 559		idx = U8(*ptr);
 560		(*ptr)++;
 561		DEBUG("PLL[0x%02X]", idx);
 562		gctx->card->pll_write(gctx->card, idx, val);
 563		break;
 564	case ATOM_ARG_MC:
 565		idx = U8(*ptr);
 566		(*ptr)++;
 567		DEBUG("MC[0x%02X]", idx);
 568		gctx->card->mc_write(gctx->card, idx, val);
 569		return;
 570	}
 571	switch (align) {
 572	case ATOM_SRC_DWORD:
 573		DEBUG(".[31:0] <- 0x%08X\n", old_val);
 574		break;
 575	case ATOM_SRC_WORD0:
 576		DEBUG(".[15:0] <- 0x%04X\n", old_val);
 577		break;
 578	case ATOM_SRC_WORD8:
 579		DEBUG(".[23:8] <- 0x%04X\n", old_val);
 580		break;
 581	case ATOM_SRC_WORD16:
 582		DEBUG(".[31:16] <- 0x%04X\n", old_val);
 583		break;
 584	case ATOM_SRC_BYTE0:
 585		DEBUG(".[7:0] <- 0x%02X\n", old_val);
 586		break;
 587	case ATOM_SRC_BYTE8:
 588		DEBUG(".[15:8] <- 0x%02X\n", old_val);
 589		break;
 590	case ATOM_SRC_BYTE16:
 591		DEBUG(".[23:16] <- 0x%02X\n", old_val);
 592		break;
 593	case ATOM_SRC_BYTE24:
 594		DEBUG(".[31:24] <- 0x%02X\n", old_val);
 595		break;
 596	}
 597}
 598
 599static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
 600{
 601	uint8_t attr = U8((*ptr)++);
 602	uint32_t dst, src, saved;
 603	int dptr = *ptr;
 604	SDEBUG("   dst: ");
 605	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 606	SDEBUG("   src: ");
 607	src = atom_get_src(ctx, attr, ptr);
 608	dst += src;
 609	SDEBUG("   dst: ");
 610	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 611}
 612
 613static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
 614{
 615	uint8_t attr = U8((*ptr)++);
 616	uint32_t dst, src, saved;
 617	int dptr = *ptr;
 618	SDEBUG("   dst: ");
 619	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 620	SDEBUG("   src: ");
 621	src = atom_get_src(ctx, attr, ptr);
 622	dst &= src;
 623	SDEBUG("   dst: ");
 624	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 625}
 626
 627static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
 628{
 629	printk("ATOM BIOS beeped!\n");
 630}
 631
 632static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
 633{
 634	int idx = U8((*ptr)++);
 635	int r = 0;
 636
 637	if (idx < ATOM_TABLE_NAMES_CNT)
 638		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
 639	else
 640		SDEBUG("   table: %d\n", idx);
 641	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
 642		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
 643	if (r) {
 644		ctx->abort = true;
 645	}
 646}
 647
 648static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
 649{
 650	uint8_t attr = U8((*ptr)++);
 651	uint32_t saved;
 652	int dptr = *ptr;
 653	attr &= 0x38;
 654	attr |= atom_def_dst[attr >> 3] << 6;
 655	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
 656	SDEBUG("   dst: ");
 657	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
 658}
 659
 660static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
 661{
 662	uint8_t attr = U8((*ptr)++);
 663	uint32_t dst, src;
 664	SDEBUG("   src1: ");
 665	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 666	SDEBUG("   src2: ");
 667	src = atom_get_src(ctx, attr, ptr);
 668	ctx->ctx->cs_equal = (dst == src);
 669	ctx->ctx->cs_above = (dst > src);
 670	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
 671	       ctx->ctx->cs_above ? "GT" : "LE");
 672}
 673
 674static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 675{
 676	unsigned count = U8((*ptr)++);
 677	SDEBUG("   count: %d\n", count);
 678	if (arg == ATOM_UNIT_MICROSEC)
 679		udelay(count);
 680	else if (!drm_can_sleep())
 681		mdelay(count);
 682	else
 683		msleep(count);
 684}
 685
 686static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
 687{
 688	uint8_t attr = U8((*ptr)++);
 689	uint32_t dst, src;
 690	SDEBUG("   src1: ");
 691	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 692	SDEBUG("   src2: ");
 693	src = atom_get_src(ctx, attr, ptr);
 694	if (src != 0) {
 695		ctx->ctx->divmul[0] = dst / src;
 696		ctx->ctx->divmul[1] = dst % src;
 697	} else {
 698		ctx->ctx->divmul[0] = 0;
 699		ctx->ctx->divmul[1] = 0;
 700	}
 701}
 702
 703static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
 704{
 705	uint64_t val64;
 706	uint8_t attr = U8((*ptr)++);
 707	uint32_t dst, src;
 708	SDEBUG("   src1: ");
 709	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 710	SDEBUG("   src2: ");
 711	src = atom_get_src(ctx, attr, ptr);
 712	if (src != 0) {
 713		val64 = dst;
 714		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
 715		do_div(val64, src);
 716		ctx->ctx->divmul[0] = lower_32_bits(val64);
 717		ctx->ctx->divmul[1] = upper_32_bits(val64);
 718	} else {
 719		ctx->ctx->divmul[0] = 0;
 720		ctx->ctx->divmul[1] = 0;
 721	}
 722}
 723
 724static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 725{
 726	/* functionally, a nop */
 727}
 728
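     /* Conditional jump.  Repeated jumps to the same target are timed so
      * that a script stuck in a loop is aborted after ATOM_CMD_TIMEOUT_SEC. */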
 729static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 730{
 731	int execute = 0, target = U16(*ptr);
 732	unsigned long cjiffies;
 733
 734	(*ptr) += 2;
 735	switch (arg) {
 736	case ATOM_COND_ABOVE:
 737		execute = ctx->ctx->cs_above;
 738		break;
 739	case ATOM_COND_ABOVEOREQUAL:
 740		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
 741		break;
 742	case ATOM_COND_ALWAYS:
 743		execute = 1;
 744		break;
 745	case ATOM_COND_BELOW:
 746		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
 747		break;
 748	case ATOM_COND_BELOWOREQUAL:
 749		execute = !ctx->ctx->cs_above;
 750		break;
 751	case ATOM_COND_EQUAL:
 752		execute = ctx->ctx->cs_equal;
 753		break;
 754	case ATOM_COND_NOTEQUAL:
 755		execute = !ctx->ctx->cs_equal;
 756		break;
 757	}
 758	if (arg != ATOM_COND_ALWAYS)
 759		SDEBUG("   taken: %s\n", str_yes_no(execute));
 760	SDEBUG("   target: 0x%04X\n", target);
 761	if (execute) {
 762		if (ctx->last_jump == (ctx->start + target)) {
 763			cjiffies = jiffies;
 764			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
 765				cjiffies -= ctx->last_jump_jiffies;
 766				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
 767					DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
 768						  ATOM_CMD_TIMEOUT_SEC);
 769					ctx->abort = true;
 770				}
 771			} else {
  772				/* jiffies wrapped around; just wait a little longer */
 773				ctx->last_jump_jiffies = jiffies;
 774			}
 775		} else {
 776			ctx->last_jump = ctx->start + target;
 777			ctx->last_jump_jiffies = jiffies;
 778		}
 779		*ptr = ctx->start + target;
 780	}
 781}
 782
 783static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
 784{
 785	uint8_t attr = U8((*ptr)++);
 786	uint32_t dst, mask, src, saved;
 787	int dptr = *ptr;
 788	SDEBUG("   dst: ");
 789	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 790	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
 791	SDEBUG("   mask: 0x%08x", mask);
 792	SDEBUG("   src: ");
 793	src = atom_get_src(ctx, attr, ptr);
 794	dst &= mask;
 795	dst |= src;
 796	SDEBUG("   dst: ");
 797	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 798}
 799
 800static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
 801{
 802	uint8_t attr = U8((*ptr)++);
 803	uint32_t src, saved;
 804	int dptr = *ptr;
 805	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
 806		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
 807	else {
 808		atom_skip_dst(ctx, arg, attr, ptr);
 809		saved = 0xCDCDCDCD;
 810	}
 811	SDEBUG("   src: ");
 812	src = atom_get_src(ctx, attr, ptr);
 813	SDEBUG("   dst: ");
 814	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
 815}
 816
 817static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
 818{
 819	uint8_t attr = U8((*ptr)++);
 820	uint32_t dst, src;
 821	SDEBUG("   src1: ");
 822	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 823	SDEBUG("   src2: ");
 824	src = atom_get_src(ctx, attr, ptr);
 825	ctx->ctx->divmul[0] = dst * src;
 826}
 827
 828static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
 829{
 830	uint64_t val64;
 831	uint8_t attr = U8((*ptr)++);
 832	uint32_t dst, src;
 833	SDEBUG("   src1: ");
 834	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 835	SDEBUG("   src2: ");
 836	src = atom_get_src(ctx, attr, ptr);
 837	val64 = (uint64_t)dst * (uint64_t)src;
 838	ctx->ctx->divmul[0] = lower_32_bits(val64);
 839	ctx->ctx->divmul[1] = upper_32_bits(val64);
 840}
 841
 842static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 843{
 844	/* nothing */
 845}
 846
 847static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
 848{
 849	uint8_t attr = U8((*ptr)++);
 850	uint32_t dst, src, saved;
 851	int dptr = *ptr;
 852	SDEBUG("   dst: ");
 853	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 854	SDEBUG("   src: ");
 855	src = atom_get_src(ctx, attr, ptr);
 856	dst |= src;
 857	SDEBUG("   dst: ");
 858	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 859}
 860
 861static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
 862{
 863	uint8_t val = U8((*ptr)++);
 864	SDEBUG("POST card output: 0x%02X\n", val);
 865}
 866
 867static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
 868{
 869	pr_info("unimplemented!\n");
 870}
 871
 872static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
 873{
 874	pr_info("unimplemented!\n");
 875}
 876
 877static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
 878{
 879	pr_info("unimplemented!\n");
 880}
 881
 882static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
 883{
 884	int idx = U8(*ptr);
 885	(*ptr)++;
 886	SDEBUG("   block: %d\n", idx);
 887	if (!idx)
 888		ctx->ctx->data_block = 0;
 889	else if (idx == 255)
 890		ctx->ctx->data_block = ctx->start;
 891	else
 892		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
 893	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
 894}
 895
 896static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
 897{
 898	uint8_t attr = U8((*ptr)++);
 899	SDEBUG("   fb_base: ");
 900	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
 901}
 902
 903static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
 904{
 905	int port;
 906	switch (arg) {
 907	case ATOM_PORT_ATI:
 908		port = U16(*ptr);
 909		if (port < ATOM_IO_NAMES_CNT)
 910			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
 911		else
 912			SDEBUG("   port: %d\n", port);
 913		if (!port)
 914			ctx->ctx->io_mode = ATOM_IO_MM;
 915		else
 916			ctx->ctx->io_mode = ATOM_IO_IIO | port;
 917		(*ptr) += 2;
 918		break;
 919	case ATOM_PORT_PCI:
 920		ctx->ctx->io_mode = ATOM_IO_PCI;
 921		(*ptr)++;
 922		break;
 923	case ATOM_PORT_SYSIO:
 924		ctx->ctx->io_mode = ATOM_IO_SYSIO;
 925		(*ptr)++;
 926		break;
 927	}
 928}
 929
 930static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
 931{
 932	ctx->ctx->reg_block = U16(*ptr);
 933	(*ptr) += 2;
 934	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
 935}
 936
 937static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
 938{
 939	uint8_t attr = U8((*ptr)++), shift;
 940	uint32_t saved, dst;
 941	int dptr = *ptr;
 942	attr &= 0x38;
 943	attr |= atom_def_dst[attr >> 3] << 6;
 944	SDEBUG("   dst: ");
 945	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 946	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
 947	SDEBUG("   shift: %d\n", shift);
 948	dst <<= shift;
 949	SDEBUG("   dst: ");
 950	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 951}
 952
 953static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
 954{
 955	uint8_t attr = U8((*ptr)++), shift;
 956	uint32_t saved, dst;
 957	int dptr = *ptr;
 958	attr &= 0x38;
 959	attr |= atom_def_dst[attr >> 3] << 6;
 960	SDEBUG("   dst: ");
 961	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 962	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
 963	SDEBUG("   shift: %d\n", shift);
 964	dst >>= shift;
 965	SDEBUG("   dst: ");
 966	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 967}
 968
 969static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 970{
 971	uint8_t attr = U8((*ptr)++), shift;
 972	uint32_t saved, dst;
 973	int dptr = *ptr;
 974	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
 975	SDEBUG("   dst: ");
 976	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  977	/* op needs the full dst value */
 978	dst = saved;
 979	shift = atom_get_src(ctx, attr, ptr);
 980	SDEBUG("   shift: %d\n", shift);
 981	dst <<= shift;
 982	dst &= atom_arg_mask[dst_align];
 983	dst >>= atom_arg_shift[dst_align];
 984	SDEBUG("   dst: ");
 985	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 986}
 987
 988static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 989{
 990	uint8_t attr = U8((*ptr)++), shift;
 991	uint32_t saved, dst;
 992	int dptr = *ptr;
 993	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
 994	SDEBUG("   dst: ");
 995	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  996	/* op needs the full dst value */
 997	dst = saved;
 998	shift = atom_get_src(ctx, attr, ptr);
 999	SDEBUG("   shift: %d\n", shift);
1000	dst >>= shift;
1001	dst &= atom_arg_mask[dst_align];
1002	dst >>= atom_arg_shift[dst_align];
1003	SDEBUG("   dst: ");
1004	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1005}
1006
1007static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
1008{
1009	uint8_t attr = U8((*ptr)++);
1010	uint32_t dst, src, saved;
1011	int dptr = *ptr;
1012	SDEBUG("   dst: ");
1013	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1014	SDEBUG("   src: ");
1015	src = atom_get_src(ctx, attr, ptr);
1016	dst -= src;
1017	SDEBUG("   dst: ");
1018	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1019}
1020
1021static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1022{
1023	uint8_t attr = U8((*ptr)++);
1024	uint32_t src, val, target;
1025	SDEBUG("   switch: ");
1026	src = atom_get_src(ctx, attr, ptr);
1027	while (U16(*ptr) != ATOM_CASE_END)
1028		if (U8(*ptr) == ATOM_CASE_MAGIC) {
1029			(*ptr)++;
1030			SDEBUG("   case: ");
1031			val =
1032			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1033					 ptr);
1034			target = U16(*ptr);
1035			if (val == src) {
1036				SDEBUG("   target: %04X\n", target);
1037				*ptr = ctx->start + target;
1038				return;
1039			}
1040			(*ptr) += 2;
1041		} else {
1042			pr_info("Bad case\n");
1043			return;
1044		}
1045	(*ptr) += 2;
1046}
1047
1048static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1049{
1050	uint8_t attr = U8((*ptr)++);
1051	uint32_t dst, src;
1052	SDEBUG("   src1: ");
1053	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1054	SDEBUG("   src2: ");
1055	src = atom_get_src(ctx, attr, ptr);
1056	ctx->ctx->cs_equal = ((dst & src) == 0);
1057	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1058}
1059
1060static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1061{
1062	uint8_t attr = U8((*ptr)++);
1063	uint32_t dst, src, saved;
1064	int dptr = *ptr;
1065	SDEBUG("   dst: ");
1066	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1067	SDEBUG("   src: ");
1068	src = atom_get_src(ctx, attr, ptr);
1069	dst ^= src;
1070	SDEBUG("   dst: ");
1071	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1072}
1073
1074static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1075{
1076	uint8_t val = U8((*ptr)++);
1077	SDEBUG("DEBUG output: 0x%02X\n", val);
1078}
1079
1080static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1081{
1082	uint16_t val = U16(*ptr);
1083	(*ptr) += val + 2;
1084	SDEBUG("PROCESSDS output: 0x%02X\n", val);
1085}
1086
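     /* Opcode dispatch table, indexed by the AtomBIOS opcode byte: each
      * entry pairs a handler with the operand type or condition it uses. */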
1087static struct {
1088	void (*func) (atom_exec_context *, int *, int);
1089	int arg;
1090} opcode_table[ATOM_OP_CNT] = {
1091	{
1092	NULL, 0}, {
1093	atom_op_move, ATOM_ARG_REG}, {
1094	atom_op_move, ATOM_ARG_PS}, {
1095	atom_op_move, ATOM_ARG_WS}, {
1096	atom_op_move, ATOM_ARG_FB}, {
1097	atom_op_move, ATOM_ARG_PLL}, {
1098	atom_op_move, ATOM_ARG_MC}, {
1099	atom_op_and, ATOM_ARG_REG}, {
1100	atom_op_and, ATOM_ARG_PS}, {
1101	atom_op_and, ATOM_ARG_WS}, {
1102	atom_op_and, ATOM_ARG_FB}, {
1103	atom_op_and, ATOM_ARG_PLL}, {
1104	atom_op_and, ATOM_ARG_MC}, {
1105	atom_op_or, ATOM_ARG_REG}, {
1106	atom_op_or, ATOM_ARG_PS}, {
1107	atom_op_or, ATOM_ARG_WS}, {
1108	atom_op_or, ATOM_ARG_FB}, {
1109	atom_op_or, ATOM_ARG_PLL}, {
1110	atom_op_or, ATOM_ARG_MC}, {
1111	atom_op_shift_left, ATOM_ARG_REG}, {
1112	atom_op_shift_left, ATOM_ARG_PS}, {
1113	atom_op_shift_left, ATOM_ARG_WS}, {
1114	atom_op_shift_left, ATOM_ARG_FB}, {
1115	atom_op_shift_left, ATOM_ARG_PLL}, {
1116	atom_op_shift_left, ATOM_ARG_MC}, {
1117	atom_op_shift_right, ATOM_ARG_REG}, {
1118	atom_op_shift_right, ATOM_ARG_PS}, {
1119	atom_op_shift_right, ATOM_ARG_WS}, {
1120	atom_op_shift_right, ATOM_ARG_FB}, {
1121	atom_op_shift_right, ATOM_ARG_PLL}, {
1122	atom_op_shift_right, ATOM_ARG_MC}, {
1123	atom_op_mul, ATOM_ARG_REG}, {
1124	atom_op_mul, ATOM_ARG_PS}, {
1125	atom_op_mul, ATOM_ARG_WS}, {
1126	atom_op_mul, ATOM_ARG_FB}, {
1127	atom_op_mul, ATOM_ARG_PLL}, {
1128	atom_op_mul, ATOM_ARG_MC}, {
1129	atom_op_div, ATOM_ARG_REG}, {
1130	atom_op_div, ATOM_ARG_PS}, {
1131	atom_op_div, ATOM_ARG_WS}, {
1132	atom_op_div, ATOM_ARG_FB}, {
1133	atom_op_div, ATOM_ARG_PLL}, {
1134	atom_op_div, ATOM_ARG_MC}, {
1135	atom_op_add, ATOM_ARG_REG}, {
1136	atom_op_add, ATOM_ARG_PS}, {
1137	atom_op_add, ATOM_ARG_WS}, {
1138	atom_op_add, ATOM_ARG_FB}, {
1139	atom_op_add, ATOM_ARG_PLL}, {
1140	atom_op_add, ATOM_ARG_MC}, {
1141	atom_op_sub, ATOM_ARG_REG}, {
1142	atom_op_sub, ATOM_ARG_PS}, {
1143	atom_op_sub, ATOM_ARG_WS}, {
1144	atom_op_sub, ATOM_ARG_FB}, {
1145	atom_op_sub, ATOM_ARG_PLL}, {
1146	atom_op_sub, ATOM_ARG_MC}, {
1147	atom_op_setport, ATOM_PORT_ATI}, {
1148	atom_op_setport, ATOM_PORT_PCI}, {
1149	atom_op_setport, ATOM_PORT_SYSIO}, {
1150	atom_op_setregblock, 0}, {
1151	atom_op_setfbbase, 0}, {
1152	atom_op_compare, ATOM_ARG_REG}, {
1153	atom_op_compare, ATOM_ARG_PS}, {
1154	atom_op_compare, ATOM_ARG_WS}, {
1155	atom_op_compare, ATOM_ARG_FB}, {
1156	atom_op_compare, ATOM_ARG_PLL}, {
1157	atom_op_compare, ATOM_ARG_MC}, {
1158	atom_op_switch, 0}, {
1159	atom_op_jump, ATOM_COND_ALWAYS}, {
1160	atom_op_jump, ATOM_COND_EQUAL}, {
1161	atom_op_jump, ATOM_COND_BELOW}, {
1162	atom_op_jump, ATOM_COND_ABOVE}, {
1163	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1164	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1165	atom_op_jump, ATOM_COND_NOTEQUAL}, {
1166	atom_op_test, ATOM_ARG_REG}, {
1167	atom_op_test, ATOM_ARG_PS}, {
1168	atom_op_test, ATOM_ARG_WS}, {
1169	atom_op_test, ATOM_ARG_FB}, {
1170	atom_op_test, ATOM_ARG_PLL}, {
1171	atom_op_test, ATOM_ARG_MC}, {
1172	atom_op_delay, ATOM_UNIT_MILLISEC}, {
1173	atom_op_delay, ATOM_UNIT_MICROSEC}, {
1174	atom_op_calltable, 0}, {
1175	atom_op_repeat, 0}, {
1176	atom_op_clear, ATOM_ARG_REG}, {
1177	atom_op_clear, ATOM_ARG_PS}, {
1178	atom_op_clear, ATOM_ARG_WS}, {
1179	atom_op_clear, ATOM_ARG_FB}, {
1180	atom_op_clear, ATOM_ARG_PLL}, {
1181	atom_op_clear, ATOM_ARG_MC}, {
1182	atom_op_nop, 0}, {
1183	atom_op_eot, 0}, {
1184	atom_op_mask, ATOM_ARG_REG}, {
1185	atom_op_mask, ATOM_ARG_PS}, {
1186	atom_op_mask, ATOM_ARG_WS}, {
1187	atom_op_mask, ATOM_ARG_FB}, {
1188	atom_op_mask, ATOM_ARG_PLL}, {
1189	atom_op_mask, ATOM_ARG_MC}, {
1190	atom_op_postcard, 0}, {
1191	atom_op_beep, 0}, {
1192	atom_op_savereg, 0}, {
1193	atom_op_restorereg, 0}, {
1194	atom_op_setdatablock, 0}, {
1195	atom_op_xor, ATOM_ARG_REG}, {
1196	atom_op_xor, ATOM_ARG_PS}, {
1197	atom_op_xor, ATOM_ARG_WS}, {
1198	atom_op_xor, ATOM_ARG_FB}, {
1199	atom_op_xor, ATOM_ARG_PLL}, {
1200	atom_op_xor, ATOM_ARG_MC}, {
1201	atom_op_shl, ATOM_ARG_REG}, {
1202	atom_op_shl, ATOM_ARG_PS}, {
1203	atom_op_shl, ATOM_ARG_WS}, {
1204	atom_op_shl, ATOM_ARG_FB}, {
1205	atom_op_shl, ATOM_ARG_PLL}, {
1206	atom_op_shl, ATOM_ARG_MC}, {
1207	atom_op_shr, ATOM_ARG_REG}, {
1208	atom_op_shr, ATOM_ARG_PS}, {
1209	atom_op_shr, ATOM_ARG_WS}, {
1210	atom_op_shr, ATOM_ARG_FB}, {
1211	atom_op_shr, ATOM_ARG_PLL}, {
1212	atom_op_shr, ATOM_ARG_MC}, {
1213	atom_op_debug, 0}, {
1214	atom_op_processds, 0}, {
1215	atom_op_mul32, ATOM_ARG_PS}, {
1216	atom_op_mul32, ATOM_ARG_WS}, {
1217	atom_op_div32, ATOM_ARG_PS}, {
1218	atom_op_div32, ATOM_ARG_WS},
1219};
1220
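     /* Interpreter core: look up the command table at 'index', allocate its
      * work space and execute opcodes until EOT, an abort or a bad opcode. */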
1221static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1222{
1223	int base = CU16(ctx->cmd_table + 4 + 2 * index);
1224	int len, ws, ps, ptr;
1225	unsigned char op;
1226	atom_exec_context ectx;
1227	int ret = 0;
1228
1229	if (!base)
1230		return -EINVAL;
1231
1232	len = CU16(base + ATOM_CT_SIZE_PTR);
1233	ws = CU8(base + ATOM_CT_WS_PTR);
1234	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1235	ptr = base + ATOM_CT_CODE_PTR;
1236
1237	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1238
1239	ectx.ctx = ctx;
1240	ectx.ps_shift = ps / 4;
1241	ectx.start = base;
1242	ectx.ps = params;
1243	ectx.ps_size = params_size;
1244	ectx.abort = false;
1245	ectx.last_jump = 0;
1246	if (ws) {
1247		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
1248		ectx.ws_size = ws;
1249	} else {
1250		ectx.ws = NULL;
1251		ectx.ws_size = 0;
1252	}
1253
1254	debug_depth++;
1255	while (1) {
1256		op = CU8(ptr++);
1257		if (op < ATOM_OP_NAMES_CNT)
1258			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1259		else
1260			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1261		if (ectx.abort) {
1262			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1263				base, len, ws, ps, ptr - 1);
1264			ret = -EINVAL;
1265			goto free;
1266		}
1267
1268		if (op < ATOM_OP_CNT && op > 0)
1269			opcode_table[op].func(&ectx, &ptr,
1270					      opcode_table[op].arg);
1271		else
1272			break;
1273
1274		if (op == ATOM_OP_EOT)
1275			break;
1276	}
1277	debug_depth--;
1278	SDEBUG("<<\n");
1279
1280free:
1281	if (ws)
1282		kfree(ectx.ws);
1283	return ret;
1284}
1285
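     /* Public entry point: serialize on ctx->mutex and reset the shared
      * interpreter state before running the requested command table. */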
1286int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1287{
1288	int r;
1289
1290	mutex_lock(&ctx->mutex);
1291	/* reset data block */
1292	ctx->data_block = 0;
1293	/* reset reg block */
1294	ctx->reg_block = 0;
1295	/* reset fb window */
1296	ctx->fb_base = 0;
1297	/* reset io mode */
1298	ctx->io_mode = ATOM_IO_MM;
1299	/* reset divmul */
1300	ctx->divmul[0] = 0;
1301	ctx->divmul[1] = 0;
1302	r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
1303	mutex_unlock(&ctx->mutex);
1304	return r;
1305}
1306
1307static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1308
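     /* Index the indirect IO programs found at 'base', recording the start
      * offset of each program under its port number in ctx->iio. */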
1309static void atom_index_iio(struct atom_context *ctx, int base)
1310{
1311	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1312	if (!ctx->iio)
1313		return;
1314	while (CU8(base) == ATOM_IIO_START) {
1315		ctx->iio[CU8(base + 1)] = base + 2;
1316		base += 2;
1317		while (CU8(base) != ATOM_IIO_END)
1318			base += atom_iio_len[CU8(base)];
1319		base += 3;
1320	}
1321}
1322
1323static void atom_get_vbios_name(struct atom_context *ctx)
1324{
1325	unsigned char *p_rom;
1326	unsigned char str_num;
1327	unsigned short off_to_vbios_str;
1328	unsigned char *c_ptr;
1329	int name_size;
1330	int i;
1331
1332	const char *na = "--N/A--";
1333	char *back;
1334
1335	p_rom = ctx->bios;
1336
1337	str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
1338	if (str_num != 0) {
1339		off_to_vbios_str =
1340			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1341
1342		c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
1343	} else {
1344		/* do not know where to find name */
1345		memcpy(ctx->name, na, 7);
1346		ctx->name[7] = 0;
1347		return;
1348	}
1349
1350	/*
1351	 * skip the atombios strings, usually 4
1352	 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
1353	 */
1354	for (i = 0; i < str_num; i++) {
1355		while (*c_ptr != 0)
1356			c_ptr++;
1357		c_ptr++;
1358	}
1359
1360	/* skip the following 2 chars: 0x0D 0x0A */
1361	c_ptr += 2;
1362
1363	name_size = strnlen(c_ptr, STRLEN_LONG - 1);
1364	memcpy(ctx->name, c_ptr, name_size);
1365	back = ctx->name + name_size;
1366	while ((*--back) == ' ')
1367		;
1368	*(back + 1) = '\0';
1369}
1370
1371static void atom_get_vbios_date(struct atom_context *ctx)
1372{
1373	unsigned char *p_rom;
1374	unsigned char *date_in_rom;
1375
1376	p_rom = ctx->bios;
1377
1378	date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
1379
1380	ctx->date[0] = '2';
1381	ctx->date[1] = '0';
1382	ctx->date[2] = date_in_rom[6];
1383	ctx->date[3] = date_in_rom[7];
1384	ctx->date[4] = '/';
1385	ctx->date[5] = date_in_rom[0];
1386	ctx->date[6] = date_in_rom[1];
1387	ctx->date[7] = '/';
1388	ctx->date[8] = date_in_rom[3];
1389	ctx->date[9] = date_in_rom[4];
1390	ctx->date[10] = ' ';
1391	ctx->date[11] = date_in_rom[9];
1392	ctx->date[12] = date_in_rom[10];
1393	ctx->date[13] = date_in_rom[11];
1394	ctx->date[14] = date_in_rom[12];
1395	ctx->date[15] = date_in_rom[13];
1396	ctx->date[16] = '\0';
1397}
1398
1399static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
1400					   int end, int maxlen)
1401{
1402	unsigned long str_off;
1403	unsigned char *p_rom;
1404	unsigned short str_len;
1405
1406	str_off = 0;
1407	str_len = strnlen(str, maxlen);
1408	p_rom = ctx->bios;
1409
1410	for (; start <= end; ++start) {
1411		for (str_off = 0; str_off < str_len; ++str_off) {
1412			if (str[str_off] != *(p_rom + start + str_off))
1413				break;
1414		}
1415
1416		if (str_off == str_len || str[str_off] == 0)
1417			return p_rom + start;
1418	}
1419	return NULL;
1420}
1421
1422static void atom_get_vbios_pn(struct atom_context *ctx)
1423{
1424	unsigned char *p_rom;
1425	unsigned short off_to_vbios_str;
1426	unsigned char *vbios_str;
1427	int count;
1428
1429	off_to_vbios_str = 0;
1430	p_rom = ctx->bios;
1431
1432	if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
1433		off_to_vbios_str =
1434			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1435
1436		vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
1437	} else {
1438		vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
1439	}
1440
1441	if (*vbios_str == 0) {
1442		vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
1443		if (vbios_str == NULL)
1444			vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
1445	}
1446	if (vbios_str != NULL && *vbios_str == 0)
1447		vbios_str++;
1448
1449	if (vbios_str != NULL) {
1450		count = 0;
1451		while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
1452		       vbios_str[count] <= 'z') {
1453			ctx->vbios_pn[count] = vbios_str[count];
1454			count++;
1455		}
1456
1457		ctx->vbios_pn[count] = 0;
1458	}
1459
1460	pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
1461}
1462
1463static void atom_get_vbios_version(struct atom_context *ctx)
1464{
1465	unsigned short start = 3, end;
1466	unsigned char *vbios_ver;
1467	unsigned char *p_rom;
1468
1469	p_rom = ctx->bios;
1470	/* Search from strings offset if it's present */
1471	start = *(unsigned short *)(p_rom +
1472				    OFFSET_TO_GET_ATOMBIOS_STRING_START);
1473
1474	/* Search till atom rom header start point */
1475	end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER);
1476
1477	/* Use hardcoded offsets, if the offsets are not populated */
1478	if (end <= start) {
1479		start = 3;
1480		end = 1024;
1481	}
1482
1483	/* find anchor ATOMBIOSBK-AMD */
1484	vbios_ver =
1485		atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64);
1486	if (vbios_ver != NULL) {
1487		/* skip ATOMBIOSBK-AMD VER */
1488		vbios_ver += 18;
1489		memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
1490	} else {
1491		ctx->vbios_ver_str[0] = '\0';
1492	}
1493}
1494
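     /* Validate the BIOS image magics, record the command and data table
      * offsets, index the IIO programs and capture the VBIOS name, part
      * number, date and version strings.  Returns NULL on failure. */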
1495struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1496{
1497	int base;
1498	struct atom_context *ctx =
1499	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1500	struct _ATOM_ROM_HEADER *atom_rom_header;
1501	struct _ATOM_MASTER_DATA_TABLE *master_table;
1502	struct _ATOM_FIRMWARE_INFO *atom_fw_info;
1503
1504	if (!ctx)
1505		return NULL;
1506
1507	ctx->card = card;
1508	ctx->bios = bios;
1509
1510	if (CU16(0) != ATOM_BIOS_MAGIC) {
1511		pr_info("Invalid BIOS magic\n");
1512		kfree(ctx);
1513		return NULL;
1514	}
1515	if (strncmp
1516	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1517	     strlen(ATOM_ATI_MAGIC))) {
1518		pr_info("Invalid ATI magic\n");
1519		kfree(ctx);
1520		return NULL;
1521	}
1522
1523	base = CU16(ATOM_ROM_TABLE_PTR);
1524	if (strncmp
1525	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1526	     strlen(ATOM_ROM_MAGIC))) {
1527		pr_info("Invalid ATOM magic\n");
1528		kfree(ctx);
1529		return NULL;
1530	}
1531
1532	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1533	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1534	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1535	if (!ctx->iio) {
1536		amdgpu_atom_destroy(ctx);
1537		return NULL;
1538	}
1539
1540	atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
1541	if (atom_rom_header->usMasterDataTableOffset != 0) {
1542		master_table = (struct _ATOM_MASTER_DATA_TABLE *)
1543				CSTR(atom_rom_header->usMasterDataTableOffset);
1544		if (master_table->ListOfDataTables.FirmwareInfo != 0) {
1545			atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
1546					CSTR(master_table->ListOfDataTables.FirmwareInfo);
1547			ctx->version = atom_fw_info->ulFirmwareRevision;
1548		}
1549	}
1550
1551	atom_get_vbios_name(ctx);
1552	atom_get_vbios_pn(ctx);
1553	atom_get_vbios_date(ctx);
1554	atom_get_vbios_version(ctx);
1555
1556	return ctx;
1557}
1558
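     /* Run the ASIC_INIT command table with the default SCLK and MCLK read
      * from the firmware info data table as parameters. */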
1559int amdgpu_atom_asic_init(struct atom_context *ctx)
1560{
1561	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1562	uint32_t ps[16];
1563	int ret;
1564
1565	memset(ps, 0, 64);
1566
1567	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1568	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1569	if (!ps[0] || !ps[1])
1570		return 1;
1571
1572	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1573		return 1;
1574	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
1575	if (ret)
1576		return ret;
1577
1578	memset(ps, 0, 64);
1579
1580	return ret;
1581}
1582
1583void amdgpu_atom_destroy(struct atom_context *ctx)
1584{
1585	kfree(ctx->iio);
1586	kfree(ctx);
1587}
1588
1589bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1590			    uint16_t *size, uint8_t *frev, uint8_t *crev,
1591			    uint16_t *data_start)
1592{
1593	int offset = index * 2 + 4;
1594	int idx = CU16(ctx->data_table + offset);
1595	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1596
1597	if (!mdt[index])
1598		return false;
1599
1600	if (size)
1601		*size = CU16(idx);
1602	if (frev)
1603		*frev = CU8(idx + 2);
1604	if (crev)
1605		*crev = CU8(idx + 3);
1606	*data_start = idx;
1607	return true;
1608}
1609
1610bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
1611			   uint8_t *crev)
1612{
1613	int offset = index * 2 + 4;
1614	int idx = CU16(ctx->cmd_table + offset);
1615	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1616
1617	if (!mct[index])
1618		return false;
1619
1620	if (frev)
1621		*frev = CU8(idx + 2);
1622	if (crev)
1623		*crev = CU8(idx + 3);
1624	return true;
1625}
1626
v4.10.11
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Author: Stanislaw Skowronek
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/sched.h>
  27#include <linux/slab.h>
 
 
  28#include <asm/unaligned.h>
  29
 
 
  30#define ATOM_DEBUG
  31
 
  32#include "atom.h"
  33#include "atom-names.h"
  34#include "atom-bits.h"
  35#include "amdgpu.h"
  36
  37#define ATOM_COND_ABOVE		0
  38#define ATOM_COND_ABOVEOREQUAL	1
  39#define ATOM_COND_ALWAYS	2
  40#define ATOM_COND_BELOW		3
  41#define ATOM_COND_BELOWOREQUAL	4
  42#define ATOM_COND_EQUAL		5
  43#define ATOM_COND_NOTEQUAL	6
  44
  45#define ATOM_PORT_ATI	0
  46#define ATOM_PORT_PCI	1
  47#define ATOM_PORT_SYSIO	2
  48
  49#define ATOM_UNIT_MICROSEC	0
  50#define ATOM_UNIT_MILLISEC	1
  51
  52#define PLL_INDEX	2
  53#define PLL_DATA	3
  54
 
 
  55typedef struct {
  56	struct atom_context *ctx;
  57	uint32_t *ps, *ws;
 
  58	int ps_shift;
  59	uint16_t start;
  60	unsigned last_jump;
  61	unsigned long last_jump_jiffies;
  62	bool abort;
  63} atom_exec_context;
  64
  65int amdgpu_atom_debug = 0;
  66static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
  67int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
  68
  69static uint32_t atom_arg_mask[8] =
  70    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
  710xFF000000 };
  72static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
  73
  74static int atom_dst_to_src[8][4] = {
  75	/* translate destination alignment field to the source alignment encoding */
  76	{0, 0, 0, 0},
  77	{1, 2, 3, 0},
  78	{1, 2, 3, 0},
  79	{1, 2, 3, 0},
  80	{4, 5, 6, 7},
  81	{4, 5, 6, 7},
  82	{4, 5, 6, 7},
  83	{4, 5, 6, 7},
  84};
  85static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
  86
  87static int debug_depth = 0;
  88#ifdef ATOM_DEBUG
  89static void debug_print_spaces(int n)
  90{
  91	while (n--)
  92		printk("   ");
  93}
  94
  95#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
  96#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
  97#else
  98#define DEBUG(...) do { } while (0)
  99#define SDEBUG(...) do { } while (0)
 100#endif
 101
 102static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
 103				 uint32_t index, uint32_t data)
 104{
 105	uint32_t temp = 0xCDCDCDCD;
 106
 107	while (1)
 108		switch (CU8(base)) {
 109		case ATOM_IIO_NOP:
 110			base++;
 111			break;
 112		case ATOM_IIO_READ:
 113			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
 114			base += 3;
 115			break;
 116		case ATOM_IIO_WRITE:
 117			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
 118			base += 3;
 119			break;
 120		case ATOM_IIO_CLEAR:
 121			temp &=
 122			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 123			      CU8(base + 2));
 124			base += 3;
 125			break;
 126		case ATOM_IIO_SET:
 127			temp |=
 128			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
 129									2);
 130			base += 3;
 131			break;
 132		case ATOM_IIO_MOVE_INDEX:
 133			temp &=
 134			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 135			      CU8(base + 3));
 136			temp |=
 137			    ((index >> CU8(base + 2)) &
 138			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
 139									  3);
 140			base += 4;
 141			break;
 142		case ATOM_IIO_MOVE_DATA:
 143			temp &=
 144			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 145			      CU8(base + 3));
 146			temp |=
 147			    ((data >> CU8(base + 2)) &
 148			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
 149									  3);
 150			base += 4;
 151			break;
 152		case ATOM_IIO_MOVE_ATTR:
 153			temp &=
 154			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
 155			      CU8(base + 3));
 156			temp |=
 157			    ((ctx->
 158			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
 159									  CU8
 160									  (base
 161									   +
 162									   1))))
 163			    << CU8(base + 3);
 164			base += 4;
 165			break;
 166		case ATOM_IIO_END:
 167			return temp;
 168		default:
 169			printk(KERN_INFO "Unknown IIO opcode.\n");
 170			return 0;
 171		}
 172}
 173
 174static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 175				 int *ptr, uint32_t *saved, int print)
 176{
 177	uint32_t idx, val = 0xCDCDCDCD, align, arg;
 178	struct atom_context *gctx = ctx->ctx;
 179	arg = attr & 7;
 180	align = (attr >> 3) & 7;
 181	switch (arg) {
 182	case ATOM_ARG_REG:
 183		idx = U16(*ptr);
 184		(*ptr) += 2;
 185		if (print)
 186			DEBUG("REG[0x%04X]", idx);
 187		idx += gctx->reg_block;
 188		switch (gctx->io_mode) {
 189		case ATOM_IO_MM:
 190			val = gctx->card->reg_read(gctx->card, idx);
 191			break;
 192		case ATOM_IO_PCI:
 193			printk(KERN_INFO
 194			       "PCI registers are not implemented.\n");
 195			return 0;
 196		case ATOM_IO_SYSIO:
 197			printk(KERN_INFO
 198			       "SYSIO registers are not implemented.\n");
 199			return 0;
 200		default:
 201			if (!(gctx->io_mode & 0x80)) {
 202				printk(KERN_INFO "Bad IO mode.\n");
 203				return 0;
 204			}
 205			if (!gctx->iio[gctx->io_mode & 0x7F]) {
 206				printk(KERN_INFO
 207				       "Undefined indirect IO read method %d.\n",
 208				       gctx->io_mode & 0x7F);
 209				return 0;
 210			}
 211			val =
 212			    atom_iio_execute(gctx,
 213					     gctx->iio[gctx->io_mode & 0x7F],
 214					     idx, 0);
 215		}
 216		break;
 217	case ATOM_ARG_PS:
 218		idx = U8(*ptr);
 219		(*ptr)++;
 220		/* get_unaligned_le32 avoids unaligned accesses from atombios
 221		 * tables, noticed on a DEC Alpha. */
 222		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
 
 
 
 223		if (print)
 224			DEBUG("PS[0x%02X,0x%04X]", idx, val);
 225		break;
 226	case ATOM_ARG_WS:
 227		idx = U8(*ptr);
 228		(*ptr)++;
 229		if (print)
 230			DEBUG("WS[0x%02X]", idx);
 231		switch (idx) {
 232		case ATOM_WS_QUOTIENT:
 233			val = gctx->divmul[0];
 234			break;
 235		case ATOM_WS_REMAINDER:
 236			val = gctx->divmul[1];
 237			break;
 238		case ATOM_WS_DATAPTR:
 239			val = gctx->data_block;
 240			break;
 241		case ATOM_WS_SHIFT:
 242			val = gctx->shift;
 243			break;
 244		case ATOM_WS_OR_MASK:
 245			val = 1 << gctx->shift;
 246			break;
 247		case ATOM_WS_AND_MASK:
 248			val = ~(1 << gctx->shift);
 249			break;
 250		case ATOM_WS_FB_WINDOW:
 251			val = gctx->fb_base;
 252			break;
 253		case ATOM_WS_ATTRIBUTES:
 254			val = gctx->io_attr;
 255			break;
 256		case ATOM_WS_REGPTR:
 257			val = gctx->reg_block;
 258			break;
 259		default:
 260			val = ctx->ws[idx];
 
 
 
 261		}
 262		break;
 263	case ATOM_ARG_ID:
 264		idx = U16(*ptr);
 265		(*ptr) += 2;
 266		if (print) {
 267			if (gctx->data_block)
 268				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
 269			else
 270				DEBUG("ID[0x%04X]", idx);
 271		}
 272		val = U32(idx + gctx->data_block);
 273		break;
 274	case ATOM_ARG_FB:
 275		idx = U8(*ptr);
 276		(*ptr)++;
 277		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
 278			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
 279				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
 280			val = 0;
 281		} else
 282			val = gctx->scratch[(gctx->fb_base / 4) + idx];
 283		if (print)
 284			DEBUG("FB[0x%02X]", idx);
 285		break;
 286	case ATOM_ARG_IMM:
 287		switch (align) {
 288		case ATOM_SRC_DWORD:
 289			val = U32(*ptr);
 290			(*ptr) += 4;
 291			if (print)
 292				DEBUG("IMM 0x%08X\n", val);
 293			return val;
 294		case ATOM_SRC_WORD0:
 295		case ATOM_SRC_WORD8:
 296		case ATOM_SRC_WORD16:
 297			val = U16(*ptr);
 298			(*ptr) += 2;
 299			if (print)
 300				DEBUG("IMM 0x%04X\n", val);
 301			return val;
 302		case ATOM_SRC_BYTE0:
 303		case ATOM_SRC_BYTE8:
 304		case ATOM_SRC_BYTE16:
 305		case ATOM_SRC_BYTE24:
 306			val = U8(*ptr);
 307			(*ptr)++;
 308			if (print)
 309				DEBUG("IMM 0x%02X\n", val);
 310			return val;
 311		}
 312		return 0;
 313	case ATOM_ARG_PLL:
 314		idx = U8(*ptr);
 315		(*ptr)++;
 316		if (print)
 317			DEBUG("PLL[0x%02X]", idx);
 318		val = gctx->card->pll_read(gctx->card, idx);
 319		break;
 320	case ATOM_ARG_MC:
 321		idx = U8(*ptr);
 322		(*ptr)++;
 323		if (print)
 324			DEBUG("MC[0x%02X]", idx);
 325		val = gctx->card->mc_read(gctx->card, idx);
 326		break;
 327	}
 328	if (saved)
 329		*saved = val;
 330	val &= atom_arg_mask[align];
 331	val >>= atom_arg_shift[align];
 332	if (print)
 333		switch (align) {
 334		case ATOM_SRC_DWORD:
 335			DEBUG(".[31:0] -> 0x%08X\n", val);
 336			break;
 337		case ATOM_SRC_WORD0:
 338			DEBUG(".[15:0] -> 0x%04X\n", val);
 339			break;
 340		case ATOM_SRC_WORD8:
 341			DEBUG(".[23:8] -> 0x%04X\n", val);
 342			break;
 343		case ATOM_SRC_WORD16:
 344			DEBUG(".[31:16] -> 0x%04X\n", val);
 345			break;
 346		case ATOM_SRC_BYTE0:
 347			DEBUG(".[7:0] -> 0x%02X\n", val);
 348			break;
 349		case ATOM_SRC_BYTE8:
 350			DEBUG(".[15:8] -> 0x%02X\n", val);
 351			break;
 352		case ATOM_SRC_BYTE16:
 353			DEBUG(".[23:16] -> 0x%02X\n", val);
 354			break;
 355		case ATOM_SRC_BYTE24:
 356			DEBUG(".[31:24] -> 0x%02X\n", val);
 357			break;
 358		}
 359	return val;
 360}
 361
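    /* Advance the bytecode pointer past a source operand without evaluating
     * it; the operand size depends on the argument type and, for immediates,
     * on the alignment encoded in attr.
     */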
 362static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
 363{
 364	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
 365	switch (arg) {
 366	case ATOM_ARG_REG:
 367	case ATOM_ARG_ID:
 368		(*ptr) += 2;
 369		break;
 370	case ATOM_ARG_PLL:
 371	case ATOM_ARG_MC:
 372	case ATOM_ARG_PS:
 373	case ATOM_ARG_WS:
 374	case ATOM_ARG_FB:
 375		(*ptr)++;
 376		break;
 377	case ATOM_ARG_IMM:
 378		switch (align) {
 379		case ATOM_SRC_DWORD:
 380			(*ptr) += 4;
 381			return;
 382		case ATOM_SRC_WORD0:
 383		case ATOM_SRC_WORD8:
 384		case ATOM_SRC_WORD16:
 385			(*ptr) += 2;
 386			return;
 387		case ATOM_SRC_BYTE0:
 388		case ATOM_SRC_BYTE8:
 389		case ATOM_SRC_BYTE16:
 390		case ATOM_SRC_BYTE24:
 391			(*ptr)++;
 392			return;
 393		}
 394		return;
 395	}
 396}
 397
 398static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
 399{
 400	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
 401}
 402
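    /* Fetch an immediate value of the given alignment straight from the
     * command table byte stream, advancing the parser position accordingly.
     */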
 403static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
 404{
 405	uint32_t val = 0xCDCDCDCD;
 406
 407	switch (align) {
 408	case ATOM_SRC_DWORD:
 409		val = U32(*ptr);
 410		(*ptr) += 4;
 411		break;
 412	case ATOM_SRC_WORD0:
 413	case ATOM_SRC_WORD8:
 414	case ATOM_SRC_WORD16:
 415		val = U16(*ptr);
 416		(*ptr) += 2;
 417		break;
 418	case ATOM_SRC_BYTE0:
 419	case ATOM_SRC_BYTE8:
 420	case ATOM_SRC_BYTE16:
 421	case ATOM_SRC_BYTE24:
 422		val = U8(*ptr);
 423		(*ptr)++;
 424		break;
 425	}
 426	return val;
 427}
 428
 429static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 430			     int *ptr, uint32_t *saved, int print)
 431{
 432	return atom_get_src_int(ctx,
 433				arg | atom_dst_to_src[(attr >> 3) &
 434						      7][(attr >> 6) & 3] << 3,
 435				ptr, saved, print);
 436}
 437
 438static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
 439{
 440	atom_skip_src_int(ctx,
 441			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
 442								 3] << 3, ptr);
 443}
 444
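    /* Write val back to a destination operand, merging it with the previously
     * saved value so that bits outside the destination's alignment window are
     * preserved.
     */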
 445static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 446			 int *ptr, uint32_t val, uint32_t saved)
 447{
 448	uint32_t align =
 449	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
 450	    val, idx;
 451	struct atom_context *gctx = ctx->ctx;
 452	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
 453	val <<= atom_arg_shift[align];
 454	val &= atom_arg_mask[align];
 455	saved &= ~atom_arg_mask[align];
 456	val |= saved;
 457	switch (arg) {
 458	case ATOM_ARG_REG:
 459		idx = U16(*ptr);
 460		(*ptr) += 2;
 461		DEBUG("REG[0x%04X]", idx);
 462		idx += gctx->reg_block;
 463		switch (gctx->io_mode) {
 464		case ATOM_IO_MM:
 465			if (idx == 0)
 466				gctx->card->reg_write(gctx->card, idx,
 467						      val << 2);
 468			else
 469				gctx->card->reg_write(gctx->card, idx, val);
 470			break;
 471		case ATOM_IO_PCI:
 472			printk(KERN_INFO
 473			       "PCI registers are not implemented.\n");
 474			return;
 475		case ATOM_IO_SYSIO:
 476			printk(KERN_INFO
 477			       "SYSIO registers are not implemented.\n");
 478			return;
 479		default:
 480			if (!(gctx->io_mode & 0x80)) {
 481				printk(KERN_INFO "Bad IO mode.\n");
 482				return;
 483			}
 484			if (!gctx->iio[gctx->io_mode & 0xFF]) {
 485				printk(KERN_INFO
 486				       "Undefined indirect IO write method %d.\n",
 487				       gctx->io_mode & 0x7F);
 488				return;
 489			}
 490			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
 491					 idx, val);
 492		}
 493		break;
 494	case ATOM_ARG_PS:
 495		idx = U8(*ptr);
 496		(*ptr)++;
 497		DEBUG("PS[0x%02X]", idx);
		if (idx >= ctx->ps_size) {
			pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
			return;
		}
 498		ctx->ps[idx] = cpu_to_le32(val);
 499		break;
 500	case ATOM_ARG_WS:
 501		idx = U8(*ptr);
 502		(*ptr)++;
 503		DEBUG("WS[0x%02X]", idx);
 504		switch (idx) {
 505		case ATOM_WS_QUOTIENT:
 506			gctx->divmul[0] = val;
 507			break;
 508		case ATOM_WS_REMAINDER:
 509			gctx->divmul[1] = val;
 510			break;
 511		case ATOM_WS_DATAPTR:
 512			gctx->data_block = val;
 513			break;
 514		case ATOM_WS_SHIFT:
 515			gctx->shift = val;
 516			break;
 517		case ATOM_WS_OR_MASK:
 518		case ATOM_WS_AND_MASK:
 519			break;
 520		case ATOM_WS_FB_WINDOW:
 521			gctx->fb_base = val;
 522			break;
 523		case ATOM_WS_ATTRIBUTES:
 524			gctx->io_attr = val;
 525			break;
 526		case ATOM_WS_REGPTR:
 527			gctx->reg_block = val;
 528			break;
 529		default:
			if (idx >= ctx->ws_size) {
				pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
				return;
			}
 530			ctx->ws[idx] = val;
 531		}
 532		break;
 533	case ATOM_ARG_FB:
 534		idx = U8(*ptr);
 535		(*ptr)++;
 536		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
 537			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
 538				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
 539		} else
 540			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
 541		DEBUG("FB[0x%02X]", idx);
 542		break;
 543	case ATOM_ARG_PLL:
 544		idx = U8(*ptr);
 545		(*ptr)++;
 546		DEBUG("PLL[0x%02X]", idx);
 547		gctx->card->pll_write(gctx->card, idx, val);
 548		break;
 549	case ATOM_ARG_MC:
 550		idx = U8(*ptr);
 551		(*ptr)++;
 552		DEBUG("MC[0x%02X]", idx);
 553		gctx->card->mc_write(gctx->card, idx, val);
 554		return;
 555	}
 556	switch (align) {
 557	case ATOM_SRC_DWORD:
 558		DEBUG(".[31:0] <- 0x%08X\n", old_val);
 559		break;
 560	case ATOM_SRC_WORD0:
 561		DEBUG(".[15:0] <- 0x%04X\n", old_val);
 562		break;
 563	case ATOM_SRC_WORD8:
 564		DEBUG(".[23:8] <- 0x%04X\n", old_val);
 565		break;
 566	case ATOM_SRC_WORD16:
 567		DEBUG(".[31:16] <- 0x%04X\n", old_val);
 568		break;
 569	case ATOM_SRC_BYTE0:
 570		DEBUG(".[7:0] <- 0x%02X\n", old_val);
 571		break;
 572	case ATOM_SRC_BYTE8:
 573		DEBUG(".[15:8] <- 0x%02X\n", old_val);
 574		break;
 575	case ATOM_SRC_BYTE16:
 576		DEBUG(".[23:16] <- 0x%02X\n", old_val);
 577		break;
 578	case ATOM_SRC_BYTE24:
 579		DEBUG(".[31:24] <- 0x%02X\n", old_val);
 580		break;
 581	}
 582}
 583
 584static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
 585{
 586	uint8_t attr = U8((*ptr)++);
 587	uint32_t dst, src, saved;
 588	int dptr = *ptr;
 589	SDEBUG("   dst: ");
 590	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 591	SDEBUG("   src: ");
 592	src = atom_get_src(ctx, attr, ptr);
 593	dst += src;
 594	SDEBUG("   dst: ");
 595	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 596}
 597
 598static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
 599{
 600	uint8_t attr = U8((*ptr)++);
 601	uint32_t dst, src, saved;
 602	int dptr = *ptr;
 603	SDEBUG("   dst: ");
 604	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 605	SDEBUG("   src: ");
 606	src = atom_get_src(ctx, attr, ptr);
 607	dst &= src;
 608	SDEBUG("   dst: ");
 609	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 610}
 611
 612static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
 613{
 614	printk("ATOM BIOS beeped!\n");
 615}
 616
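    /* CALL_TABLE: recursively execute another command table; a failure in the
     * callee aborts the calling table as well.
     */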
 617static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
 618{
 619	int idx = U8((*ptr)++);
 620	int r = 0;
 621
 622	if (idx < ATOM_TABLE_NAMES_CNT)
 623		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
 624	else
 625		SDEBUG("   table: %d\n", idx);
 626	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
 627		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
 628	if (r) {
 629		ctx->abort = true;
 630	}
 631}
 632
 633static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
 634{
 635	uint8_t attr = U8((*ptr)++);
 636	uint32_t saved;
 637	int dptr = *ptr;
 638	attr &= 0x38;
 639	attr |= atom_def_dst[attr >> 3] << 6;
 640	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
 641	SDEBUG("   dst: ");
 642	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
 643}
 644
 645static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
 646{
 647	uint8_t attr = U8((*ptr)++);
 648	uint32_t dst, src;
 649	SDEBUG("   src1: ");
 650	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 651	SDEBUG("   src2: ");
 652	src = atom_get_src(ctx, attr, ptr);
 653	ctx->ctx->cs_equal = (dst == src);
 654	ctx->ctx->cs_above = (dst > src);
 655	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
 656	       ctx->ctx->cs_above ? "GT" : "LE");
 657}
 658
 659static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 660{
 661	unsigned count = U8((*ptr)++);
 662	SDEBUG("   count: %d\n", count);
 663	if (arg == ATOM_UNIT_MICROSEC)
 664		udelay(count);
 665	else if (!drm_can_sleep())
 666		mdelay(count);
 667	else
 668		msleep(count);
 669}
 670
 671static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
 672{
 673	uint8_t attr = U8((*ptr)++);
 674	uint32_t dst, src;
 675	SDEBUG("   src1: ");
 676	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 677	SDEBUG("   src2: ");
 678	src = atom_get_src(ctx, attr, ptr);
 679	if (src != 0) {
 680		ctx->ctx->divmul[0] = dst / src;
 681		ctx->ctx->divmul[1] = dst % src;
 682	} else {
 683		ctx->ctx->divmul[0] = 0;
 684		ctx->ctx->divmul[1] = 0;
 685	}
 686}
 687
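    /* 64-bit divide: the dividend's low dword is the destination operand and
     * its high dword comes from the remainder workspace register; the 64-bit
     * quotient is returned split across the quotient/remainder pair.
     */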
 688static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
 689{
 690	uint64_t val64;
 691	uint8_t attr = U8((*ptr)++);
 692	uint32_t dst, src;
 693	SDEBUG("   src1: ");
 694	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 695	SDEBUG("   src2: ");
 696	src = atom_get_src(ctx, attr, ptr);
 697	if (src != 0) {
 698		val64 = dst;
 699		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
 700		do_div(val64, src);
 701		ctx->ctx->divmul[0] = lower_32_bits(val64);
 702		ctx->ctx->divmul[1] = upper_32_bits(val64);
 703	} else {
 704		ctx->ctx->divmul[0] = 0;
 705		ctx->ctx->divmul[1] = 0;
 706	}
 707}
 708
 709static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 710{
 711	/* functionally, a nop */
 712}
 713
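    /* Conditional and unconditional jumps. Repeated jumps to the same target
     * are timed so that a table spinning in a tight loop is aborted instead
     * of hanging the driver.
     */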
 714static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 715{
 716	int execute = 0, target = U16(*ptr);
 717	unsigned long cjiffies;
 718
 719	(*ptr) += 2;
 720	switch (arg) {
 721	case ATOM_COND_ABOVE:
 722		execute = ctx->ctx->cs_above;
 723		break;
 724	case ATOM_COND_ABOVEOREQUAL:
 725		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
 726		break;
 727	case ATOM_COND_ALWAYS:
 728		execute = 1;
 729		break;
 730	case ATOM_COND_BELOW:
 731		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
 732		break;
 733	case ATOM_COND_BELOWOREQUAL:
 734		execute = !ctx->ctx->cs_above;
 735		break;
 736	case ATOM_COND_EQUAL:
 737		execute = ctx->ctx->cs_equal;
 738		break;
 739	case ATOM_COND_NOTEQUAL:
 740		execute = !ctx->ctx->cs_equal;
 741		break;
 742	}
 743	if (arg != ATOM_COND_ALWAYS)
 744		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
 745	SDEBUG("   target: 0x%04X\n", target);
 746	if (execute) {
 747		if (ctx->last_jump == (ctx->start + target)) {
 748			cjiffies = jiffies;
 749			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
 750				cjiffies -= ctx->last_jump_jiffies;
 751				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
 752					DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
						  ATOM_CMD_TIMEOUT_SEC);
 753					ctx->abort = true;
 754				}
 755			} else {
 756				/* jiffies wrap around we will just wait a little longer */
 757				ctx->last_jump_jiffies = jiffies;
 758			}
 759		} else {
 760			ctx->last_jump = ctx->start + target;
 761			ctx->last_jump_jiffies = jiffies;
 762		}
 763		*ptr = ctx->start + target;
 764	}
 765}
 766
 767static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
 768{
 769	uint8_t attr = U8((*ptr)++);
 770	uint32_t dst, mask, src, saved;
 771	int dptr = *ptr;
 772	SDEBUG("   dst: ");
 773	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 774	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
 775	SDEBUG("   mask: 0x%08x", mask);
 776	SDEBUG("   src: ");
 777	src = atom_get_src(ctx, attr, ptr);
 778	dst &= mask;
 779	dst |= src;
 780	SDEBUG("   dst: ");
 781	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 782}
 783
 784static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
 785{
 786	uint8_t attr = U8((*ptr)++);
 787	uint32_t src, saved;
 788	int dptr = *ptr;
 789	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
 790		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
 791	else {
 792		atom_skip_dst(ctx, arg, attr, ptr);
 793		saved = 0xCDCDCDCD;
 794	}
 795	SDEBUG("   src: ");
 796	src = atom_get_src(ctx, attr, ptr);
 797	SDEBUG("   dst: ");
 798	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
 799}
 800
 801static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
 802{
 803	uint8_t attr = U8((*ptr)++);
 804	uint32_t dst, src;
 805	SDEBUG("   src1: ");
 806	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 807	SDEBUG("   src2: ");
 808	src = atom_get_src(ctx, attr, ptr);
 809	ctx->ctx->divmul[0] = dst * src;
 810}
 811
 812static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
 813{
 814	uint64_t val64;
 815	uint8_t attr = U8((*ptr)++);
 816	uint32_t dst, src;
 817	SDEBUG("   src1: ");
 818	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
 819	SDEBUG("   src2: ");
 820	src = atom_get_src(ctx, attr, ptr);
 821	val64 = (uint64_t)dst * (uint64_t)src;
 822	ctx->ctx->divmul[0] = lower_32_bits(val64);
 823	ctx->ctx->divmul[1] = upper_32_bits(val64);
 824}
 825
 826static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 827{
 828	/* nothing */
 829}
 830
 831static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
 832{
 833	uint8_t attr = U8((*ptr)++);
 834	uint32_t dst, src, saved;
 835	int dptr = *ptr;
 836	SDEBUG("   dst: ");
 837	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 838	SDEBUG("   src: ");
 839	src = atom_get_src(ctx, attr, ptr);
 840	dst |= src;
 841	SDEBUG("   dst: ");
 842	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 843}
 844
 845static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
 846{
 847	uint8_t val = U8((*ptr)++);
 848	SDEBUG("POST card output: 0x%02X\n", val);
 849}
 850
 851static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
 852{
 853	printk(KERN_INFO "unimplemented!\n");
 854}
 855
 856static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
 857{
 858	printk(KERN_INFO "unimplemented!\n");
 859}
 860
 861static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
 862{
 863	printk(KERN_INFO "unimplemented!\n");
 864}
 865
 866static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
 867{
 868	int idx = U8(*ptr);
 869	(*ptr)++;
 870	SDEBUG("   block: %d\n", idx);
 871	if (!idx)
 872		ctx->ctx->data_block = 0;
 873	else if (idx == 255)
 874		ctx->ctx->data_block = ctx->start;
 875	else
 876		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
 877	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
 878}
 879
 880static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
 881{
 882	uint8_t attr = U8((*ptr)++);
 883	SDEBUG("   fb_base: ");
 884	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
 885}
 886
 887static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
 888{
 889	int port;
 890	switch (arg) {
 891	case ATOM_PORT_ATI:
 892		port = U16(*ptr);
 893		if (port < ATOM_IO_NAMES_CNT)
 894			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
 895		else
 896			SDEBUG("   port: %d\n", port);
 897		if (!port)
 898			ctx->ctx->io_mode = ATOM_IO_MM;
 899		else
 900			ctx->ctx->io_mode = ATOM_IO_IIO | port;
 901		(*ptr) += 2;
 902		break;
 903	case ATOM_PORT_PCI:
 904		ctx->ctx->io_mode = ATOM_IO_PCI;
 905		(*ptr)++;
 906		break;
 907	case ATOM_PORT_SYSIO:
 908		ctx->ctx->io_mode = ATOM_IO_SYSIO;
 909		(*ptr)++;
 910		break;
 911	}
 912}
 913
 914static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
 915{
 916	ctx->ctx->reg_block = U16(*ptr);
 917	(*ptr) += 2;
 918	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
 919}
 920
 921static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
 922{
 923	uint8_t attr = U8((*ptr)++), shift;
 924	uint32_t saved, dst;
 925	int dptr = *ptr;
 926	attr &= 0x38;
 927	attr |= atom_def_dst[attr >> 3] << 6;
 928	SDEBUG("   dst: ");
 929	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 930	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
 931	SDEBUG("   shift: %d\n", shift);
 932	dst <<= shift;
 933	SDEBUG("   dst: ");
 934	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 935}
 936
 937static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
 938{
 939	uint8_t attr = U8((*ptr)++), shift;
 940	uint32_t saved, dst;
 941	int dptr = *ptr;
 942	attr &= 0x38;
 943	attr |= atom_def_dst[attr >> 3] << 6;
 944	SDEBUG("   dst: ");
 945	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 946	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
 947	SDEBUG("   shift: %d\n", shift);
 948	dst >>= shift;
 949	SDEBUG("   dst: ");
 950	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 951}
 952
 953static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 954{
 955	uint8_t attr = U8((*ptr)++), shift;
 956	uint32_t saved, dst;
 957	int dptr = *ptr;
 958	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
 959	SDEBUG("   dst: ");
 960	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 961	/* op needs the full dst value */
 962	dst = saved;
 963	shift = atom_get_src(ctx, attr, ptr);
 964	SDEBUG("   shift: %d\n", shift);
 965	dst <<= shift;
 966	dst &= atom_arg_mask[dst_align];
 967	dst >>= atom_arg_shift[dst_align];
 968	SDEBUG("   dst: ");
 969	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 970}
 971
 972static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 973{
 974	uint8_t attr = U8((*ptr)++), shift;
 975	uint32_t saved, dst;
 976	int dptr = *ptr;
 977	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
 978	SDEBUG("   dst: ");
 979	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 980	/* op needs the full dst value */
 981	dst = saved;
 982	shift = atom_get_src(ctx, attr, ptr);
 983	SDEBUG("   shift: %d\n", shift);
 984	dst >>= shift;
 985	dst &= atom_arg_mask[dst_align];
 986	dst >>= atom_arg_shift[dst_align];
 987	SDEBUG("   dst: ");
 988	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 989}
 990
 991static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
 992{
 993	uint8_t attr = U8((*ptr)++);
 994	uint32_t dst, src, saved;
 995	int dptr = *ptr;
 996	SDEBUG("   dst: ");
 997	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 998	SDEBUG("   src: ");
 999	src = atom_get_src(ctx, attr, ptr);
1000	dst -= src;
1001	SDEBUG("   dst: ");
1002	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1003}
1004
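    /* SWITCH: compare the source against each CASE value and jump to the
     * matching target; fall through past ATOM_CASE_END when no case matches.
     */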
1005static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1006{
1007	uint8_t attr = U8((*ptr)++);
1008	uint32_t src, val, target;
1009	SDEBUG("   switch: ");
1010	src = atom_get_src(ctx, attr, ptr);
1011	while (U16(*ptr) != ATOM_CASE_END)
1012		if (U8(*ptr) == ATOM_CASE_MAGIC) {
1013			(*ptr)++;
1014			SDEBUG("   case: ");
1015			val =
1016			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1017					 ptr);
1018			target = U16(*ptr);
1019			if (val == src) {
1020				SDEBUG("   target: %04X\n", target);
1021				*ptr = ctx->start + target;
1022				return;
1023			}
1024			(*ptr) += 2;
1025		} else {
1026			printk(KERN_INFO "Bad case.\n");
1027			return;
1028		}
1029	(*ptr) += 2;
1030}
1031
1032static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1033{
1034	uint8_t attr = U8((*ptr)++);
1035	uint32_t dst, src;
1036	SDEBUG("   src1: ");
1037	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1038	SDEBUG("   src2: ");
1039	src = atom_get_src(ctx, attr, ptr);
1040	ctx->ctx->cs_equal = ((dst & src) == 0);
1041	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1042}
1043
1044static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1045{
1046	uint8_t attr = U8((*ptr)++);
1047	uint32_t dst, src, saved;
1048	int dptr = *ptr;
1049	SDEBUG("   dst: ");
1050	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1051	SDEBUG("   src: ");
1052	src = atom_get_src(ctx, attr, ptr);
1053	dst ^= src;
1054	SDEBUG("   dst: ");
1055	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1056}
1057
1058static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1059{
1060	uint8_t val = U8((*ptr)++);
1061	SDEBUG("DEBUG output: 0x%02X\n", val);
1062}
1063
1064static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1065{
1066	uint16_t val = U16(*ptr);
1067	(*ptr) += val + 2;
1068	SDEBUG("PROCESSDS output: 0x%02X\n", val);
1069}
1070
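    /* Opcode dispatch table, indexed by ATOM opcode: each entry pairs a
     * handler with the operand type or condition code it operates on.
     */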
1071static struct {
1072	void (*func) (atom_exec_context *, int *, int);
1073	int arg;
1074} opcode_table[ATOM_OP_CNT] = {
1075	{
1076	NULL, 0}, {
1077	atom_op_move, ATOM_ARG_REG}, {
1078	atom_op_move, ATOM_ARG_PS}, {
1079	atom_op_move, ATOM_ARG_WS}, {
1080	atom_op_move, ATOM_ARG_FB}, {
1081	atom_op_move, ATOM_ARG_PLL}, {
1082	atom_op_move, ATOM_ARG_MC}, {
1083	atom_op_and, ATOM_ARG_REG}, {
1084	atom_op_and, ATOM_ARG_PS}, {
1085	atom_op_and, ATOM_ARG_WS}, {
1086	atom_op_and, ATOM_ARG_FB}, {
1087	atom_op_and, ATOM_ARG_PLL}, {
1088	atom_op_and, ATOM_ARG_MC}, {
1089	atom_op_or, ATOM_ARG_REG}, {
1090	atom_op_or, ATOM_ARG_PS}, {
1091	atom_op_or, ATOM_ARG_WS}, {
1092	atom_op_or, ATOM_ARG_FB}, {
1093	atom_op_or, ATOM_ARG_PLL}, {
1094	atom_op_or, ATOM_ARG_MC}, {
1095	atom_op_shift_left, ATOM_ARG_REG}, {
1096	atom_op_shift_left, ATOM_ARG_PS}, {
1097	atom_op_shift_left, ATOM_ARG_WS}, {
1098	atom_op_shift_left, ATOM_ARG_FB}, {
1099	atom_op_shift_left, ATOM_ARG_PLL}, {
1100	atom_op_shift_left, ATOM_ARG_MC}, {
1101	atom_op_shift_right, ATOM_ARG_REG}, {
1102	atom_op_shift_right, ATOM_ARG_PS}, {
1103	atom_op_shift_right, ATOM_ARG_WS}, {
1104	atom_op_shift_right, ATOM_ARG_FB}, {
1105	atom_op_shift_right, ATOM_ARG_PLL}, {
1106	atom_op_shift_right, ATOM_ARG_MC}, {
1107	atom_op_mul, ATOM_ARG_REG}, {
1108	atom_op_mul, ATOM_ARG_PS}, {
1109	atom_op_mul, ATOM_ARG_WS}, {
1110	atom_op_mul, ATOM_ARG_FB}, {
1111	atom_op_mul, ATOM_ARG_PLL}, {
1112	atom_op_mul, ATOM_ARG_MC}, {
1113	atom_op_div, ATOM_ARG_REG}, {
1114	atom_op_div, ATOM_ARG_PS}, {
1115	atom_op_div, ATOM_ARG_WS}, {
1116	atom_op_div, ATOM_ARG_FB}, {
1117	atom_op_div, ATOM_ARG_PLL}, {
1118	atom_op_div, ATOM_ARG_MC}, {
1119	atom_op_add, ATOM_ARG_REG}, {
1120	atom_op_add, ATOM_ARG_PS}, {
1121	atom_op_add, ATOM_ARG_WS}, {
1122	atom_op_add, ATOM_ARG_FB}, {
1123	atom_op_add, ATOM_ARG_PLL}, {
1124	atom_op_add, ATOM_ARG_MC}, {
1125	atom_op_sub, ATOM_ARG_REG}, {
1126	atom_op_sub, ATOM_ARG_PS}, {
1127	atom_op_sub, ATOM_ARG_WS}, {
1128	atom_op_sub, ATOM_ARG_FB}, {
1129	atom_op_sub, ATOM_ARG_PLL}, {
1130	atom_op_sub, ATOM_ARG_MC}, {
1131	atom_op_setport, ATOM_PORT_ATI}, {
1132	atom_op_setport, ATOM_PORT_PCI}, {
1133	atom_op_setport, ATOM_PORT_SYSIO}, {
1134	atom_op_setregblock, 0}, {
1135	atom_op_setfbbase, 0}, {
1136	atom_op_compare, ATOM_ARG_REG}, {
1137	atom_op_compare, ATOM_ARG_PS}, {
1138	atom_op_compare, ATOM_ARG_WS}, {
1139	atom_op_compare, ATOM_ARG_FB}, {
1140	atom_op_compare, ATOM_ARG_PLL}, {
1141	atom_op_compare, ATOM_ARG_MC}, {
1142	atom_op_switch, 0}, {
1143	atom_op_jump, ATOM_COND_ALWAYS}, {
1144	atom_op_jump, ATOM_COND_EQUAL}, {
1145	atom_op_jump, ATOM_COND_BELOW}, {
1146	atom_op_jump, ATOM_COND_ABOVE}, {
1147	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1148	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1149	atom_op_jump, ATOM_COND_NOTEQUAL}, {
1150	atom_op_test, ATOM_ARG_REG}, {
1151	atom_op_test, ATOM_ARG_PS}, {
1152	atom_op_test, ATOM_ARG_WS}, {
1153	atom_op_test, ATOM_ARG_FB}, {
1154	atom_op_test, ATOM_ARG_PLL}, {
1155	atom_op_test, ATOM_ARG_MC}, {
1156	atom_op_delay, ATOM_UNIT_MILLISEC}, {
1157	atom_op_delay, ATOM_UNIT_MICROSEC}, {
1158	atom_op_calltable, 0}, {
1159	atom_op_repeat, 0}, {
1160	atom_op_clear, ATOM_ARG_REG}, {
1161	atom_op_clear, ATOM_ARG_PS}, {
1162	atom_op_clear, ATOM_ARG_WS}, {
1163	atom_op_clear, ATOM_ARG_FB}, {
1164	atom_op_clear, ATOM_ARG_PLL}, {
1165	atom_op_clear, ATOM_ARG_MC}, {
1166	atom_op_nop, 0}, {
1167	atom_op_eot, 0}, {
1168	atom_op_mask, ATOM_ARG_REG}, {
1169	atom_op_mask, ATOM_ARG_PS}, {
1170	atom_op_mask, ATOM_ARG_WS}, {
1171	atom_op_mask, ATOM_ARG_FB}, {
1172	atom_op_mask, ATOM_ARG_PLL}, {
1173	atom_op_mask, ATOM_ARG_MC}, {
1174	atom_op_postcard, 0}, {
1175	atom_op_beep, 0}, {
1176	atom_op_savereg, 0}, {
1177	atom_op_restorereg, 0}, {
1178	atom_op_setdatablock, 0}, {
1179	atom_op_xor, ATOM_ARG_REG}, {
1180	atom_op_xor, ATOM_ARG_PS}, {
1181	atom_op_xor, ATOM_ARG_WS}, {
1182	atom_op_xor, ATOM_ARG_FB}, {
1183	atom_op_xor, ATOM_ARG_PLL}, {
1184	atom_op_xor, ATOM_ARG_MC}, {
1185	atom_op_shl, ATOM_ARG_REG}, {
1186	atom_op_shl, ATOM_ARG_PS}, {
1187	atom_op_shl, ATOM_ARG_WS}, {
1188	atom_op_shl, ATOM_ARG_FB}, {
1189	atom_op_shl, ATOM_ARG_PLL}, {
1190	atom_op_shl, ATOM_ARG_MC}, {
1191	atom_op_shr, ATOM_ARG_REG}, {
1192	atom_op_shr, ATOM_ARG_PS}, {
1193	atom_op_shr, ATOM_ARG_WS}, {
1194	atom_op_shr, ATOM_ARG_FB}, {
1195	atom_op_shr, ATOM_ARG_PLL}, {
1196	atom_op_shr, ATOM_ARG_MC}, {
1197	atom_op_debug, 0}, {
1198	atom_op_processds, 0}, {
1199	atom_op_mul32, ATOM_ARG_PS}, {
1200	atom_op_mul32, ATOM_ARG_WS}, {
1201	atom_op_div32, ATOM_ARG_PS}, {
1202	atom_op_div32, ATOM_ARG_WS},
1203};
1204
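    /* Execute a single command table with the context mutex already held:
     * decode the table header, allocate the workspace, then step through the
     * bytecode until EOT, an unknown opcode, or an abort condition.
     */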
1205static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1206{
1207	int base = CU16(ctx->cmd_table + 4 + 2 * index);
1208	int len, ws, ps, ptr;
1209	unsigned char op;
1210	atom_exec_context ectx;
1211	int ret = 0;
1212
1213	if (!base)
1214		return -EINVAL;
1215
1216	len = CU16(base + ATOM_CT_SIZE_PTR);
1217	ws = CU8(base + ATOM_CT_WS_PTR);
1218	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1219	ptr = base + ATOM_CT_CODE_PTR;
1220
1221	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1222
1223	ectx.ctx = ctx;
1224	ectx.ps_shift = ps / 4;
1225	ectx.start = base;
1226	ectx.ps = params;
    	ectx.ps_size = params_size;
1227	ectx.abort = false;
1228	ectx.last_jump = 0;
1229	if (ws) {
1230		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
    		ectx.ws_size = ws;
1231	} else {
1232		ectx.ws = NULL;
    		ectx.ws_size = 0;
    	}
1233
1234	debug_depth++;
1235	while (1) {
1236		op = CU8(ptr++);
1237		if (op < ATOM_OP_NAMES_CNT)
1238			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1239		else
1240			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1241		if (ectx.abort) {
1242			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1243				base, len, ws, ps, ptr - 1);
1244			ret = -EINVAL;
1245			goto free;
1246		}
1247
1248		if (op < ATOM_OP_CNT && op > 0)
1249			opcode_table[op].func(&ectx, &ptr,
1250					      opcode_table[op].arg);
1251		else
1252			break;
1253
1254		if (op == ATOM_OP_EOT)
1255			break;
1256	}
1257	debug_depth--;
1258	SDEBUG("<<\n");
1259
1260free:
1261	if (ws)
1262		kfree(ectx.ws);
1263	return ret;
1264}
1265
1266int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1267{
1268	int r;
1269
1270	mutex_lock(&ctx->mutex);
1271	/* reset data block */
1272	ctx->data_block = 0;
1273	/* reset reg block */
1274	ctx->reg_block = 0;
1275	/* reset fb window */
1276	ctx->fb_base = 0;
1277	/* reset io mode */
1278	ctx->io_mode = ATOM_IO_MM;
1279	/* reset divmul */
1280	ctx->divmul[0] = 0;
1281	ctx->divmul[1] = 0;
1282	r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
1283	mutex_unlock(&ctx->mutex);
1284	return r;
1285}
1286
1287static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1288
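    /* Build an index of the indirect IO (IIO) programs found in the BIOS so
     * they can be looked up by port number at execution time.
     */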
1289static void atom_index_iio(struct atom_context *ctx, int base)
1290{
1291	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1292	if (!ctx->iio)
1293		return;
1294	while (CU8(base) == ATOM_IIO_START) {
1295		ctx->iio[CU8(base + 1)] = base + 2;
1296		base += 2;
1297		while (CU8(base) != ATOM_IIO_END)
1298			base += atom_iio_len[CU8(base)];
1299		base += 3;
1300	}
1301}
1302
1303struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1304{
1305	int base;
1306	struct atom_context *ctx =
1307	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1308	char *str;
1309	char name[512];
1310	int i;
1311
1312	if (!ctx)
1313		return NULL;
1314
1315	ctx->card = card;
1316	ctx->bios = bios;
1317
1318	if (CU16(0) != ATOM_BIOS_MAGIC) {
1319		printk(KERN_INFO "Invalid BIOS magic.\n");
1320		kfree(ctx);
1321		return NULL;
1322	}
1323	if (strncmp
1324	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1325	     strlen(ATOM_ATI_MAGIC))) {
1326		printk(KERN_INFO "Invalid ATI magic.\n");
1327		kfree(ctx);
1328		return NULL;
1329	}
1330
1331	base = CU16(ATOM_ROM_TABLE_PTR);
1332	if (strncmp
1333	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1334	     strlen(ATOM_ROM_MAGIC))) {
1335		printk(KERN_INFO "Invalid ATOM magic.\n");
1336		kfree(ctx);
1337		return NULL;
1338	}
1339
1340	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1341	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1342	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1343	if (!ctx->iio) {
1344		amdgpu_atom_destroy(ctx);
1345		return NULL;
1346	}
1347
1348	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1349	while (*str && ((*str == '\n') || (*str == '\r')))
1350		str++;
1351	/* name string isn't always 0 terminated */
1352	for (i = 0; i < 511; i++) {
1353		name[i] = str[i];
1354		if (name[i] < '.' || name[i] > 'z') {
1355			name[i] = 0;
1356			break;
1357		}
1358	}
1359	printk(KERN_INFO "ATOM BIOS: %s\n", name);
1360
1361	return ctx;
1362}
1363
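    /* Run the ASIC_Init command table with the default engine and memory
     * clocks taken from the firmware info data table.
     */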
1364int amdgpu_atom_asic_init(struct atom_context *ctx)
1365{
1366	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1367	uint32_t ps[16];
1368	int ret;
1369
1370	memset(ps, 0, 64);
1371
1372	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1373	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1374	if (!ps[0] || !ps[1])
1375		return 1;
1376
1377	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1378		return 1;
1379	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
1380	if (ret)
1381		return ret;
1382
1383	memset(ps, 0, 64);
1384
1385	return ret;
1386}
1387
1388void amdgpu_atom_destroy(struct atom_context *ctx)
1389{
1390	kfree(ctx->iio);
1391	kfree(ctx);
1392}
1393
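    /* Look up a data table by index and return its size, format revision,
     * content revision and offset; returns false if the table is not present.
     */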
1394bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1395			    uint16_t * size, uint8_t * frev, uint8_t * crev,
1396			    uint16_t * data_start)
1397{
1398	int offset = index * 2 + 4;
1399	int idx = CU16(ctx->data_table + offset);
1400	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1401
1402	if (!mdt[index])
1403		return false;
1404
1405	if (size)
1406		*size = CU16(idx);
1407	if (frev)
1408		*frev = CU8(idx + 2);
1409	if (crev)
1410		*crev = CU8(idx + 3);
1411	*data_start = idx;
1412	return true;
1413}
1414
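    /* Look up a command table by index and return its format and content
     * revisions; returns false if the table is not present.
     */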
1415bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1416			   uint8_t * crev)
1417{
1418	int offset = index * 2 + 4;
1419	int idx = CU16(ctx->cmd_table + offset);
1420	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1421
1422	if (!mct[index])
1423		return false;
1424
1425	if (frev)
1426		*frev = CU8(idx + 2);
1427	if (crev)
1428		*crev = CU8(idx + 3);
1429	return true;
1430}
1431
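    /* Allocate the scratch buffer backing ATOM framebuffer (FB) accesses,
     * sized from the VRAM_UsageByFirmware table with a 20KB fallback.
     */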
1432int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx)
1433{
1434	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1435	uint16_t data_offset;
1436	int usage_bytes = 0;
1437	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1438
1439	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1440		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1441
1442		DRM_DEBUG("atom firmware requested %08x %dkb\n",
1443			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1444			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1445
1446		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1447	}
1448	ctx->scratch_size_bytes = 0;
1449	if (usage_bytes == 0)
1450		usage_bytes = 20 * 1024;
1451	/* allocate some scratch memory */
1452	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1453	if (!ctx->scratch)
1454		return -ENOMEM;
1455	ctx->scratch_size_bytes = usage_bytes;
1456	return 0;
1457}