Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.6.
   1/*
   2 * Copyright (C) 2007 Ben Skeggs.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include "drmP.h"
  28#include "drm.h"
  29#include "nouveau_drv.h"
  30#include "nouveau_ramht.h"
  31#include "nouveau_grctx.h"
  32#include "nouveau_dma.h"
  33#include "nouveau_vm.h"
  34#include "nv50_evo.h"
  35
/* Per-device PGRAPH engine state for NV50-family (Tesla) GPUs. */
struct nv50_graph_engine {
	struct nouveau_exec_engine base;	/* common engine hooks (init/fini/context ops) */
	u32 ctxprog[512];	/* ctxprog microcode built by nv50_grctx_init() */
	u32 ctxprog_size;	/* number of valid u32 entries in ctxprog[] */
	u32 grctx_size;		/* size in bytes of a channel's graph context image */
};
  42
  43static void
  44nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
  45{
  46	const uint32_t mask = 0x00010001;
  47
  48	if (enabled)
  49		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
  50	else
  51		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
  52}
  53
  54static struct nouveau_channel *
  55nv50_graph_channel(struct drm_device *dev)
  56{
  57	struct drm_nouveau_private *dev_priv = dev->dev_private;
  58	uint32_t inst;
  59	int i;
  60
  61	/* Be sure we're not in the middle of a context switch or bad things
  62	 * will happen, such as unloading the wrong pgraph context.
  63	 */
  64	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
  65		NV_ERROR(dev, "Ctxprog is still running\n");
  66
  67	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
  68	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
  69		return NULL;
  70	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
  71
  72	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
  73		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
  74
  75		if (chan && chan->ramin && chan->ramin->vinst == inst)
  76			return chan;
  77	}
  78
  79	return NULL;
  80}
  81
/* Manually force PGRAPH to load the context at instance address 'inst'.
 * The register sequence mirrors what the hardware context switch does;
 * statement order is significant.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	/* Disable fifo access while we poke at the context state. */
	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);	/* read back to flush the write */
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	/* Record the now-current instance; the ISR reads 0x40032c to find
	 * the active channel.  Bit 31 presumably flags "loaded" — matches
	 * the mask used when reading it back elsewhere.
	 */
	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);	/* restore previous fifo access state */

	return 0;
}
 102
/* Save the currently resident PGRAPH context (if any) back to instance
 * memory and clear the LOADED flag.  Returns 0 even when no context was
 * resident.
 */
static int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;	/* nothing loaded, nothing to save */
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	/* Write CUR back without the LOADED bit set. */
	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}
 122
 123static void
 124nv50_graph_init_reset(struct drm_device *dev)
 125{
 126	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
 127	NV_DEBUG(dev, "\n");
 128
 129	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
 130	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
 131}
 132
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* Acknowledge anything pending, then unmask all interrupt sources. */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}
 142
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);	/* bitmask of present TP units */
	int i;

	NV_DEBUG(dev, "\n");

	/* Write 0xc0000000 to each unit's trap status register — the same
	 * value the trap handler uses to acknowledge traps.  These are the
	 * DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE and "unknown 0x402000"
	 * units handled in nv50_pgraph_trap_handler().
	 */
	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	/* Same for the per-TP trap registers; base and stride differ
	 * between pre-NVA0 (<<12) and NVA0+ (<<11) chipsets.
	 */
	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	nv_wr32(dev, 0x400108, 0xffffffff);	/* clear all TRAP status bits */

	nv_wr32(dev, 0x400824, 0x00004000);
	nv_wr32(dev, 0x400500, 0x00010001);	/* enable fifo access */
}
 178
/* Per-generation zcull setup; the register values are magic numbers
 * taken from hardware traces.
 */
static void
nv50_graph_init_zcull(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	switch (dev_priv->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(dev, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		nv_wr32(dev, 0x402cc0, 0x00000000);
		if (dev_priv->chipset == 0xa0 ||
		    dev_priv->chipset == 0xaa ||
		    dev_priv->chipset == 0xac) {
			nv_wr32(dev, 0x402ca8, 0x00000802);
		} else {
			/* NOTE(review): 0x402cc0 is written twice with the
			 * same value on this path — looks redundant but is
			 * kept as traced.
			 */
			nv_wr32(dev, 0x402cc0, 0x00000000);
			nv_wr32(dev, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	/* NOTE(review): four u32s per iteration span 16 bytes but the
	 * stride is 8, so successive iterations overlap — possibly the
	 * intended stride was 0x10; confirm against hw docs before
	 * changing.
	 */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
	}
}
 216
/* Upload the generated ctxprog microcode and enable hardware context
 * switching.  Always returns 0.
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	NV_DEBUG(dev, "\n");

	/* Reset the ucode write index, then stream the words in. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < pgraph->ctxprog_size; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);

	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
	nv_wr32(dev, 0x400320, 4);
	/* No context loaded yet. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
 235
/* Engine init hook: full bring-up of PGRAPH.  Order matters — reset
 * first, then static registers and zcull, then the ctxprog upload, and
 * interrupts are only unmasked once everything else is in place.
 */
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
	int ret;

	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_zcull(dev);

	ret = nv50_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	nv50_graph_init_intr(dev);
	return 0;
}
 254
/* Engine shutdown/suspend hook.  Blocks fifo access, waits for PGRAPH
 * to idle (0x400700 reading 0), saves the resident context and masks
 * interrupts.  If the engine refuses to idle during a suspend, fifo
 * access is restored and -EBUSY returned so the caller can abort.
 */
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
	if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
		nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
		return -EBUSY;
	}
	nv50_graph_unload_context(dev);
	nv_wr32(dev, 0x40013c, 0x00000000);	/* presumably the intr mask reg; the ISR toggles it too */
	return 0;
}
 267
/* Engine context_new hook: allocate a channel's graph context image,
 * point its RAMIN header at it, and seed it with the initial register
 * values generated by nv50_grctx_init().
 *
 * Returns 0 on success or the negative errno from the allocation.
 */
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *grctx = NULL;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* The context image; zeroed on alloc and on free. */
	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &grctx);
	if (ret)
		return ret;

	/* Context header in RAMIN; it lives at a different offset on the
	 * original NV50 than on later chips.
	 */
	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);	/* limit */
	nv_wo32(ramin, hdr + 0x08, grctx->vinst);	/* base */
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	/* Fill the image with initial context values.
	 * NOTE(review): the return value of nv50_grctx_init() is ignored
	 * on this path — confirm VALS mode cannot fail.
	 */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
	return 0;
}
 308
/* Engine context_del hook: evict and free a channel's graph context. */
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin)
		return;

	/* Freeze channel switching while we tear the context down. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);
	nv50_graph_fifo_access(dev, false);

	/* If this channel's context is resident, save it out first. */
	if (nv50_graph_channel(dev) == chan)
		nv50_graph_unload_context(dev);

	/* Wipe the 24-byte context header in RAMIN. */
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	nv50_graph_fifo_access(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Drop the reference; NVOBJ_FLAG_ZERO_FREE (set at alloc) zeroes
	 * the image on release.
	 */
	nouveau_gpuobj_ref(NULL, &grctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}
 344
/* Engine object_new hook: create the 16-byte instance object for a
 * graphics class and insert it into the channel's RAMHT under 'handle'.
 */
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class  = class;

	/* First word holds the class id; the rest stays zero. */
	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	/* RAMHT takes its own reference; drop ours whether or not the
	 * insert succeeded.
	 */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
 370
/* Handle a CONTEXT_SWITCH interrupt: save the outgoing context, load
 * the instance PFIFO has queued as next, and re-enable the context
 * switch interrupt.
 */
static void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}
 385
 386static int
 387nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
 388			   u32 class, u32 mthd, u32 data)
 389{
 390	struct nouveau_gpuobj *gpuobj;
 391
 392	gpuobj = nouveau_ramht_find(chan, data);
 393	if (!gpuobj)
 394		return -ENOENT;
 395
 396	if (nouveau_notifier_offset(gpuobj, NULL))
 397		return -EINVAL;
 398
 399	chan->nvsw.vblsem = gpuobj;
 400	chan->nvsw.vblsem_offset = ~0;
 401	return 0;
 402}
 403
 404static int
 405nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
 406			      u32 class, u32 mthd, u32 data)
 407{
 408	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
 409		return -ERANGE;
 410
 411	chan->nvsw.vblsem_offset = data >> 2;
 412	return 0;
 413}
 414
 415static int
 416nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
 417				   u32 class, u32 mthd, u32 data)
 418{
 419	chan->nvsw.vblsem_rval = data;
 420	return 0;
 421}
 422
/* NVSW method 0x0408: arm the semaphore release on the next vblank of
 * CRTC 'data' (only heads 0 and 1 are valid).
 */
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Require a bound semaphore, a programmed offset and a valid head. */
	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	/* NOTE(review): drm_vblank_get() can fail; its return value is
	 * ignored here — confirm that is intentional.
	 */
	drm_vblank_get(dev, data);

	chan->nvsw.vblsem_head = data;
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);

	return 0;
}
 440
/* NVSW method 0x0500: complete a pending page flip on this channel. */
static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}
 448
 449
/* Simple PGRAPH TLB flush, used on NV50 and NVAC (see
 * nv50_graph_create); other chips need the nv84 variant below.
 */
static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0);
}
 455
/* PGRAPH TLB flush for NV84+.  The engine is stalled and polled until
 * (mostly) idle before flushing; flushing while units are busy is
 * apparently unsafe on these chips — TODO confirm against hw docs.
 */
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	/* Block fifo access so no new work reaches PGRAPH mid-flush. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

	/* Poll status registers 0x400380/384/388: each 3-bit field with
	 * value 1 marks a still-busy unit.  Give up after 2e9 timer ticks
	 * (presumably nanoseconds, i.e. 2 seconds — confirm).
	 */
	start = ptimer->read(dev);
	do {
		idle = true;

		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	/* Flush anyway, even after a timeout. */
	nv50_vm_flush_engine(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
 501
/* Decoded status values for MP (shader core) execution traps. */
static struct nouveau_enum nv50_mp_exec_error_names[] = {
	{ 3, "STACK_UNDERFLOW", NULL },
	{ 4, "QUADON_ACTIVE", NULL },
	{ 8, "TIMEOUT", NULL },
	{ 0x10, "INVALID_OPCODE", NULL },
	{ 0x40, "BREAKPOINT", NULL },
	{}
};
 510
/* M2MF trap status bits (register 0x406800). */
static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};
 517
/* VFETCH trap status bits (register 0x400c04). */
static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};
 522
/* STRMOUT trap status bits (register 0x401800). */
static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};
 527
/* CCACHE trap status bits (register 0x405018). */
static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};
 532
/* There must be a *lot* of these. Will take some time to gather them up. */
/* DATA_ERROR codes from register 0x400110.  Not static — presumably
 * shared with other chipset files; verify before changing linkage.
 */
struct nouveau_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};
 578
/* Top-level PGRAPH interrupt status bits (register 0x400100). */
static struct nouveau_bitfield nv50_graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};
 592
/* Report (when 'display' is set) and acknowledge execution traps for
 * every MP of TP 'tpid'.
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;	/* MPs that actually reported an error */
	for (i = 0; i < 4; i++) {
		/* bits 24..27 of 0x1540 flag which MPs exist */
		if (!(units & 1 << (i+24)))
			continue;
		/* per-MP register base/stride differs pre/post NVA0 */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* read of +0x20 appears required before +0x24 is
			 * valid — result intentionally discarded
			 */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* acknowledge: write 0x10 back, clear the status reg */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
 632
/* Walk all TPs and report/acknowledge traps of the given 'type'
 * (6 = texture, 7 = MP, 8 = TPDMA).  'ustatus_old'/'ustatus_new' are
 * the per-TP status register bases for pre-NVA0 and NVA0+ chipsets
 * respectively; 'name' is the prefix used in log messages.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;	/* TPs that reported something */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* fault descriptor registers follow the status reg */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* acknowledge the trap for this TP */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
 732
/* Decode and acknowledge a PGRAPH TRAP interrupt, unit by unit.
 * 'display' gates all log output; 'inst'/'chid' identify the channel
 * for the messages.  Returns 0 when every reported trap was handled
 * (caller then suppresses the generic TRAP message), nonzero otherwise.
 */
static int
nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
{
	u32 status = nv_rd32(dev, 0x400108);	/* per-unit trap status */
	u32 ustatus;

	if (!status && display) {
		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		nv_wr32(dev, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nv_rd32(dev, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(dev, 0x40080c);
			u32 datah = nv_rd32(dev, 0x400810);
			u32 class = nv_rd32(dev, 0x400814);
			u32 r848 = nv_rd32(dev, 0x400848);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
			/* bit 31 of 0x400808 indicates a valid stuck command */
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x%08x "
					     "400808 0x%08x 400848 0x%08x\n",
					chid, inst, subc, class, mthd, datah,
					datal, addr, r848);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			nv_wr32(dev, 0x400808, 0);
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nv_rd32(dev, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(dev, 0x40085c);
			u32 class = nv_rd32(dev, 0x400814);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x 40084c 0x%08x\n",
					chid, inst, subc, class, mthd,
					data, addr);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			nv_wr32(dev, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
				      "0x%08x)\n", ustatus);
		}

		/* ack the unit and the top-level status bit */
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
		}

		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
				     " %08x %08x %08x\n",
				nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
				nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
				nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
				nv_rd32(dev, 0x40501c));

		}

		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				    "PGRAPH - TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				    "PGRAPH - TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				    "PGRAPH - TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* Anything left over is unrecognised; ack it blindly. */
	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
		nv_wr32(dev, 0x400108, status);
	}

	return 1;
}
 943
 944int
 945nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
 946{
 947	struct drm_nouveau_private *dev_priv = dev->dev_private;
 948	struct nouveau_channel *chan;
 949	unsigned long flags;
 950	int i;
 951
 952	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 953	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
 954		chan = dev_priv->channels.ptr[i];
 955		if (!chan || !chan->ramin)
 956			continue;
 957
 958		if (inst == chan->ramin->vinst)
 959			break;
 960	}
 961	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 962	return i;
 963}
 964
/* PGRAPH interrupt handler (registered for IRQ source 12).  Loops
 * until the status register reads clear, dispatching software methods,
 * context switches, data errors and traps; anything left in 'show' is
 * logged (rate-limited).
 */
static void
nv50_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, 0x400100))) {
		/* 0x40032c holds the instance of the active context */
		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
		u32 chid = nv50_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400814);
		u32 show = stat;	/* bits still worth reporting */

		/* ILLEGAL_MTHD: try to handle it as a software method. */
		if (stat & 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
						       mthd, data))
				show &= ~0x00000010;
		}

		/* CONTEXT_SWITCH: ack and mask before switching. */
		if (stat & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, 0x400100, 0x00001000);
			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
			nv50_graph_context_switch(dev);
			stat &= ~0x00001000;
			show &= ~0x00001000;
		}

		show = (show && nouveau_ratelimit()) ? show : 0;

		if (show & 0x00100000) {
			u32 ecode = nv_rd32(dev, 0x400110);
			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
			nouveau_enum_print(nv50_data_error_names, ecode);
			printk("\n");
		}

		if (stat & 0x00200000) {
			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
				show &= ~0x00200000;
		}

		/* ack everything, re-enable fifo access */
		nv_wr32(dev, 0x400100, stat);
		nv_wr32(dev, 0x400500, 0x00010001);

		if (show) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv50_graph_intr, show);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
			nv50_fb_vm_trap(dev, 1);
		}
	}

	/* clear bit 31 of 0x400824 if the hardware set it */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
1026
/* Engine destroy hook: unregister the engine, release its interrupt
 * handler and free the per-device state.
 */
static void
nv50_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);

	NVOBJ_ENGINE_DEL(dev, GR);

	nouveau_irq_unregister(dev, 12);	/* 12 == PGRAPH, see nv50_graph_create() */
	kfree(pgraph);
}
1037
1038int
1039nv50_graph_create(struct drm_device *dev)
1040{
1041	struct drm_nouveau_private *dev_priv = dev->dev_private;
1042	struct nv50_graph_engine *pgraph;
1043	struct nouveau_grctx ctx = {};
1044	int ret;
1045
1046	pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
1047	if (!pgraph)
1048		return -ENOMEM;
1049
1050	ctx.dev = dev;
1051	ctx.mode = NOUVEAU_GRCTX_PROG;
1052	ctx.data = pgraph->ctxprog;
1053	ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
1054
1055	ret = nv50_grctx_init(&ctx);
1056	if (ret) {
1057		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
1058		kfree(pgraph);
1059		return 0;
1060	}
1061
1062	pgraph->grctx_size = ctx.ctxvals_pos * 4;
1063	pgraph->ctxprog_size = ctx.ctxprog_len;
1064
1065	pgraph->base.destroy = nv50_graph_destroy;
1066	pgraph->base.init = nv50_graph_init;
1067	pgraph->base.fini = nv50_graph_fini;
1068	pgraph->base.context_new = nv50_graph_context_new;
1069	pgraph->base.context_del = nv50_graph_context_del;
1070	pgraph->base.object_new = nv50_graph_object_new;
1071	if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
1072		pgraph->base.tlb_flush = nv50_graph_tlb_flush;
1073	else
1074		pgraph->base.tlb_flush = nv84_graph_tlb_flush;
1075
1076	nouveau_irq_register(dev, 12, nv50_graph_isr);
1077
1078	/* NVSW really doesn't live here... */
1079	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1080	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
1081	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
1082	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
1083	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
1084	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
1085
1086	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1087	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1088	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
1089	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
1090
1091	/* tesla */
1092	if (dev_priv->chipset == 0x50)
1093		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
1094	else
1095	if (dev_priv->chipset < 0xa0)
1096		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
1097	else {
1098		switch (dev_priv->chipset) {
1099		case 0xa0:
1100		case 0xaa:
1101		case 0xac:
1102			NVOBJ_CLASS(dev, 0x8397, GR);
1103			break;
1104		case 0xa3:
1105		case 0xa5:
1106		case 0xa8:
1107			NVOBJ_CLASS(dev, 0x8597, GR);
1108			break;
1109		case 0xaf:
1110			NVOBJ_CLASS(dev, 0x8697, GR);
1111			break;
1112		}
1113	}
1114
1115	/* compute */
1116	NVOBJ_CLASS(dev, 0x50c0, GR);
1117	if (dev_priv->chipset  > 0xa0 &&
1118	    dev_priv->chipset != 0xaa &&
1119	    dev_priv->chipset != 0xac)
1120		NVOBJ_CLASS(dev, 0x85c0, GR);
1121
1122	return 0;
1123}