v4.6
   1/*
   2 * Copyright (C) 2015 Etnaviv Project
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License version 2 as published by
   6 * the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program.  If not, see <http://www.gnu.org/licenses/>.
  15 */
  16
  17#include <linux/component.h>
  18#include <linux/fence.h>
  19#include <linux/moduleparam.h>
  20#include <linux/of_device.h>
  21#include "etnaviv_dump.h"
  22#include "etnaviv_gpu.h"
  23#include "etnaviv_gem.h"
  24#include "etnaviv_mmu.h"
  25#include "etnaviv_iommu.h"
  26#include "etnaviv_iommu_v2.h"
  27#include "common.xml.h"
  28#include "state.xml.h"
  29#include "state_hi.xml.h"
  30#include "cmdstream.xml.h"
  31
  32static const struct platform_device_id gpu_ids[] = {
  33	{ .name = "etnaviv-gpu,2d" },
  34	{ },
  35};
  36
  37static bool etnaviv_dump_core = true;
  38module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
  39
  40/*
  41 * Driver functions:
  42 */
  43
  44int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
  45{
  46	switch (param) {
  47	case ETNAVIV_PARAM_GPU_MODEL:
  48		*value = gpu->identity.model;
  49		break;
  50
  51	case ETNAVIV_PARAM_GPU_REVISION:
  52		*value = gpu->identity.revision;
  53		break;
  54
  55	case ETNAVIV_PARAM_GPU_FEATURES_0:
  56		*value = gpu->identity.features;
  57		break;
  58
  59	case ETNAVIV_PARAM_GPU_FEATURES_1:
  60		*value = gpu->identity.minor_features0;
  61		break;
  62
  63	case ETNAVIV_PARAM_GPU_FEATURES_2:
  64		*value = gpu->identity.minor_features1;
  65		break;
  66
  67	case ETNAVIV_PARAM_GPU_FEATURES_3:
  68		*value = gpu->identity.minor_features2;
  69		break;
  70
  71	case ETNAVIV_PARAM_GPU_FEATURES_4:
  72		*value = gpu->identity.minor_features3;
  73		break;
  74
  75	case ETNAVIV_PARAM_GPU_FEATURES_5:
  76		*value = gpu->identity.minor_features4;
  77		break;
  78
  79	case ETNAVIV_PARAM_GPU_FEATURES_6:
  80		*value = gpu->identity.minor_features5;
  81		break;
  82
  83	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
  84		*value = gpu->identity.stream_count;
  85		break;
  86
  87	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
  88		*value = gpu->identity.register_max;
  89		break;
  90
  91	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
  92		*value = gpu->identity.thread_count;
  93		break;
  94
  95	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
  96		*value = gpu->identity.vertex_cache_size;
  97		break;
  98
  99	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
 100		*value = gpu->identity.shader_core_count;
 101		break;
 102
 103	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
 104		*value = gpu->identity.pixel_pipes;
 105		break;
 106
 107	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
 108		*value = gpu->identity.vertex_output_buffer_size;
 109		break;
 110
 111	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
 112		*value = gpu->identity.buffer_size;
 113		break;
 114
 115	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
 116		*value = gpu->identity.instruction_count;
 117		break;
 118
 119	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
 120		*value = gpu->identity.num_constants;
 121		break;
 122
 123	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 124		*value = gpu->identity.varyings_count;
 125		break;
 126
 127	default:
 128		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
 129		return -EINVAL;
 130	}
 131
 132	return 0;
 133}
 134
 135
 136#define etnaviv_is_model_rev(gpu, mod, rev) \
 137	((gpu)->identity.model == chipModel_##mod && \
 138	 (gpu)->identity.revision == rev)
 139#define etnaviv_field(val, field) \
 140	(((val) & field##__MASK) >> field##__SHIFT)
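/*
 * Illustration (hypothetical FOO field, not part of this file): with
 * FOO__MASK 0x0000ff00 and FOO__SHIFT 8, etnaviv_field(0x1234abcd, FOO)
 * expands to ((0x1234abcd & 0x0000ff00) >> 8), i.e. 0xab.
 */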
 141
 142static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 143{
 144	if (gpu->identity.minor_features0 &
 145	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 146		u32 specs[4];
 147		unsigned int streams;
 148
 149		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 150		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
 151		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
 152		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
 153
 154		gpu->identity.stream_count = etnaviv_field(specs[0],
 155					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 156		gpu->identity.register_max = etnaviv_field(specs[0],
 157					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
 158		gpu->identity.thread_count = etnaviv_field(specs[0],
 159					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
 160		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
 161					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
 162		gpu->identity.shader_core_count = etnaviv_field(specs[0],
 163					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
 164		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
 165					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
 166		gpu->identity.vertex_output_buffer_size =
 167			etnaviv_field(specs[0],
 168				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
 169
 170		gpu->identity.buffer_size = etnaviv_field(specs[1],
 171					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
 172		gpu->identity.instruction_count = etnaviv_field(specs[1],
 173					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
 174		gpu->identity.num_constants = etnaviv_field(specs[1],
 175					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
 176
 177		gpu->identity.varyings_count = etnaviv_field(specs[2],
 178					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
 179
 180		/* This overrides the value from older register if non-zero */
 181		streams = etnaviv_field(specs[3],
 182					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
 183		if (streams)
 184			gpu->identity.stream_count = streams;
 185	}
 186
 187	/* Fill in the stream count if not specified */
 188	if (gpu->identity.stream_count == 0) {
 189		if (gpu->identity.model >= 0x1000)
 190			gpu->identity.stream_count = 4;
 191		else
 192			gpu->identity.stream_count = 1;
 193	}
 194
 195	/* Convert the register max value */
 196	if (gpu->identity.register_max)
 197		gpu->identity.register_max = 1 << gpu->identity.register_max;
 198	else if (gpu->identity.model == chipModel_GC400)
 199		gpu->identity.register_max = 32;
 200	else
 201		gpu->identity.register_max = 64;
 202
 203	/* Convert thread count */
 204	if (gpu->identity.thread_count)
 205		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
 206	else if (gpu->identity.model == chipModel_GC400)
 207		gpu->identity.thread_count = 64;
 208	else if (gpu->identity.model == chipModel_GC500 ||
 209		 gpu->identity.model == chipModel_GC530)
 210		gpu->identity.thread_count = 128;
 211	else
 212		gpu->identity.thread_count = 256;
 213
 214	if (gpu->identity.vertex_cache_size == 0)
 215		gpu->identity.vertex_cache_size = 8;
 216
 217	if (gpu->identity.shader_core_count == 0) {
 218		if (gpu->identity.model >= 0x1000)
 219			gpu->identity.shader_core_count = 2;
 220		else
 221			gpu->identity.shader_core_count = 1;
 222	}
 223
 224	if (gpu->identity.pixel_pipes == 0)
 225		gpu->identity.pixel_pipes = 1;
 226
  227	/* Convert vertex buffer size */
 228	if (gpu->identity.vertex_output_buffer_size) {
 229		gpu->identity.vertex_output_buffer_size =
 230			1 << gpu->identity.vertex_output_buffer_size;
 231	} else if (gpu->identity.model == chipModel_GC400) {
 232		if (gpu->identity.revision < 0x4000)
 233			gpu->identity.vertex_output_buffer_size = 512;
 234		else if (gpu->identity.revision < 0x4200)
 235			gpu->identity.vertex_output_buffer_size = 256;
 236		else
 237			gpu->identity.vertex_output_buffer_size = 128;
 238	} else {
 239		gpu->identity.vertex_output_buffer_size = 512;
 240	}
 241
 242	switch (gpu->identity.instruction_count) {
 243	case 0:
 244		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 245		    gpu->identity.model == chipModel_GC880)
 246			gpu->identity.instruction_count = 512;
 247		else
 248			gpu->identity.instruction_count = 256;
 249		break;
 250
 251	case 1:
 252		gpu->identity.instruction_count = 1024;
 253		break;
 254
 255	case 2:
 256		gpu->identity.instruction_count = 2048;
 257		break;
 258
 259	default:
 260		gpu->identity.instruction_count = 256;
 261		break;
 262	}
 263
 264	if (gpu->identity.num_constants == 0)
 265		gpu->identity.num_constants = 168;
 266
 267	if (gpu->identity.varyings_count == 0) {
 268		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
 269			gpu->identity.varyings_count = 12;
 270		else
 271			gpu->identity.varyings_count = 8;
 272	}
 273
 274	/*
 275	 * For some cores, two varyings are consumed for position, so the
 276	 * maximum varying count needs to be reduced by one.
 277	 */
 278	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
 279	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 280	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
 281	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 282	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
 283	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
 284	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
 285	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 286	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
 287	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
 288	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
 289		gpu->identity.varyings_count -= 1;
 290}
 291
 292static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 293{
 294	u32 chipIdentity;
 295
 296	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 297
  298	/* Special case for older graphics cores. */
 299	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
 300		gpu->identity.model    = chipModel_GC500;
 301		gpu->identity.revision = etnaviv_field(chipIdentity,
 302					 VIVS_HI_CHIP_IDENTITY_REVISION);
 303	} else {
 304
 305		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 306		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
 307
 308		/*
 309		 * !!!! HACK ALERT !!!!
 310		 * Because people change device IDs without letting software
 311		 * know about it - here is the hack to make it all look the
 312		 * same.  Only for GC400 family.
 313		 */
 314		if ((gpu->identity.model & 0xff00) == 0x0400 &&
 315		    gpu->identity.model != chipModel_GC420) {
 316			gpu->identity.model = gpu->identity.model & 0x0400;
 317		}
 318
 319		/* Another special case */
 320		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
 321			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 322			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 323
 324			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
 325				/*
 326				 * This IP has an ECO; put the correct
 327				 * revision in it.
 328				 */
 329				gpu->identity.revision = 0x1051;
 330			}
 331		}
 332	}
 333
 334	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 335		 gpu->identity.model, gpu->identity.revision);
 336
 337	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 338
 339	/* Disable fast clear on GC700. */
 340	if (gpu->identity.model == chipModel_GC700)
 341		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 342
 343	if ((gpu->identity.model == chipModel_GC500 &&
 344	     gpu->identity.revision < 2) ||
 345	    (gpu->identity.model == chipModel_GC300 &&
 346	     gpu->identity.revision < 0x2000)) {
 347
 348		/*
  349	 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
 350		 * registers.
 351		 */
 352		gpu->identity.minor_features0 = 0;
 353		gpu->identity.minor_features1 = 0;
 354		gpu->identity.minor_features2 = 0;
 355		gpu->identity.minor_features3 = 0;
 356		gpu->identity.minor_features4 = 0;
 357		gpu->identity.minor_features5 = 0;
 358	} else
 359		gpu->identity.minor_features0 =
 360				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
 361
 362	if (gpu->identity.minor_features0 &
 363	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 364		gpu->identity.minor_features1 =
 365				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
 366		gpu->identity.minor_features2 =
 367				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
 368		gpu->identity.minor_features3 =
 369				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
 370		gpu->identity.minor_features4 =
 371				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
 372		gpu->identity.minor_features5 =
 373				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
 374	}
 375
 376	/* GC600 idle register reports zero bits where modules aren't present */
 377	if (gpu->identity.model == chipModel_GC600) {
 378		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 379				 VIVS_HI_IDLE_STATE_RA |
 380				 VIVS_HI_IDLE_STATE_SE |
 381				 VIVS_HI_IDLE_STATE_PA |
 382				 VIVS_HI_IDLE_STATE_SH |
 383				 VIVS_HI_IDLE_STATE_PE |
 384				 VIVS_HI_IDLE_STATE_DE |
 385				 VIVS_HI_IDLE_STATE_FE;
 386	} else {
 387		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 388	}
 389
 390	etnaviv_hw_specs(gpu);
 391}
 392
 393static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 394{
 395	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
 396		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 397	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 398}
 399
 400static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 401{
 402	u32 control, idle;
 403	unsigned long timeout;
 404	bool failed = true;
 405
 406	/* TODO
 407	 *
 408	 * - clock gating
  409	 * - pulse eater
 410	 * - what about VG?
 411	 */
 412
 413	/* We hope that the GPU resets in under one second */
 414	timeout = jiffies + msecs_to_jiffies(1000);
 415
 416	while (time_is_after_jiffies(timeout)) {
 417		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 418			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 419
 420		/* enable clock */
 421		etnaviv_gpu_load_clock(gpu, control);
 422
 423		/* Wait for stable clock.  Vivante's code waited for 1ms */
 424		usleep_range(1000, 10000);
 425
 426		/* isolate the GPU. */
 427		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 428		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 429
 430		/* set soft reset. */
 431		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 432		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 433
 434		/* wait for reset. */
 435		msleep(1);
 436
 437		/* reset soft reset bit. */
 438		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 439		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 440
 441		/* reset GPU isolation. */
 442		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 443		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 444
 445		/* read idle register. */
 446		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 447
  448		/* try resetting again if FE is not idle */
 449		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 450			dev_dbg(gpu->dev, "FE is not idle\n");
 451			continue;
 452		}
 453
 454		/* read reset register. */
 455		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 456
 457		/* is the GPU idle? */
 458		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
 459		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
 460			dev_dbg(gpu->dev, "GPU is not idle\n");
 461			continue;
 462		}
 463
 464		failed = false;
 465		break;
 466	}
 467
 468	if (failed) {
 469		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 470		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 471
 472		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
 473			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
 474			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
 475			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
 476
 477		return -EBUSY;
 478	}
 479
 480	/* We rely on the GPU running, so program the clock */
 481	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 482		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 483
 484	/* enable clock */
 485	etnaviv_gpu_load_clock(gpu, control);
 486
 487	return 0;
 488}
 489
 490static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 491{
 492	u16 prefetch;
 493
 494	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 495	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 496	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
 497		u32 mc_memory_debug;
 498
 499		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
 500
 501		if (gpu->identity.revision == 0x5007)
 502			mc_memory_debug |= 0x0c;
 503		else
 504			mc_memory_debug |= 0x08;
 505
 506		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 507	}
 508
 509	/*
  510	 * Update GPU AXI cache attribute to "cacheable, no allocate".
 511	 * This is necessary to prevent the iMX6 SoC locking up.
 512	 */
 513	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
 514		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
 515		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 516
 517	/* GC2000 rev 5108 needs a special bus config */
 518	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
 519		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
 520		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
 521				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
 522		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
 523			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
 524		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 525	}
 526
 527	/* set base addresses */
 528	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
 529	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
 530	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
 531	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
 532	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
 533
 534	/* setup the MMU page table pointers */
 535	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
 536
 537	/* Start command processor */
 538	prefetch = etnaviv_buffer_init(gpu);
 539
 540	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 541	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
 542		  gpu->buffer->paddr - gpu->memory_base);
 543	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
 544		  VIVS_FE_COMMAND_CONTROL_ENABLE |
 545		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 546}
 547
 548int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 549{
 550	int ret, i;
 551	struct iommu_domain *iommu;
 552	enum etnaviv_iommu_version version;
 553	bool mmuv2;
 554
 555	ret = pm_runtime_get_sync(gpu->dev);
 556	if (ret < 0)
 557		return ret;
 558
 559	etnaviv_hw_identify(gpu);
 560
 561	if (gpu->identity.model == 0) {
 562		dev_err(gpu->dev, "Unknown GPU model\n");
 563		ret = -ENXIO;
 564		goto fail;
 565	}
 566
 567	/* Exclude VG cores with FE2.0 */
 568	if (gpu->identity.features & chipFeatures_PIPE_VG &&
 569	    gpu->identity.features & chipFeatures_FE20) {
 570		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
 571		ret = -ENXIO;
 572		goto fail;
 573	}
 574
 575	/*
 576	 * Set the GPU linear window to be at the end of the DMA window, where
 577	 * the CMA area is likely to reside. This ensures that we are able to
 578	 * map the command buffers while having the linear window overlap as
 579	 * much RAM as possible, so we can optimize mappings for other buffers.
 580	 *
 581	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
 582	 * to different views of the memory on the individual engines.
 583	 */
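	/*
	 * Worked example (hypothetical values, not taken from this file): with
	 * a 32-bit required DMA mask (0xffffffff) and a PHYS_OFFSET at or
	 * below 0x7fffffff, memory_base becomes 0xffffffff - SZ_2G + 1 =
	 * 0x80000000, i.e. the linear window spans the top 2 GiB of the DMA
	 * range.
	 */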
 584	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
 585	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
 586		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
 587		if (dma_mask < PHYS_OFFSET + SZ_2G)
 588			gpu->memory_base = PHYS_OFFSET;
 589		else
 590			gpu->memory_base = dma_mask - SZ_2G + 1;
 591	}
 592
 593	ret = etnaviv_hw_reset(gpu);
 594	if (ret)
 595		goto fail;
 596
 597	/* Setup IOMMU.. eventually we will (I think) do this once per context
 598	 * and have separate page tables per context.  For now, to keep things
 599	 * simple and to get something working, just use a single address space:
 600	 */
 601	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
 602	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
 603
 604	if (!mmuv2) {
 605		iommu = etnaviv_iommu_domain_alloc(gpu);
 606		version = ETNAVIV_IOMMU_V1;
 607	} else {
 608		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
 609		version = ETNAVIV_IOMMU_V2;
 610	}
 611
 612	if (!iommu) {
 613		ret = -ENOMEM;
 614		goto fail;
 615	}
 616
 617	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
 618	if (!gpu->mmu) {
 619		iommu_domain_free(iommu);
 620		ret = -ENOMEM;
 621		goto fail;
 622	}
 623
 624	/* Create buffer: */
 625	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
 626	if (!gpu->buffer) {
 627		ret = -ENOMEM;
 628		dev_err(gpu->dev, "could not create command buffer\n");
 629		goto destroy_iommu;
 630	}
 631	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
 632		ret = -EINVAL;
 633		dev_err(gpu->dev,
 634			"command buffer outside valid memory window\n");
 635		goto free_buffer;
 636	}
 637
 638	/* Setup event management */
 639	spin_lock_init(&gpu->event_spinlock);
 640	init_completion(&gpu->event_free);
 641	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 642		gpu->event[i].used = false;
 643		complete(&gpu->event_free);
 644	}
 645
 646	/* Now program the hardware */
 647	mutex_lock(&gpu->lock);
 648	etnaviv_gpu_hw_init(gpu);
 649	gpu->exec_state = -1;
 650	mutex_unlock(&gpu->lock);
 651
 652	pm_runtime_mark_last_busy(gpu->dev);
 653	pm_runtime_put_autosuspend(gpu->dev);
 654
 655	return 0;
 656
 657free_buffer:
 658	etnaviv_gpu_cmdbuf_free(gpu->buffer);
 659	gpu->buffer = NULL;
 660destroy_iommu:
 661	etnaviv_iommu_destroy(gpu->mmu);
 662	gpu->mmu = NULL;
 663fail:
 664	pm_runtime_mark_last_busy(gpu->dev);
 665	pm_runtime_put_autosuspend(gpu->dev);
 666
 667	return ret;
 668}
 669
 670#ifdef CONFIG_DEBUG_FS
 671struct dma_debug {
 672	u32 address[2];
 673	u32 state[2];
 674};
 675
 676static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 677{
 678	u32 i;
 679
 680	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 681	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 682
 683	for (i = 0; i < 500; i++) {
 684		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 685		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 686
 687		if (debug->address[0] != debug->address[1])
 688			break;
 689
 690		if (debug->state[0] != debug->state[1])
 691			break;
 692	}
 693}
 694
 695int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 696{
 697	struct dma_debug debug;
 698	u32 dma_lo, dma_hi, axi, idle;
 699	int ret;
 700
 701	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
 702
 703	ret = pm_runtime_get_sync(gpu->dev);
 704	if (ret < 0)
 705		return ret;
 706
 707	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 708	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
 709	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
 710	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 711
 712	verify_dma(gpu, &debug);
 713
 714	seq_puts(m, "\tfeatures\n");
 715	seq_printf(m, "\t minor_features0: 0x%08x\n",
 716		   gpu->identity.minor_features0);
 717	seq_printf(m, "\t minor_features1: 0x%08x\n",
 718		   gpu->identity.minor_features1);
 719	seq_printf(m, "\t minor_features2: 0x%08x\n",
 720		   gpu->identity.minor_features2);
 721	seq_printf(m, "\t minor_features3: 0x%08x\n",
 722		   gpu->identity.minor_features3);
 723	seq_printf(m, "\t minor_features4: 0x%08x\n",
 724		   gpu->identity.minor_features4);
 725	seq_printf(m, "\t minor_features5: 0x%08x\n",
 726		   gpu->identity.minor_features5);
 727
 728	seq_puts(m, "\tspecs\n");
 729	seq_printf(m, "\t stream_count:  %d\n",
 730			gpu->identity.stream_count);
 731	seq_printf(m, "\t register_max: %d\n",
 732			gpu->identity.register_max);
 733	seq_printf(m, "\t thread_count: %d\n",
 734			gpu->identity.thread_count);
 735	seq_printf(m, "\t vertex_cache_size: %d\n",
 736			gpu->identity.vertex_cache_size);
 737	seq_printf(m, "\t shader_core_count: %d\n",
 738			gpu->identity.shader_core_count);
 739	seq_printf(m, "\t pixel_pipes: %d\n",
 740			gpu->identity.pixel_pipes);
 741	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
 742			gpu->identity.vertex_output_buffer_size);
 743	seq_printf(m, "\t buffer_size: %d\n",
 744			gpu->identity.buffer_size);
 745	seq_printf(m, "\t instruction_count: %d\n",
 746			gpu->identity.instruction_count);
 747	seq_printf(m, "\t num_constants: %d\n",
 748			gpu->identity.num_constants);
 749	seq_printf(m, "\t varyings_count: %d\n",
 750			gpu->identity.varyings_count);
 751
 752	seq_printf(m, "\taxi: 0x%08x\n", axi);
 753	seq_printf(m, "\tidle: 0x%08x\n", idle);
 754	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
 755	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
 756		seq_puts(m, "\t FE is not idle\n");
 757	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
 758		seq_puts(m, "\t DE is not idle\n");
 759	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
 760		seq_puts(m, "\t PE is not idle\n");
 761	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
 762		seq_puts(m, "\t SH is not idle\n");
 763	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
 764		seq_puts(m, "\t PA is not idle\n");
 765	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
 766		seq_puts(m, "\t SE is not idle\n");
 767	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
 768		seq_puts(m, "\t RA is not idle\n");
 769	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
 770		seq_puts(m, "\t TX is not idle\n");
 771	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
 772		seq_puts(m, "\t VG is not idle\n");
 773	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
 774		seq_puts(m, "\t IM is not idle\n");
 775	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
 776		seq_puts(m, "\t FP is not idle\n");
 777	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 778		seq_puts(m, "\t TS is not idle\n");
 779	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 780		seq_puts(m, "\t AXI low power mode\n");
 781
 782	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
 783		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
 784		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
 785		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
 786
 787		seq_puts(m, "\tMC\n");
 788		seq_printf(m, "\t read0: 0x%08x\n", read0);
 789		seq_printf(m, "\t read1: 0x%08x\n", read1);
 790		seq_printf(m, "\t write: 0x%08x\n", write);
 791	}
 792
 793	seq_puts(m, "\tDMA ");
 794
 795	if (debug.address[0] == debug.address[1] &&
 796	    debug.state[0] == debug.state[1]) {
 797		seq_puts(m, "seems to be stuck\n");
 798	} else if (debug.address[0] == debug.address[1]) {
  799		seq_puts(m, "address is constant\n");
 800	} else {
  801		seq_puts(m, "is running\n");
 802	}
 803
 804	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
 805	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
 806	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
 807	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 808	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 809		   dma_lo, dma_hi);
 810
 811	ret = 0;
 812
 813	pm_runtime_mark_last_busy(gpu->dev);
 814	pm_runtime_put_autosuspend(gpu->dev);
 815
 816	return ret;
 817}
 818#endif
 819
 820/*
 821 * Power Management:
 822 */
 823static int enable_clk(struct etnaviv_gpu *gpu)
 824{
 825	if (gpu->clk_core)
 826		clk_prepare_enable(gpu->clk_core);
 827	if (gpu->clk_shader)
 828		clk_prepare_enable(gpu->clk_shader);
 829
 830	return 0;
 831}
 832
 833static int disable_clk(struct etnaviv_gpu *gpu)
 834{
 835	if (gpu->clk_core)
 836		clk_disable_unprepare(gpu->clk_core);
 837	if (gpu->clk_shader)
 838		clk_disable_unprepare(gpu->clk_shader);
 839
 840	return 0;
 841}
 842
 843static int enable_axi(struct etnaviv_gpu *gpu)
 844{
 845	if (gpu->clk_bus)
 846		clk_prepare_enable(gpu->clk_bus);
 847
 848	return 0;
 849}
 850
 851static int disable_axi(struct etnaviv_gpu *gpu)
 852{
 853	if (gpu->clk_bus)
 854		clk_disable_unprepare(gpu->clk_bus);
 855
 856	return 0;
 857}
 858
 859/*
 860 * Hangcheck detection for locked gpu:
 861 */
 862static void recover_worker(struct work_struct *work)
 863{
 864	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
 865					       recover_work);
 866	unsigned long flags;
 867	unsigned int i;
 868
 869	dev_err(gpu->dev, "hangcheck recover!\n");
 870
 871	if (pm_runtime_get_sync(gpu->dev) < 0)
 872		return;
 873
 874	mutex_lock(&gpu->lock);
 875
 876	/* Only catch the first event, or when manually re-armed */
 877	if (etnaviv_dump_core) {
 878		etnaviv_core_dump(gpu);
 879		etnaviv_dump_core = false;
 880	}
 881
 882	etnaviv_hw_reset(gpu);
 883
 884	/* complete all events, the GPU won't do it after the reset */
 885	spin_lock_irqsave(&gpu->event_spinlock, flags);
 886	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 887		if (!gpu->event[i].used)
 888			continue;
 889		fence_signal(gpu->event[i].fence);
 890		gpu->event[i].fence = NULL;
 891		gpu->event[i].used = false;
 892		complete(&gpu->event_free);
 893	}
 894	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 895	gpu->completed_fence = gpu->active_fence;
 896
 897	etnaviv_gpu_hw_init(gpu);
 898	gpu->switch_context = true;
 899	gpu->exec_state = -1;
 900
 901	mutex_unlock(&gpu->lock);
 902	pm_runtime_mark_last_busy(gpu->dev);
 903	pm_runtime_put_autosuspend(gpu->dev);
 904
 905	/* Retire the buffer objects in a work */
 906	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
 907}
 908
 909static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
 910{
 911	DBG("%s", dev_name(gpu->dev));
 912	mod_timer(&gpu->hangcheck_timer,
 913		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
 914}
 915
 916static void hangcheck_handler(unsigned long data)
 917{
 918	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
 919	u32 fence = gpu->completed_fence;
 920	bool progress = false;
 921
 922	if (fence != gpu->hangcheck_fence) {
 923		gpu->hangcheck_fence = fence;
 924		progress = true;
 925	}
 926
 927	if (!progress) {
 928		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 929		int change = dma_addr - gpu->hangcheck_dma_addr;
 930
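		/*
		 * Note: movement by more than 16 bytes (or backwards) since
		 * the last check is taken as forward progress; an address
		 * stuck within that window most likely means the FE is
		 * sitting in its short WAIT/LINK loop without fetching new
		 * work.
		 */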
 931		if (change < 0 || change > 16) {
 932			gpu->hangcheck_dma_addr = dma_addr;
 933			progress = true;
 934		}
 935	}
 936
 937	if (!progress && fence_after(gpu->active_fence, fence)) {
 938		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
 939		dev_err(gpu->dev, "     completed fence: %u\n", fence);
 940		dev_err(gpu->dev, "     active fence: %u\n",
 941			gpu->active_fence);
 942		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
 943	}
 944
 945	/* if still more pending work, reset the hangcheck timer: */
 946	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
 947		hangcheck_timer_reset(gpu);
 948}
 949
 950static void hangcheck_disable(struct etnaviv_gpu *gpu)
 951{
 952	del_timer_sync(&gpu->hangcheck_timer);
 953	cancel_work_sync(&gpu->recover_work);
 954}
 955
 956/* fence object management */
 957struct etnaviv_fence {
 958	struct etnaviv_gpu *gpu;
 959	struct fence base;
 960};
 961
 962static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
 963{
 964	return container_of(fence, struct etnaviv_fence, base);
 965}
 966
 967static const char *etnaviv_fence_get_driver_name(struct fence *fence)
 968{
 969	return "etnaviv";
 970}
 971
 972static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
 973{
 974	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 975
 976	return dev_name(f->gpu->dev);
 977}
 978
 979static bool etnaviv_fence_enable_signaling(struct fence *fence)
 980{
 981	return true;
 982}
 983
 984static bool etnaviv_fence_signaled(struct fence *fence)
 985{
 986	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 987
 988	return fence_completed(f->gpu, f->base.seqno);
 989}
 990
 991static void etnaviv_fence_release(struct fence *fence)
 992{
 993	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 994
 995	kfree_rcu(f, base.rcu);
 996}
 997
 998static const struct fence_ops etnaviv_fence_ops = {
 999	.get_driver_name = etnaviv_fence_get_driver_name,
1000	.get_timeline_name = etnaviv_fence_get_timeline_name,
1001	.enable_signaling = etnaviv_fence_enable_signaling,
1002	.signaled = etnaviv_fence_signaled,
1003	.wait = fence_default_wait,
1004	.release = etnaviv_fence_release,
1005};
1006
1007static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1008{
1009	struct etnaviv_fence *f;
1010
1011	f = kzalloc(sizeof(*f), GFP_KERNEL);
1012	if (!f)
1013		return NULL;
1014
1015	f->gpu = gpu;
1016
1017	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1018		   gpu->fence_context, ++gpu->next_fence);
1019
1020	return &f->base;
1021}
1022
1023int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1024	unsigned int context, bool exclusive)
1025{
1026	struct reservation_object *robj = etnaviv_obj->resv;
1027	struct reservation_object_list *fobj;
1028	struct fence *fence;
1029	int i, ret;
1030
1031	if (!exclusive) {
1032		ret = reservation_object_reserve_shared(robj);
1033		if (ret)
1034			return ret;
1035	}
1036
1037	/*
1038	 * If we have any shared fences, then the exclusive fence
1039	 * should be ignored as it will already have been signalled.
1040	 */
1041	fobj = reservation_object_get_list(robj);
1042	if (!fobj || fobj->shared_count == 0) {
1043		/* Wait on any existing exclusive fence which isn't our own */
1044		fence = reservation_object_get_excl(robj);
1045		if (fence && fence->context != context) {
1046			ret = fence_wait(fence, true);
1047			if (ret)
1048				return ret;
1049		}
1050	}
1051
1052	if (!exclusive || !fobj)
1053		return 0;
1054
1055	for (i = 0; i < fobj->shared_count; i++) {
1056		fence = rcu_dereference_protected(fobj->shared[i],
1057						reservation_object_held(robj));
1058		if (fence->context != context) {
1059			ret = fence_wait(fence, true);
1060			if (ret)
1061				return ret;
1062		}
1063	}
1064
1065	return 0;
1066}
1067
1068/*
1069 * event management:
1070 */
1071
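/*
 * Note: gpu->event_free is used as a counting semaphore; one complete() is
 * issued per free event slot (at init time and in event_free()), so
 * event_alloc() below waits, with a generous timeout, until a slot becomes
 * available.
 */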
1072static unsigned int event_alloc(struct etnaviv_gpu *gpu)
1073{
1074	unsigned long ret, flags;
1075	unsigned int i, event = ~0U;
1076
1077	ret = wait_for_completion_timeout(&gpu->event_free,
1078					  msecs_to_jiffies(10 * 10000));
1079	if (!ret)
1080		dev_err(gpu->dev, "wait_for_completion_timeout failed");
1081
1082	spin_lock_irqsave(&gpu->event_spinlock, flags);
1083
1084	/* find first free event */
1085	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
1086		if (gpu->event[i].used == false) {
1087			gpu->event[i].used = true;
1088			event = i;
1089			break;
1090		}
1091	}
1092
1093	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1094
1095	return event;
1096}
1097
1098static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1099{
1100	unsigned long flags;
1101
1102	spin_lock_irqsave(&gpu->event_spinlock, flags);
1103
1104	if (gpu->event[event].used == false) {
1105		dev_warn(gpu->dev, "event %u is already marked as free",
1106			 event);
1107		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1108	} else {
1109		gpu->event[event].used = false;
1110		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1111
1112		complete(&gpu->event_free);
1113	}
1114}
1115
1116/*
1117 * Cmdstream submission/retirement:
1118 */
1119
1120struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
1121	size_t nr_bos)
1122{
1123	struct etnaviv_cmdbuf *cmdbuf;
1124	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
1125				 sizeof(*cmdbuf));
1126
1127	cmdbuf = kzalloc(sz, GFP_KERNEL);
1128	if (!cmdbuf)
1129		return NULL;
1130
1131	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
1132				     GFP_KERNEL);
1133	if (!cmdbuf->vaddr) {
1134		kfree(cmdbuf);
1135		return NULL;
1136	}
1137
1138	cmdbuf->gpu = gpu;
1139	cmdbuf->size = size;
1140
1141	return cmdbuf;
1142}
1143
1144void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
1145{
1146	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
1147		    cmdbuf->paddr);
1148	kfree(cmdbuf);
1149}
1150
1151static void retire_worker(struct work_struct *work)
1152{
1153	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1154					       retire_work);
1155	u32 fence = gpu->completed_fence;
1156	struct etnaviv_cmdbuf *cmdbuf, *tmp;
1157	unsigned int i;
1158
1159	mutex_lock(&gpu->lock);
1160	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
1161		if (!fence_is_signaled(cmdbuf->fence))
1162			break;
1163
1164		list_del(&cmdbuf->node);
1165		fence_put(cmdbuf->fence);
1166
1167		for (i = 0; i < cmdbuf->nr_bos; i++) {
1168			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
1169			struct etnaviv_gem_object *etnaviv_obj = mapping->object;
1170
1171			atomic_dec(&etnaviv_obj->gpu_active);
1172			/* drop the refcount taken in etnaviv_gpu_submit */
1173			etnaviv_gem_mapping_unreference(mapping);
1174		}
1175
1176		etnaviv_gpu_cmdbuf_free(cmdbuf);
1177		/*
1178		 * We need to balance the runtime PM count caused by
1179		 * each submission.  Upon submission, we increment
1180		 * the runtime PM counter, and allocate one event.
1181		 * So here, we put the runtime PM count for each
1182		 * completed event.
1183		 */
1184		pm_runtime_put_autosuspend(gpu->dev);
1185	}
1186
1187	gpu->retired_fence = fence;
1188
1189	mutex_unlock(&gpu->lock);
1190
1191	wake_up_all(&gpu->fence_event);
1192}
1193
1194int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1195	u32 fence, struct timespec *timeout)
1196{
1197	int ret;
1198
1199	if (fence_after(fence, gpu->next_fence)) {
1200		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
1201				fence, gpu->next_fence);
1202		return -EINVAL;
1203	}
1204
1205	if (!timeout) {
1206		/* No timeout was requested: just test for completion */
1207		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
1208	} else {
1209		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1210
1211		ret = wait_event_interruptible_timeout(gpu->fence_event,
1212						fence_completed(gpu, fence),
1213						remaining);
1214		if (ret == 0) {
1215			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
1216				fence, gpu->retired_fence,
1217				gpu->completed_fence);
1218			ret = -ETIMEDOUT;
1219		} else if (ret != -ERESTARTSYS) {
1220			ret = 0;
1221		}
1222	}
1223
1224	return ret;
1225}
1226
1227/*
 1228 * Wait for an object to become inactive.  This, on its own, is not race
1229 * free: the object is moved by the retire worker off the active list, and
1230 * then the iova is put.  Moreover, the object could be re-submitted just
1231 * after we notice that it's become inactive.
1232 *
1233 * Although the retirement happens under the gpu lock, we don't want to hold
1234 * that lock in this function while waiting.
1235 */
1236int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1237	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1238{
1239	unsigned long remaining;
1240	long ret;
1241
1242	if (!timeout)
1243		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1244
1245	remaining = etnaviv_timeout_to_jiffies(timeout);
1246
1247	ret = wait_event_interruptible_timeout(gpu->fence_event,
1248					       !is_active(etnaviv_obj),
1249					       remaining);
1250	if (ret > 0) {
1251		struct etnaviv_drm_private *priv = gpu->drm->dev_private;
1252
1253		/* Synchronise with the retire worker */
1254		flush_workqueue(priv->wq);
1255		return 0;
1256	} else if (ret == -ERESTARTSYS) {
1257		return -ERESTARTSYS;
1258	} else {
1259		return -ETIMEDOUT;
1260	}
1261}
1262
1263int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
1264{
1265	return pm_runtime_get_sync(gpu->dev);
1266}
1267
1268void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
1269{
1270	pm_runtime_mark_last_busy(gpu->dev);
1271	pm_runtime_put_autosuspend(gpu->dev);
1272}
1273
1274/* add bo's to gpu's ring, and kick gpu: */
1275int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1276	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
1277{
1278	struct fence *fence;
1279	unsigned int event, i;
1280	int ret;
1281
1282	ret = etnaviv_gpu_pm_get_sync(gpu);
1283	if (ret < 0)
1284		return ret;
1285
1286	mutex_lock(&gpu->lock);
1287
1288	/*
1289	 * TODO
1290	 *
1291	 * - flush
1292	 * - data endian
1293	 * - prefetch
1294	 *
1295	 */
1296
1297	event = event_alloc(gpu);
1298	if (unlikely(event == ~0U)) {
1299		DRM_ERROR("no free event\n");
1300		ret = -EBUSY;
1301		goto out_unlock;
1302	}
1303
1304	fence = etnaviv_gpu_fence_alloc(gpu);
1305	if (!fence) {
1306		event_free(gpu, event);
1307		ret = -ENOMEM;
1308		goto out_unlock;
1309	}
1310
1311	gpu->event[event].fence = fence;
1312	submit->fence = fence->seqno;
1313	gpu->active_fence = submit->fence;
1314
1315	if (gpu->lastctx != cmdbuf->ctx) {
1316		gpu->mmu->need_flush = true;
1317		gpu->switch_context = true;
1318		gpu->lastctx = cmdbuf->ctx;
1319	}
1320
1321	etnaviv_buffer_queue(gpu, event, cmdbuf);
1322
1323	cmdbuf->fence = fence;
1324	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
1325
1326	/* We're committed to adding this command buffer, hold a PM reference */
1327	pm_runtime_get_noresume(gpu->dev);
1328
1329	for (i = 0; i < submit->nr_bos; i++) {
1330		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
1331
1332		/* Each cmdbuf takes a refcount on the mapping */
1333		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
1334		cmdbuf->bo_map[i] = submit->bos[i].mapping;
1335		atomic_inc(&etnaviv_obj->gpu_active);
1336
1337		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
1338			reservation_object_add_excl_fence(etnaviv_obj->resv,
1339							  fence);
1340		else
1341			reservation_object_add_shared_fence(etnaviv_obj->resv,
1342							    fence);
1343	}
1344	cmdbuf->nr_bos = submit->nr_bos;
1345	hangcheck_timer_reset(gpu);
1346	ret = 0;
1347
1348out_unlock:
1349	mutex_unlock(&gpu->lock);
1350
1351	etnaviv_gpu_pm_put(gpu);
1352
1353	return ret;
1354}
1355
1356/*
1357 * Init/Cleanup:
1358 */
1359static irqreturn_t irq_handler(int irq, void *data)
1360{
1361	struct etnaviv_gpu *gpu = data;
1362	irqreturn_t ret = IRQ_NONE;
1363
1364	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1365
1366	if (intr != 0) {
1367		int event;
1368
1369		pm_runtime_mark_last_busy(gpu->dev);
1370
1371		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1372
1373		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1374			dev_err(gpu->dev, "AXI bus error\n");
1375			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1376		}
1377
1378		while ((event = ffs(intr)) != 0) {
1379			struct fence *fence;
1380
1381			event -= 1;
1382
1383			intr &= ~(1 << event);
1384
1385			dev_dbg(gpu->dev, "event %u\n", event);
1386
1387			fence = gpu->event[event].fence;
1388			gpu->event[event].fence = NULL;
1389			fence_signal(fence);
1390
1391			/*
1392			 * Events can be processed out of order.  Eg,
1393			 * - allocate and queue event 0
1394			 * - allocate event 1
1395			 * - event 0 completes, we process it
1396			 * - allocate and queue event 0
1397			 * - event 1 and event 0 complete
1398			 * we can end up processing event 0 first, then 1.
1399			 */
1400			if (fence_after(fence->seqno, gpu->completed_fence))
1401				gpu->completed_fence = fence->seqno;
1402
1403			event_free(gpu, event);
1404		}
1405
1406		/* Retire the buffer objects in a work */
1407		etnaviv_queue_work(gpu->drm, &gpu->retire_work);
1408
1409		ret = IRQ_HANDLED;
1410	}
1411
1412	return ret;
1413}
1414
1415static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1416{
1417	int ret;
1418
1419	ret = enable_clk(gpu);
1420	if (ret)
1421		return ret;
1422
1423	ret = enable_axi(gpu);
1424	if (ret) {
1425		disable_clk(gpu);
1426		return ret;
1427	}
1428
1429	return 0;
1430}
1431
1432static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1433{
1434	int ret;
1435
1436	ret = disable_axi(gpu);
1437	if (ret)
1438		return ret;
1439
1440	ret = disable_clk(gpu);
1441	if (ret)
1442		return ret;
1443
1444	return 0;
1445}
1446
1447static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1448{
1449	if (gpu->buffer) {
1450		unsigned long timeout;
1451
1452		/* Replace the last WAIT with END */
1453		etnaviv_buffer_end(gpu);
1454
1455		/*
 1456		 * We know that only the FE is busy here; this should
1457		 * happen quickly (as the WAIT is only 200 cycles).  If
1458		 * we fail, just warn and continue.
1459		 */
1460		timeout = jiffies + msecs_to_jiffies(100);
1461		do {
1462			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1463
1464			if ((idle & gpu->idle_mask) == gpu->idle_mask)
1465				break;
1466
1467			if (time_is_before_jiffies(timeout)) {
1468				dev_warn(gpu->dev,
1469					 "timed out waiting for idle: idle=0x%x\n",
1470					 idle);
1471				break;
1472			}
1473
1474			udelay(5);
1475		} while (1);
1476	}
1477
1478	return etnaviv_gpu_clk_disable(gpu);
1479}
1480
1481#ifdef CONFIG_PM
1482static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1483{
1484	u32 clock;
1485	int ret;
1486
1487	ret = mutex_lock_killable(&gpu->lock);
1488	if (ret)
1489		return ret;
1490
1491	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
1492		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
1493
1494	etnaviv_gpu_load_clock(gpu, clock);
1495	etnaviv_gpu_hw_init(gpu);
1496
1497	gpu->switch_context = true;
1498	gpu->exec_state = -1;
1499
1500	mutex_unlock(&gpu->lock);
1501
1502	return 0;
1503}
1504#endif
1505
1506static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1507	void *data)
1508{
1509	struct drm_device *drm = data;
1510	struct etnaviv_drm_private *priv = drm->dev_private;
1511	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1512	int ret;
1513
1514#ifdef CONFIG_PM
1515	ret = pm_runtime_get_sync(gpu->dev);
1516#else
1517	ret = etnaviv_gpu_clk_enable(gpu);
1518#endif
1519	if (ret < 0)
1520		return ret;
1521
1522	gpu->drm = drm;
1523	gpu->fence_context = fence_context_alloc(1);
1524	spin_lock_init(&gpu->fence_spinlock);
1525
1526	INIT_LIST_HEAD(&gpu->active_cmd_list);
1527	INIT_WORK(&gpu->retire_work, retire_worker);
1528	INIT_WORK(&gpu->recover_work, recover_worker);
1529	init_waitqueue_head(&gpu->fence_event);
1530
1531	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
1532			(unsigned long)gpu);
1533
1534	priv->gpu[priv->num_gpus++] = gpu;
1535
1536	pm_runtime_mark_last_busy(gpu->dev);
1537	pm_runtime_put_autosuspend(gpu->dev);
1538
1539	return 0;
1540}
1541
1542static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1543	void *data)
1544{
1545	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1546
1547	DBG("%s", dev_name(gpu->dev));
1548
1549	hangcheck_disable(gpu);
1550
1551#ifdef CONFIG_PM
1552	pm_runtime_get_sync(gpu->dev);
1553	pm_runtime_put_sync_suspend(gpu->dev);
1554#else
1555	etnaviv_gpu_hw_suspend(gpu);
1556#endif
1557
1558	if (gpu->buffer) {
1559		etnaviv_gpu_cmdbuf_free(gpu->buffer);
1560		gpu->buffer = NULL;
1561	}
1562
1563	if (gpu->mmu) {
1564		etnaviv_iommu_destroy(gpu->mmu);
1565		gpu->mmu = NULL;
1566	}
1567
1568	gpu->drm = NULL;
1569}
1570
1571static const struct component_ops gpu_ops = {
1572	.bind = etnaviv_gpu_bind,
1573	.unbind = etnaviv_gpu_unbind,
1574};
1575
1576static const struct of_device_id etnaviv_gpu_match[] = {
1577	{
1578		.compatible = "vivante,gc"
1579	},
1580	{ /* sentinel */ }
1581};
1582
1583static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1584{
1585	struct device *dev = &pdev->dev;
1586	struct etnaviv_gpu *gpu;
1587	int err = 0;
1588
1589	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1590	if (!gpu)
1591		return -ENOMEM;
1592
1593	gpu->dev = &pdev->dev;
1594	mutex_init(&gpu->lock);
1595
1596	/* Map registers: */
1597	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1598	if (IS_ERR(gpu->mmio))
1599		return PTR_ERR(gpu->mmio);
1600
1601	/* Get Interrupt: */
1602	gpu->irq = platform_get_irq(pdev, 0);
1603	if (gpu->irq < 0) {
1604		err = gpu->irq;
1605		dev_err(dev, "failed to get irq: %d\n", err);
1606		goto fail;
1607	}
1608
1609	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1610			       dev_name(gpu->dev), gpu);
1611	if (err) {
1612		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1613		goto fail;
1614	}
1615
1616	/* Get Clocks: */
1617	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1618	DBG("clk_bus: %p", gpu->clk_bus);
1619	if (IS_ERR(gpu->clk_bus))
1620		gpu->clk_bus = NULL;
1621
1622	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1623	DBG("clk_core: %p", gpu->clk_core);
1624	if (IS_ERR(gpu->clk_core))
1625		gpu->clk_core = NULL;
1626
1627	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1628	DBG("clk_shader: %p", gpu->clk_shader);
1629	if (IS_ERR(gpu->clk_shader))
1630		gpu->clk_shader = NULL;
1631
1632	/* TODO: figure out max mapped size */
1633	dev_set_drvdata(dev, gpu);
1634
1635	/*
1636	 * We treat the device as initially suspended.  The runtime PM
 1637	 * autosuspend delay is rather arbitrary: no measurements have
1638	 * yet been performed to determine an appropriate value.
1639	 */
1640	pm_runtime_use_autosuspend(gpu->dev);
1641	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1642	pm_runtime_enable(gpu->dev);
1643
1644	err = component_add(&pdev->dev, &gpu_ops);
1645	if (err < 0) {
1646		dev_err(&pdev->dev, "failed to register component: %d\n", err);
1647		goto fail;
1648	}
1649
1650	return 0;
1651
1652fail:
1653	return err;
1654}
1655
1656static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1657{
1658	component_del(&pdev->dev, &gpu_ops);
1659	pm_runtime_disable(&pdev->dev);
1660	return 0;
1661}
1662
1663#ifdef CONFIG_PM
1664static int etnaviv_gpu_rpm_suspend(struct device *dev)
1665{
1666	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1667	u32 idle, mask;
1668
1669	/* If we have outstanding fences, we're not idle */
1670	if (gpu->completed_fence != gpu->active_fence)
1671		return -EBUSY;
1672
1673	/* Check whether the hardware (except FE) is idle */
1674	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1675	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1676	if (idle != mask)
1677		return -EBUSY;
1678
1679	return etnaviv_gpu_hw_suspend(gpu);
1680}
1681
1682static int etnaviv_gpu_rpm_resume(struct device *dev)
1683{
1684	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1685	int ret;
1686
1687	ret = etnaviv_gpu_clk_enable(gpu);
1688	if (ret)
1689		return ret;
1690
1691	/* Re-initialise the basic hardware state */
1692	if (gpu->drm && gpu->buffer) {
1693		ret = etnaviv_gpu_hw_resume(gpu);
1694		if (ret) {
1695			etnaviv_gpu_clk_disable(gpu);
1696			return ret;
1697		}
1698	}
1699
1700	return 0;
1701}
1702#endif
1703
1704static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1705	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1706			   NULL)
1707};
1708
1709struct platform_driver etnaviv_gpu_driver = {
1710	.driver = {
1711		.name = "etnaviv-gpu",
1712		.owner = THIS_MODULE,
1713		.pm = &etnaviv_gpu_pm_ops,
1714		.of_match_table = etnaviv_gpu_match,
1715	},
1716	.probe = etnaviv_gpu_platform_probe,
1717	.remove = etnaviv_gpu_platform_remove,
1718	.id_table = gpu_ids,
1719};
v4.17
   1/*
   2 * Copyright (C) 2015 Etnaviv Project
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License version 2 as published by
   6 * the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program.  If not, see <http://www.gnu.org/licenses/>.
  15 */
  16
  17#include <linux/component.h>
  18#include <linux/dma-fence.h>
  19#include <linux/moduleparam.h>
  20#include <linux/of_device.h>
  21#include <linux/thermal.h>
  22
  23#include "etnaviv_cmdbuf.h"
  24#include "etnaviv_dump.h"
  25#include "etnaviv_gpu.h"
  26#include "etnaviv_gem.h"
  27#include "etnaviv_mmu.h"
  28#include "etnaviv_perfmon.h"
  29#include "etnaviv_sched.h"
  30#include "common.xml.h"
  31#include "state.xml.h"
  32#include "state_hi.xml.h"
  33#include "cmdstream.xml.h"
  34
  35#ifndef PHYS_OFFSET
  36#define PHYS_OFFSET 0
  37#endif
  38
  39static const struct platform_device_id gpu_ids[] = {
  40	{ .name = "etnaviv-gpu,2d" },
  41	{ },
  42};
  43
  44/*
  45 * Driver functions:
  46 */
  47
  48int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
  49{
  50	switch (param) {
  51	case ETNAVIV_PARAM_GPU_MODEL:
  52		*value = gpu->identity.model;
  53		break;
  54
  55	case ETNAVIV_PARAM_GPU_REVISION:
  56		*value = gpu->identity.revision;
  57		break;
  58
  59	case ETNAVIV_PARAM_GPU_FEATURES_0:
  60		*value = gpu->identity.features;
  61		break;
  62
  63	case ETNAVIV_PARAM_GPU_FEATURES_1:
  64		*value = gpu->identity.minor_features0;
  65		break;
  66
  67	case ETNAVIV_PARAM_GPU_FEATURES_2:
  68		*value = gpu->identity.minor_features1;
  69		break;
  70
  71	case ETNAVIV_PARAM_GPU_FEATURES_3:
  72		*value = gpu->identity.minor_features2;
  73		break;
  74
  75	case ETNAVIV_PARAM_GPU_FEATURES_4:
  76		*value = gpu->identity.minor_features3;
  77		break;
  78
  79	case ETNAVIV_PARAM_GPU_FEATURES_5:
  80		*value = gpu->identity.minor_features4;
  81		break;
  82
  83	case ETNAVIV_PARAM_GPU_FEATURES_6:
  84		*value = gpu->identity.minor_features5;
  85		break;
  86
  87	case ETNAVIV_PARAM_GPU_FEATURES_7:
  88		*value = gpu->identity.minor_features6;
  89		break;
  90
  91	case ETNAVIV_PARAM_GPU_FEATURES_8:
  92		*value = gpu->identity.minor_features7;
  93		break;
  94
  95	case ETNAVIV_PARAM_GPU_FEATURES_9:
  96		*value = gpu->identity.minor_features8;
  97		break;
  98
  99	case ETNAVIV_PARAM_GPU_FEATURES_10:
 100		*value = gpu->identity.minor_features9;
 101		break;
 102
 103	case ETNAVIV_PARAM_GPU_FEATURES_11:
 104		*value = gpu->identity.minor_features10;
 105		break;
 106
 107	case ETNAVIV_PARAM_GPU_FEATURES_12:
 108		*value = gpu->identity.minor_features11;
 109		break;
 110
 111	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
 112		*value = gpu->identity.stream_count;
 113		break;
 114
 115	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
 116		*value = gpu->identity.register_max;
 117		break;
 118
 119	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
 120		*value = gpu->identity.thread_count;
 121		break;
 122
 123	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
 124		*value = gpu->identity.vertex_cache_size;
 125		break;
 126
 127	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
 128		*value = gpu->identity.shader_core_count;
 129		break;
 130
 131	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
 132		*value = gpu->identity.pixel_pipes;
 133		break;
 134
 135	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
 136		*value = gpu->identity.vertex_output_buffer_size;
 137		break;
 138
 139	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
 140		*value = gpu->identity.buffer_size;
 141		break;
 142
 143	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
 144		*value = gpu->identity.instruction_count;
 145		break;
 146
 147	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
 148		*value = gpu->identity.num_constants;
 149		break;
 150
 151	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 152		*value = gpu->identity.varyings_count;
 153		break;
 154
 155	default:
 156		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
 157		return -EINVAL;
 158	}
 159
 160	return 0;
 161}
 162
 163
 164#define etnaviv_is_model_rev(gpu, mod, rev) \
 165	((gpu)->identity.model == chipModel_##mod && \
 166	 (gpu)->identity.revision == rev)
 167#define etnaviv_field(val, field) \
 168	(((val) & field##__MASK) >> field##__SHIFT)
 169
 170static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 171{
 172	if (gpu->identity.minor_features0 &
 173	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 174		u32 specs[4];
 175		unsigned int streams;
 176
 177		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 178		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
 179		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
 180		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
 181
 182		gpu->identity.stream_count = etnaviv_field(specs[0],
 183					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 184		gpu->identity.register_max = etnaviv_field(specs[0],
 185					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
 186		gpu->identity.thread_count = etnaviv_field(specs[0],
 187					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
 188		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
 189					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
 190		gpu->identity.shader_core_count = etnaviv_field(specs[0],
 191					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
 192		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
 193					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
 194		gpu->identity.vertex_output_buffer_size =
 195			etnaviv_field(specs[0],
 196				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
 197
 198		gpu->identity.buffer_size = etnaviv_field(specs[1],
 199					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
 200		gpu->identity.instruction_count = etnaviv_field(specs[1],
 201					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
 202		gpu->identity.num_constants = etnaviv_field(specs[1],
 203					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
 204
 205		gpu->identity.varyings_count = etnaviv_field(specs[2],
 206					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
 207
 208		/* This overrides the value from the older register if non-zero */
 209		streams = etnaviv_field(specs[3],
 210					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
 211		if (streams)
 212			gpu->identity.stream_count = streams;
 213	}
 214
 215	/* Fill in the stream count if not specified */
 216	if (gpu->identity.stream_count == 0) {
 217		if (gpu->identity.model >= 0x1000)
 218			gpu->identity.stream_count = 4;
 219		else
 220			gpu->identity.stream_count = 1;
 221	}
 222
 223	/* Convert the register max value */
 224	if (gpu->identity.register_max)
 225		gpu->identity.register_max = 1 << gpu->identity.register_max;
 226	else if (gpu->identity.model == chipModel_GC400)
 227		gpu->identity.register_max = 32;
 228	else
 229		gpu->identity.register_max = 64;
 230
 231	/* Convert thread count */
 232	if (gpu->identity.thread_count)
 233		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
 234	else if (gpu->identity.model == chipModel_GC400)
 235		gpu->identity.thread_count = 64;
 236	else if (gpu->identity.model == chipModel_GC500 ||
 237		 gpu->identity.model == chipModel_GC530)
 238		gpu->identity.thread_count = 128;
 239	else
 240		gpu->identity.thread_count = 256;
 241
 242	if (gpu->identity.vertex_cache_size == 0)
 243		gpu->identity.vertex_cache_size = 8;
 244
 245	if (gpu->identity.shader_core_count == 0) {
 246		if (gpu->identity.model >= 0x1000)
 247			gpu->identity.shader_core_count = 2;
 248		else
 249			gpu->identity.shader_core_count = 1;
 250	}
 251
 252	if (gpu->identity.pixel_pipes == 0)
 253		gpu->identity.pixel_pipes = 1;
 254
 255	/* Convert vertex buffer size */
 256	if (gpu->identity.vertex_output_buffer_size) {
 257		gpu->identity.vertex_output_buffer_size =
 258			1 << gpu->identity.vertex_output_buffer_size;
 259	} else if (gpu->identity.model == chipModel_GC400) {
 260		if (gpu->identity.revision < 0x4000)
 261			gpu->identity.vertex_output_buffer_size = 512;
 262		else if (gpu->identity.revision < 0x4200)
 263			gpu->identity.vertex_output_buffer_size = 256;
 264		else
 265			gpu->identity.vertex_output_buffer_size = 128;
 266	} else {
 267		gpu->identity.vertex_output_buffer_size = 512;
 268	}
 269
 270	switch (gpu->identity.instruction_count) {
 271	case 0:
 272		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 273		    gpu->identity.model == chipModel_GC880)
 274			gpu->identity.instruction_count = 512;
 275		else
 276			gpu->identity.instruction_count = 256;
 277		break;
 278
 279	case 1:
 280		gpu->identity.instruction_count = 1024;
 281		break;
 282
 283	case 2:
 284		gpu->identity.instruction_count = 2048;
 285		break;
 286
 287	default:
 288		gpu->identity.instruction_count = 256;
 289		break;
 290	}
 291
 292	if (gpu->identity.num_constants == 0)
 293		gpu->identity.num_constants = 168;
 294
 295	if (gpu->identity.varyings_count == 0) {
 296		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
 297			gpu->identity.varyings_count = 12;
 298		else
 299			gpu->identity.varyings_count = 8;
 300	}
 301
 302	/*
 303	 * For some cores, two varyings are consumed for position, so the
 304	 * maximum varying count needs to be reduced by one.
 305	 */
 306	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
 307	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 308	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
 309	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 310	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
 311	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
 312	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
 313	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 314	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
 315	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
 316	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
 317		gpu->identity.varyings_count -= 1;
 318}
 319
 320static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 321{
 322	u32 chipIdentity;
 323
 324	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 325
 326	/* Special case for older graphics cores. */
 327	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
 328		gpu->identity.model    = chipModel_GC500;
 329		gpu->identity.revision = etnaviv_field(chipIdentity,
 330					 VIVS_HI_CHIP_IDENTITY_REVISION);
 331	} else {
 332
 333		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 334		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
 335
 336		/*
 337		 * !!!! HACK ALERT !!!!
 338		 * Because people change device IDs without letting software
 339		 * know about it - here is the hack to make it all look the
 340		 * same.  Only for GC400 family.
 341		 */
 342		if ((gpu->identity.model & 0xff00) == 0x0400 &&
 343		    gpu->identity.model != chipModel_GC420) {
 344			gpu->identity.model = gpu->identity.model & 0x0400;
 345		}
 346
 347		/* Another special case */
 348		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
 349			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 350			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 351
 352			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
 353				/*
 354				 * This IP has an ECO; put the correct
 355				 * revision in it.
 356				 */
 357				gpu->identity.revision = 0x1051;
 358			}
 359		}
 360
 361		/*
 362		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
 363		 * reality it's just a re-branded GC3000. We can identify this
 364		 * core by the upper half of the revision register being all 1.
 365		 * Fix model/rev here, so all other places can refer to this
 366		 * core by its real identity.
 367		 */
 368		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
 369			gpu->identity.model = chipModel_GC3000;
 370			gpu->identity.revision &= 0xffff;
 371		}
 372	}
 373
 374	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 375		 gpu->identity.model, gpu->identity.revision);
 376
 377	/*
 378	 * If there is a match in the HWDB, we aren't interested in the
 379	 * remaining register values, as they might be wrong.
 380	 */
 381	if (etnaviv_fill_identity_from_hwdb(gpu))
 382		return;
 383
 384	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 385
 386	/* Disable fast clear on GC700. */
 387	if (gpu->identity.model == chipModel_GC700)
 388		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 389
 390	if ((gpu->identity.model == chipModel_GC500 &&
 391	     gpu->identity.revision < 2) ||
 392	    (gpu->identity.model == chipModel_GC300 &&
 393	     gpu->identity.revision < 0x2000)) {
 394
 395		/*
 396		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
 397		 * registers.
 398		 */
 399		gpu->identity.minor_features0 = 0;
 400		gpu->identity.minor_features1 = 0;
 401		gpu->identity.minor_features2 = 0;
 402		gpu->identity.minor_features3 = 0;
 403		gpu->identity.minor_features4 = 0;
 404		gpu->identity.minor_features5 = 0;
 405	} else
 406		gpu->identity.minor_features0 =
 407				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
 408
 409	if (gpu->identity.minor_features0 &
 410	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 411		gpu->identity.minor_features1 =
 412				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
 413		gpu->identity.minor_features2 =
 414				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
 415		gpu->identity.minor_features3 =
 416				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
 417		gpu->identity.minor_features4 =
 418				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
 419		gpu->identity.minor_features5 =
 420				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
 421	}
 422
 423	/* GC600 idle register reports zero bits where modules aren't present */
 424	if (gpu->identity.model == chipModel_GC600) {
 425		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 426				 VIVS_HI_IDLE_STATE_RA |
 427				 VIVS_HI_IDLE_STATE_SE |
 428				 VIVS_HI_IDLE_STATE_PA |
 429				 VIVS_HI_IDLE_STATE_SH |
 430				 VIVS_HI_IDLE_STATE_PE |
 431				 VIVS_HI_IDLE_STATE_DE |
 432				 VIVS_HI_IDLE_STATE_FE;
 433	} else {
 434		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 435	}
 436
 437	etnaviv_hw_specs(gpu);
 438}
 439
 440static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 441{
 442	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
 443		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 444	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 445}
 446
 447static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
 448{
 449	if (gpu->identity.minor_features2 &
 450	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
 451		clk_set_rate(gpu->clk_core,
 452			     gpu->base_rate_core >> gpu->freq_scale);
 453		clk_set_rate(gpu->clk_shader,
 454			     gpu->base_rate_shader >> gpu->freq_scale);
 455	} else {
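    		/*
    		 * No DFS support: scale the clock via the FSCALE field of the
    		 * clock control register instead.  fscale is halved for each
    		 * freq_scale step; freq_scale 0 yields the maximum value of 64.
    		 */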
 456		unsigned int fscale = 1 << (6 - gpu->freq_scale);
 457		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 458
 459		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
 460		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
 461		etnaviv_gpu_load_clock(gpu, clock);
 462	}
 463}
 464
 465static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 466{
 467	u32 control, idle;
 468	unsigned long timeout;
 469	bool failed = true;
 470
 471	/* We hope that the GPU resets in under one second */
 472	timeout = jiffies + msecs_to_jiffies(1000);
 473
 474	while (time_is_after_jiffies(timeout)) {
 475		/* enable clock */
 476		unsigned int fscale = 1 << (6 - gpu->freq_scale);
 477		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
 478		etnaviv_gpu_load_clock(gpu, control);
 479
 480		/* isolate the GPU. */
 481		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 482		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 483
 484		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 485			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
 486			          VIVS_MMUv2_AHB_CONTROL_RESET);
 487		} else {
 488			/* set soft reset. */
 489			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 490			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 491		}
 492
 493		/* wait for reset. */
 494		usleep_range(10, 20);
 495
 496		/* reset soft reset bit. */
 497		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 498		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 499
 500		/* reset GPU isolation. */
 501		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 502		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 503
 504		/* read idle register. */
 505		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 506
 507		/* try resetting again if FE is not idle */
 508		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 509			dev_dbg(gpu->dev, "FE is not idle\n");
 510			continue;
 511		}
 512
 513		/* read reset register. */
 514		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 515
 516		/* is the GPU idle? */
 517		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
 518		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
 519			dev_dbg(gpu->dev, "GPU is not idle\n");
 520			continue;
 521		}
 522
 523		/* disable debug registers, as they are not normally needed */
 524		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
 525		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 526
 527		failed = false;
 528		break;
 529	}
 530
 531	if (failed) {
 532		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 533		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 534
 535		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
 536			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
 537			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
 538			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
 539
 540		return -EBUSY;
 541	}
 542
 543	/* We rely on the GPU running, so program the clock */
 544	etnaviv_gpu_update_clock(gpu);
 545
 546	return 0;
 547}
 548
 549static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
 550{
 551	u32 pmc, ppc;
 552
 553	/* enable clock gating */
 554	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
 555	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
 556
 557	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
 558	if (gpu->identity.revision == 0x4301 ||
 559	    gpu->identity.revision == 0x4302)
 560		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
 561
 562	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
 563
 564	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
 565
 566	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
 567	if (gpu->identity.model >= chipModel_GC400 &&
 568	    gpu->identity.model != chipModel_GC420 &&
 569	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
 570		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
 571
 572	/*
 573	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
 574	 * present without a bug fix.
 575	 */
 576	if (gpu->identity.revision < 0x5000 &&
 577	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
 578	    !(gpu->identity.minor_features1 &
 579	      chipMinorFeatures1_DISABLE_PE_GATING))
 580		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
 581
 582	if (gpu->identity.revision < 0x5422)
 583		pmc |= BIT(15); /* Unknown bit */
 584
 585	/* Disable TX clock gating on affected core revisions. */
 586	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 587	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
 588		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
 589
 590	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
 591	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 592
 593	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 594}
 595
 596void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 597{
 598	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
 599	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
 600		  VIVS_FE_COMMAND_CONTROL_ENABLE |
 601		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 602
 603	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 604		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
 605			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
 606			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
 607	}
 608}
 609
 610static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
 611{
 612	/*
 613	 * Base value for VIVS_PM_PULSE_EATER register on models where it
 614	 * cannot be read, extracted from the Vivante kernel driver.
 615	 */
 616	u32 pulse_eater = 0x01590880;
 617
 618	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 619	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
 620		pulse_eater |= BIT(23);
 622	}
 623
 624	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
 625	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
 626		pulse_eater &= ~BIT(16);
 627		pulse_eater |= BIT(17);
 628	}
 629
 630	if ((gpu->identity.revision > 0x5420) &&
 631	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
 633		/* Performance fix: disable internal DFS */
 634		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
 635		pulse_eater |= BIT(18);
 636	}
 637
 638	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
 639}
 640
 641static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 642{
 643	u16 prefetch;
 644
 645	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 646	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 647	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
 648		u32 mc_memory_debug;
 649
 650		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
 651
 652		if (gpu->identity.revision == 0x5007)
 653			mc_memory_debug |= 0x0c;
 654		else
 655			mc_memory_debug |= 0x08;
 656
 657		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 658	}
 659
 660	/* enable module-level clock gating */
 661	etnaviv_gpu_enable_mlcg(gpu);
 662
 663	/*
 664	 * Update GPU AXI cache attribute to "cacheable, no allocate".
 665	 * This is necessary to prevent the i.MX6 SoC locking up.
 666	 */
 667	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
 668		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
 669		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 670
 671	/* GC2000 rev 5108 needs a special bus config */
 672	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
 673		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
 674		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
 675				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
 676		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
 677			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
 678		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 679	}
 680
 681	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 682		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
 683		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
 684		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
 685	}
 686
 687	/* setup the pulse eater */
 688	etnaviv_gpu_setup_pulse_eater(gpu);
 689
 690	/* setup the MMU */
 691	etnaviv_iommu_restore(gpu);
 692
 693	/* Start command processor */
 694	prefetch = etnaviv_buffer_init(gpu);
 695
 696	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 697	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
 698			     prefetch);
 699}
 700
 701int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 702{
 703	int ret, i;
 704
 705	ret = pm_runtime_get_sync(gpu->dev);
 706	if (ret < 0) {
 707		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
 708		return ret;
 709	}
 710
 711	etnaviv_hw_identify(gpu);
 712
 713	if (gpu->identity.model == 0) {
 714		dev_err(gpu->dev, "Unknown GPU model\n");
 715		ret = -ENXIO;
 716		goto fail;
 717	}
 718
 719	/* Exclude VG cores with FE2.0 */
 720	if (gpu->identity.features & chipFeatures_PIPE_VG &&
 721	    gpu->identity.features & chipFeatures_FE20) {
 722		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
 723		ret = -ENXIO;
 724		goto fail;
 725	}
 726
 727	/*
 728	 * Set the GPU linear window to be at the end of the DMA window, where
 729	 * the CMA area is likely to reside. This ensures that we are able to
 730	 * map the command buffers while having the linear window overlap as
 731	 * much RAM as possible, so we can optimize mappings for other buffers.
 732	 *
 733	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
 734	 * to different views of the memory on the individual engines.
 735	 */
 736	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
 737	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
 738		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
 739		if (dma_mask < PHYS_OFFSET + SZ_2G)
 740			gpu->memory_base = PHYS_OFFSET;
 741		else
 742			gpu->memory_base = dma_mask - SZ_2G + 1;
 743	} else if (PHYS_OFFSET >= SZ_2G) {
 744		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
 745		gpu->memory_base = PHYS_OFFSET;
 746		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 747	}
 748
 749	/*
 750	 * On cores with security features supported, we claim control over the
 751	 * security states.
 752	 */
 753	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
 754	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
 755		gpu->sec_mode = ETNA_SEC_KERNEL;
 756
 757	ret = etnaviv_hw_reset(gpu);
 758	if (ret) {
 759		dev_err(gpu->dev, "GPU reset failed\n");
 760		goto fail;
 761	}
 762
 763	gpu->mmu = etnaviv_iommu_new(gpu);
 764	if (IS_ERR(gpu->mmu)) {
 765		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
 766		ret = PTR_ERR(gpu->mmu);
 767		goto fail;
 768	}
 769
 770	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
 771	if (IS_ERR(gpu->cmdbuf_suballoc)) {
 772		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
 773		ret = PTR_ERR(gpu->cmdbuf_suballoc);
 774		goto fail;
 775	}
 776
 777	/* Create buffer: */
 778	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
 779				  PAGE_SIZE);
 780	if (ret) {
 781		dev_err(gpu->dev, "could not create command buffer\n");
 782		goto destroy_iommu;
 783	}
 784
 785	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
 786	    etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
 787		ret = -EINVAL;
 788		dev_err(gpu->dev,
 789			"command buffer outside valid memory window\n");
 790		goto free_buffer;
 791	}
 792
 793	/* Setup event management */
 794	spin_lock_init(&gpu->event_spinlock);
 795	init_completion(&gpu->event_free);
 796	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
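    	/*
    	 * Complete event_free once per event slot, so it acts as a
    	 * counting semaphore for event allocation.
    	 */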
 797	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
 798		complete(&gpu->event_free);
 799
 800	/* Now program the hardware */
 801	mutex_lock(&gpu->lock);
 802	etnaviv_gpu_hw_init(gpu);
 803	gpu->exec_state = -1;
 804	mutex_unlock(&gpu->lock);
 805
 806	pm_runtime_mark_last_busy(gpu->dev);
 807	pm_runtime_put_autosuspend(gpu->dev);
 808
 809	return 0;
 810
 811free_buffer:
 812	etnaviv_cmdbuf_free(&gpu->buffer);
 813destroy_iommu:
 814	etnaviv_iommu_destroy(gpu->mmu);
 815	gpu->mmu = NULL;
 816fail:
 817	pm_runtime_mark_last_busy(gpu->dev);
 818	pm_runtime_put_autosuspend(gpu->dev);
 819
 820	return ret;
 821}
 822
 823#ifdef CONFIG_DEBUG_FS
 824struct dma_debug {
 825	u32 address[2];
 826	u32 state[2];
 827};
 828
 829static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 830{
 831	u32 i;
 832
 833	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 834	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 835
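    	/*
    	 * Re-read the FE DMA address and state up to 500 times; if neither
    	 * of them ever changes, the front end DMA is most likely stuck.
    	 */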
 836	for (i = 0; i < 500; i++) {
 837		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 838		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 839
 840		if (debug->address[0] != debug->address[1])
 841			break;
 842
 843		if (debug->state[0] != debug->state[1])
 844			break;
 845	}
 846}
 847
 848int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 849{
 850	struct dma_debug debug;
 851	u32 dma_lo, dma_hi, axi, idle;
 852	int ret;
 853
 854	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
 855
 856	ret = pm_runtime_get_sync(gpu->dev);
 857	if (ret < 0)
 858		return ret;
 859
 860	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 861	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
 862	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
 863	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 864
 865	verify_dma(gpu, &debug);
 866
 867	seq_puts(m, "\tfeatures\n");
 868	seq_printf(m, "\t major_features: 0x%08x\n",
 869		   gpu->identity.features);
 870	seq_printf(m, "\t minor_features0: 0x%08x\n",
 871		   gpu->identity.minor_features0);
 872	seq_printf(m, "\t minor_features1: 0x%08x\n",
 873		   gpu->identity.minor_features1);
 874	seq_printf(m, "\t minor_features2: 0x%08x\n",
 875		   gpu->identity.minor_features2);
 876	seq_printf(m, "\t minor_features3: 0x%08x\n",
 877		   gpu->identity.minor_features3);
 878	seq_printf(m, "\t minor_features4: 0x%08x\n",
 879		   gpu->identity.minor_features4);
 880	seq_printf(m, "\t minor_features5: 0x%08x\n",
 881		   gpu->identity.minor_features5);
 882	seq_printf(m, "\t minor_features6: 0x%08x\n",
 883		   gpu->identity.minor_features6);
 884	seq_printf(m, "\t minor_features7: 0x%08x\n",
 885		   gpu->identity.minor_features7);
 886	seq_printf(m, "\t minor_features8: 0x%08x\n",
 887		   gpu->identity.minor_features8);
 888	seq_printf(m, "\t minor_features9: 0x%08x\n",
 889		   gpu->identity.minor_features9);
 890	seq_printf(m, "\t minor_features10: 0x%08x\n",
 891		   gpu->identity.minor_features10);
 892	seq_printf(m, "\t minor_features11: 0x%08x\n",
 893		   gpu->identity.minor_features11);
 894
 895	seq_puts(m, "\tspecs\n");
 896	seq_printf(m, "\t stream_count:  %d\n",
 897			gpu->identity.stream_count);
 898	seq_printf(m, "\t register_max: %d\n",
 899			gpu->identity.register_max);
 900	seq_printf(m, "\t thread_count: %d\n",
 901			gpu->identity.thread_count);
 902	seq_printf(m, "\t vertex_cache_size: %d\n",
 903			gpu->identity.vertex_cache_size);
 904	seq_printf(m, "\t shader_core_count: %d\n",
 905			gpu->identity.shader_core_count);
 906	seq_printf(m, "\t pixel_pipes: %d\n",
 907			gpu->identity.pixel_pipes);
 908	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
 909			gpu->identity.vertex_output_buffer_size);
 910	seq_printf(m, "\t buffer_size: %d\n",
 911			gpu->identity.buffer_size);
 912	seq_printf(m, "\t instruction_count: %d\n",
 913			gpu->identity.instruction_count);
 914	seq_printf(m, "\t num_constants: %d\n",
 915			gpu->identity.num_constants);
 916	seq_printf(m, "\t varyings_count: %d\n",
 917			gpu->identity.varyings_count);
 918
 919	seq_printf(m, "\taxi: 0x%08x\n", axi);
 920	seq_printf(m, "\tidle: 0x%08x\n", idle);
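    	/*
    	 * Mark modules outside this core's idle_mask as idle, so they are
    	 * not reported as busy below.
    	 */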
 921	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
 922	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
 923		seq_puts(m, "\t FE is not idle\n");
 924	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
 925		seq_puts(m, "\t DE is not idle\n");
 926	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
 927		seq_puts(m, "\t PE is not idle\n");
 928	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
 929		seq_puts(m, "\t SH is not idle\n");
 930	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
 931		seq_puts(m, "\t PA is not idle\n");
 932	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
 933		seq_puts(m, "\t SE is not idle\n");
 934	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
 935		seq_puts(m, "\t RA is not idle\n");
 936	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
 937		seq_puts(m, "\t TX is not idle\n");
 938	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
 939		seq_puts(m, "\t VG is not idle\n");
 940	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
 941		seq_puts(m, "\t IM is not idle\n");
 942	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
 943		seq_puts(m, "\t FP is not idle\n");
 944	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 945		seq_puts(m, "\t TS is not idle\n");
 946	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 947		seq_puts(m, "\t AXI low power mode\n");
 948
 949	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
 950		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
 951		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
 952		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
 953
 954		seq_puts(m, "\tMC\n");
 955		seq_printf(m, "\t read0: 0x%08x\n", read0);
 956		seq_printf(m, "\t read1: 0x%08x\n", read1);
 957		seq_printf(m, "\t write: 0x%08x\n", write);
 958	}
 959
 960	seq_puts(m, "\tDMA ");
 961
 962	if (debug.address[0] == debug.address[1] &&
 963	    debug.state[0] == debug.state[1]) {
 964		seq_puts(m, "seems to be stuck\n");
 965	} else if (debug.address[0] == debug.address[1]) {
 966		seq_puts(m, "address is constant\n");
 967	} else {
 968		seq_puts(m, "is running\n");
 969	}
 970
 971	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
 972	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
 973	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
 974	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 975	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 976		   dma_lo, dma_hi);
 977
 978	ret = 0;
 979
 980	pm_runtime_mark_last_busy(gpu->dev);
 981	pm_runtime_put_autosuspend(gpu->dev);
 982
 983	return ret;
 984}
 985#endif
 986
 987void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 988{
 989	unsigned long flags;
 990	unsigned int i = 0;
 991
 992	dev_err(gpu->dev, "recover hung GPU!\n");
 993
 994	if (pm_runtime_get_sync(gpu->dev) < 0)
 995		return;
 996
 997	mutex_lock(&gpu->lock);
 998
 999	etnaviv_hw_reset(gpu);
1000
1001	/* complete all events, the GPU won't do it after the reset */
1002	spin_lock_irqsave(&gpu->event_spinlock, flags);
1003	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
1004		complete(&gpu->event_free);
1005	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
1006	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1007	gpu->completed_fence = gpu->active_fence;
1008
1009	etnaviv_gpu_hw_init(gpu);
1010	gpu->lastctx = NULL;
1011	gpu->exec_state = -1;
1012
1013	mutex_unlock(&gpu->lock);
1014	pm_runtime_mark_last_busy(gpu->dev);
1015	pm_runtime_put_autosuspend(gpu->dev);
1016}
1017
1018/* fence object management */
1019struct etnaviv_fence {
1020	struct etnaviv_gpu *gpu;
1021	struct dma_fence base;
1022};
1023
1024static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
1025{
1026	return container_of(fence, struct etnaviv_fence, base);
1027}
1028
1029static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
1030{
1031	return "etnaviv";
1032}
1033
1034static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
1035{
1036	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1037
1038	return dev_name(f->gpu->dev);
1039}
1040
1041static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
1042{
1043	return true;
1044}
1045
1046static bool etnaviv_fence_signaled(struct dma_fence *fence)
1047{
1048	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1049
1050	return fence_completed(f->gpu, f->base.seqno);
1051}
1052
1053static void etnaviv_fence_release(struct dma_fence *fence)
1054{
1055	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1056
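    	/*
    	 * The fence may still be found under rcu_read_lock() in
    	 * etnaviv_gpu_wait_fence_interruptible(), so only free it after an
    	 * RCU grace period.
    	 */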
1057	kfree_rcu(f, base.rcu);
1058}
1059
1060static const struct dma_fence_ops etnaviv_fence_ops = {
1061	.get_driver_name = etnaviv_fence_get_driver_name,
1062	.get_timeline_name = etnaviv_fence_get_timeline_name,
1063	.enable_signaling = etnaviv_fence_enable_signaling,
1064	.signaled = etnaviv_fence_signaled,
1065	.wait = dma_fence_default_wait,
1066	.release = etnaviv_fence_release,
1067};
1068
1069static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1070{
1071	struct etnaviv_fence *f;
1072
1073	/*
1074	 * GPU lock must already be held, otherwise fence completion order might
1075	 * not match the seqno order assigned here.
1076	 */
1077	lockdep_assert_held(&gpu->lock);
1078
1079	f = kzalloc(sizeof(*f), GFP_KERNEL);
1080	if (!f)
1081		return NULL;
1082
1083	f->gpu = gpu;
1084
1085	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1086		       gpu->fence_context, ++gpu->next_fence);
1087
1088	return &f->base;
1089}
1090
1091/*
1092 * event management:
1093 */
1094
1095static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1096	unsigned int *events)
1097{
1098	unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
1099	unsigned i, acquired = 0;
1100
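    	/*
    	 * Wait on event_free once per requested event; the remaining
    	 * timeout is carried over to the next iteration.
    	 */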
1101	for (i = 0; i < nr_events; i++) {
1102		unsigned long ret;
1103
1104		ret = wait_for_completion_timeout(&gpu->event_free, timeout);
1105
1106		if (!ret) {
1107			dev_err(gpu->dev, "wait_for_completion_timeout failed\n");
1108			goto out;
1109		}
1110
1111		acquired++;
1112		timeout = ret;
1113	}
1114
1115	spin_lock_irqsave(&gpu->event_spinlock, flags);
1116
1117	for (i = 0; i < nr_events; i++) {
1118		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1119
1120		events[i] = event;
1121		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1122		set_bit(event, gpu->event_bitmap);
1123	}
1124
1125	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1126
1127	return 0;
1128
1129out:
1130	for (i = 0; i < acquired; i++)
1131		complete(&gpu->event_free);
1132
1133	return -EBUSY;
1134}
1135
1136static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1137{
1138	unsigned long flags;
1139
1140	spin_lock_irqsave(&gpu->event_spinlock, flags);
1141
1142	if (!test_bit(event, gpu->event_bitmap)) {
1143		dev_warn(gpu->dev, "event %u is already marked as free\n",
1144			 event);
1145		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1146	} else {
1147		clear_bit(event, gpu->event_bitmap);
1148		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1149
1150		complete(&gpu->event_free);
1151	}
1152}
1153
1154/*
1155 * Cmdstream submission/retirement:
1156 */
1157int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1158	u32 id, struct timespec *timeout)
1159{
1160	struct dma_fence *fence;
1161	int ret;
1162
1163	/*
1164	 * Look up the fence and take a reference. We might still find a fence
1165	 * whose refcount has already dropped to zero. dma_fence_get_rcu
1166	 * pretends we didn't find a fence in that case.
1167	 */
1168	rcu_read_lock();
1169	fence = idr_find(&gpu->fence_idr, id);
1170	if (fence)
1171		fence = dma_fence_get_rcu(fence);
1172	rcu_read_unlock();
1173
1174	if (!fence)
1175		return 0;
1176
1177	if (!timeout) {
1178		/* No timeout was requested: just test for completion */
1179		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
1180	} else {
1181		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1182
1183		ret = dma_fence_wait_timeout(fence, true, remaining);
1184		if (ret == 0)
1185			ret = -ETIMEDOUT;
1186		else if (ret != -ERESTARTSYS)
1187			ret = 0;
1188
1189	}
1190
1191	dma_fence_put(fence);
1192	return ret;
1193}
1194
1195/*
1196 * Wait for an object to become inactive.  This, on its own, is not race
1197 * free: the object is moved by the scheduler off the active list, and
1198 * then the iova is put.  Moreover, the object could be re-submitted just
1199 * after we notice that it's become inactive.
1200 *
1201 * Although the retirement happens under the gpu lock, we don't want to hold
1202 * that lock in this function while waiting.
1203 */
1204int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1205	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1206{
1207	unsigned long remaining;
1208	long ret;
1209
1210	if (!timeout)
1211		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1212
1213	remaining = etnaviv_timeout_to_jiffies(timeout);
1214
1215	ret = wait_event_interruptible_timeout(gpu->fence_event,
1216					       !is_active(etnaviv_obj),
1217					       remaining);
1218	if (ret > 0)
1219		return 0;
1220	else if (ret == -ERESTARTSYS)
1221		return -ERESTARTSYS;
1222	else
1223		return -ETIMEDOUT;
1224}
1225
1226static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1227	struct etnaviv_event *event, unsigned int flags)
1228{
1229	const struct etnaviv_gem_submit *submit = event->submit;
1230	unsigned int i;
1231
1232	for (i = 0; i < submit->nr_pmrs; i++) {
1233		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1234
1235		if (pmr->flags == flags)
1236			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
1237	}
1238}
1239
1240static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1241	struct etnaviv_event *event)
1242{
1243	u32 val;
1244
1245	/* disable clock gating */
1246	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1247	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1248	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1249
1250	/* enable debug register */
1251	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1252	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1253	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1254
1255	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1256}
1257
1258static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1259	struct etnaviv_event *event)
1260{
1261	const struct etnaviv_gem_submit *submit = event->submit;
1262	unsigned int i;
1263	u32 val;
1264
1265	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1266
1267	for (i = 0; i < submit->nr_pmrs; i++) {
1268		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1269
1270		*pmr->bo_vma = pmr->sequence;
1271	}
1272
1273	/* disable debug register */
1274	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1275	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1276	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1277
1278	/* enable clock gating */
1279	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1280	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1281	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1282}
1283
1285/* add bo's to gpu's ring, and kick gpu: */
1286struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1287{
1288	struct etnaviv_gpu *gpu = submit->gpu;
1289	struct dma_fence *gpu_fence;
1290	unsigned int i, nr_events = 1, event[3];
1291	int ret;
1292
1293	if (!submit->runtime_resumed) {
1294		ret = pm_runtime_get_sync(gpu->dev);
1295		if (ret < 0)
1296			return NULL;
1297		submit->runtime_resumed = true;
1298	}
1299
1300	/*
1301	 * if there are performance monitor requests we need to have
1302	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
1303	 *   requests.
1304	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
1305	 *   and update the sequence number for userspace.
1306	 */
1307	if (submit->nr_pmrs)
1308		nr_events = 3;
1309
1310	ret = event_alloc(gpu, nr_events, event);
1311	if (ret) {
1312		DRM_ERROR("no free events\n");
1313		return NULL;
1314	}
1315
1316	mutex_lock(&gpu->lock);
1317
1318	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1319	if (!gpu_fence) {
1320		for (i = 0; i < nr_events; i++)
1321			event_free(gpu, event[i]);
1322
1323		goto out_unlock;
1324	}
1325
1326	gpu->active_fence = gpu_fence->seqno;
1327
1328	if (submit->nr_pmrs) {
1329		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1330		kref_get(&submit->refcount);
1331		gpu->event[event[1]].submit = submit;
1332		etnaviv_sync_point_queue(gpu, event[1]);
1333	}
1334
1335	gpu->event[event[0]].fence = gpu_fence;
1336	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
1337	etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
1338			     &submit->cmdbuf);
1339
1340	if (submit->nr_pmrs) {
1341		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1342		kref_get(&submit->refcount);
1343		gpu->event[event[2]].submit = submit;
1344		etnaviv_sync_point_queue(gpu, event[2]);
1345	}
1346
1347out_unlock:
1348	mutex_unlock(&gpu->lock);
1349
1350	return gpu_fence;
1351}
1352
1353static void sync_point_worker(struct work_struct *work)
1354{
1355	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1356					       sync_point_work);
1357	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1358	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1359
1360	event->sync_point(gpu, event);
1361	etnaviv_submit_put(event->submit);
1362	event_free(gpu, gpu->sync_point_event);
1363
1364	/* restart FE last to avoid GPU and IRQ racing against this worker */
1365	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1366}
1367
1368static void dump_mmu_fault(struct etnaviv_gpu *gpu)
1369{
1370	u32 status_reg, status;
1371	int i;
1372
1373	if (gpu->sec_mode == ETNA_SEC_NONE)
1374		status_reg = VIVS_MMUv2_STATUS;
1375	else
1376		status_reg = VIVS_MMUv2_SEC_STATUS;
1377
1378	status = gpu_read(gpu, status_reg);
1379	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
1380
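    	/* each of the four MMUs has a 4-bit exception field in the status */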
1381	for (i = 0; i < 4; i++) {
1382		u32 address_reg;
1383
1384		if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
1385			continue;
1386
1387		if (gpu->sec_mode == ETNA_SEC_NONE)
1388			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
1389		else
1390			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
1391
1392		dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
1393				    gpu_read(gpu, address_reg));
1394	}
1395}
1396
1397static irqreturn_t irq_handler(int irq, void *data)
1398{
1399	struct etnaviv_gpu *gpu = data;
1400	irqreturn_t ret = IRQ_NONE;
1401
1402	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1403
1404	if (intr != 0) {
1405		int event;
1406
1407		pm_runtime_mark_last_busy(gpu->dev);
1408
1409		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1410
1411		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1412			dev_err(gpu->dev, "AXI bus error\n");
1413			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1414		}
1415
1416		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
1417			dump_mmu_fault(gpu);
1418			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1419		}
1420
1421		while ((event = ffs(intr)) != 0) {
1422			struct dma_fence *fence;
1423
1424			event -= 1;
1425
1426			intr &= ~(1 << event);
1427
1428			dev_dbg(gpu->dev, "event %u\n", event);
1429
1430			if (gpu->event[event].sync_point) {
1431				gpu->sync_point_event = event;
1432				queue_work(gpu->wq, &gpu->sync_point_work);
1433			}
1434
1435			fence = gpu->event[event].fence;
1436			if (!fence)
1437				continue;
1438
1439			gpu->event[event].fence = NULL;
1440
1441			/*
1442			 * Events can be processed out of order.  E.g.,
1443			 * - allocate and queue event 0
1444			 * - allocate event 1
1445			 * - event 0 completes, we process it
1446			 * - allocate and queue event 0
1447			 * - event 1 and event 0 complete
1448			 * we can end up processing event 0 first, then 1.
1449			 */
1450			if (fence_after(fence->seqno, gpu->completed_fence))
1451				gpu->completed_fence = fence->seqno;
1452			dma_fence_signal(fence);
1453
1454			event_free(gpu, event);
1455		}
1456
1457		ret = IRQ_HANDLED;
1458	}
1459
1460	return ret;
1461}
1462
1463static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1464{
1465	int ret;
1466
1467	if (gpu->clk_reg) {
1468		ret = clk_prepare_enable(gpu->clk_reg);
1469		if (ret)
1470			return ret;
1471	}
1472
1473	if (gpu->clk_bus) {
1474		ret = clk_prepare_enable(gpu->clk_bus);
1475		if (ret)
1476			return ret;
1477	}
1478
1479	if (gpu->clk_core) {
1480		ret = clk_prepare_enable(gpu->clk_core);
1481		if (ret)
1482			goto disable_clk_bus;
1483	}
1484
1485	if (gpu->clk_shader) {
1486		ret = clk_prepare_enable(gpu->clk_shader);
1487		if (ret)
1488			goto disable_clk_core;
1489	}
1490
1491	return 0;
1492
1493disable_clk_core:
1494	if (gpu->clk_core)
1495		clk_disable_unprepare(gpu->clk_core);
1496disable_clk_bus:
1497	if (gpu->clk_bus)
1498		clk_disable_unprepare(gpu->clk_bus);
1499
1500	return ret;
1501}
1502
1503static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1504{
1505	if (gpu->clk_shader)
1506		clk_disable_unprepare(gpu->clk_shader);
1507	if (gpu->clk_core)
1508		clk_disable_unprepare(gpu->clk_core);
1509	if (gpu->clk_bus)
1510		clk_disable_unprepare(gpu->clk_bus);
1511	if (gpu->clk_reg)
1512		clk_disable_unprepare(gpu->clk_reg);
1513
1514	return 0;
1515}
1516
1517int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1518{
1519	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1520
1521	do {
1522		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1523
1524		if ((idle & gpu->idle_mask) == gpu->idle_mask)
1525			return 0;
1526
1527		if (time_is_before_jiffies(timeout)) {
1528			dev_warn(gpu->dev,
1529				 "timed out waiting for idle: idle=0x%x\n",
1530				 idle);
1531			return -ETIMEDOUT;
1532		}
1533
1534		udelay(5);
1535	} while (1);
1536}
1537
1538static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1539{
1540	if (gpu->buffer.suballoc) {
1541		/* Replace the last WAIT with END */
1542		mutex_lock(&gpu->lock);
1543		etnaviv_buffer_end(gpu);
1544		mutex_unlock(&gpu->lock);
1545
1546		/*
1547		 * We know that only the FE is busy here, this should
1548		 * happen quickly (as the WAIT is only 200 cycles).  If
1549		 * we fail, just warn and continue.
1550		 */
1551		etnaviv_gpu_wait_idle(gpu, 100);
1552	}
1553
1554	return etnaviv_gpu_clk_disable(gpu);
1555}
1556
1557#ifdef CONFIG_PM
1558static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1559{
1560	int ret;
1561
1562	ret = mutex_lock_killable(&gpu->lock);
1563	if (ret)
1564		return ret;
1565
1566	etnaviv_gpu_update_clock(gpu);
1567	etnaviv_gpu_hw_init(gpu);
1568
1569	gpu->lastctx = NULL;
1570	gpu->exec_state = -1;
1571
1572	mutex_unlock(&gpu->lock);
1573
1574	return 0;
1575}
1576#endif
1577
1578static int
1579etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
1580				  unsigned long *state)
1581{
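    	/* freq_scale goes from 0 (full speed) to 6 (slowest) */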
1582	*state = 6;
1583
1584	return 0;
1585}
1586
1587static int
1588etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
1589				  unsigned long *state)
1590{
1591	struct etnaviv_gpu *gpu = cdev->devdata;
1592
1593	*state = gpu->freq_scale;
1594
1595	return 0;
1596}
1597
1598static int
1599etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
1600				  unsigned long state)
1601{
1602	struct etnaviv_gpu *gpu = cdev->devdata;
1603
1604	mutex_lock(&gpu->lock);
1605	gpu->freq_scale = state;
1606	if (!pm_runtime_suspended(gpu->dev))
1607		etnaviv_gpu_update_clock(gpu);
1608	mutex_unlock(&gpu->lock);
1609
1610	return 0;
1611}
1612
1613static struct thermal_cooling_device_ops cooling_ops = {
1614	.get_max_state = etnaviv_gpu_cooling_get_max_state,
1615	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
1616	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
1617};
1618
1619static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1620	void *data)
1621{
1622	struct drm_device *drm = data;
1623	struct etnaviv_drm_private *priv = drm->dev_private;
1624	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1625	int ret;
1626
1627	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
1628		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1629				(char *)dev_name(dev), gpu, &cooling_ops);
1630		if (IS_ERR(gpu->cooling))
1631			return PTR_ERR(gpu->cooling);
1632	}
1633
1634	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1635	if (!gpu->wq) {
1636		ret = -ENOMEM;
1637		goto out_thermal;
1638	}
1639
1640	ret = etnaviv_sched_init(gpu);
1641	if (ret)
1642		goto out_workqueue;
1643
1644#ifdef CONFIG_PM
1645	ret = pm_runtime_get_sync(gpu->dev);
1646#else
1647	ret = etnaviv_gpu_clk_enable(gpu);
1648#endif
1649	if (ret < 0)
1650		goto out_sched;
1651
1653	gpu->drm = drm;
1654	gpu->fence_context = dma_fence_context_alloc(1);
1655	idr_init(&gpu->fence_idr);
1656	spin_lock_init(&gpu->fence_spinlock);
1657
1658	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1659	init_waitqueue_head(&gpu->fence_event);
1660
1661	priv->gpu[priv->num_gpus++] = gpu;
1662
1663	pm_runtime_mark_last_busy(gpu->dev);
1664	pm_runtime_put_autosuspend(gpu->dev);
1665
1666	return 0;
1667
1668out_sched:
1669	etnaviv_sched_fini(gpu);
1670
1671out_workqueue:
1672	destroy_workqueue(gpu->wq);
1673
1674out_thermal:
1675	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1676		thermal_cooling_device_unregister(gpu->cooling);
1677
1678	return ret;
1679}
1680
1681static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1682	void *data)
1683{
1684	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1685
1686	DBG("%s", dev_name(gpu->dev));
1687
1688	flush_workqueue(gpu->wq);
1689	destroy_workqueue(gpu->wq);
1690
1691	etnaviv_sched_fini(gpu);
1692
1693#ifdef CONFIG_PM
1694	pm_runtime_get_sync(gpu->dev);
1695	pm_runtime_put_sync_suspend(gpu->dev);
1696#else
1697	etnaviv_gpu_hw_suspend(gpu);
1698#endif
1699
1700	if (gpu->buffer.suballoc)
1701		etnaviv_cmdbuf_free(&gpu->buffer);
1702
1703	if (gpu->cmdbuf_suballoc) {
1704		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
1705		gpu->cmdbuf_suballoc = NULL;
1706	}
1707
1708	if (gpu->mmu) {
1709		etnaviv_iommu_destroy(gpu->mmu);
1710		gpu->mmu = NULL;
1711	}
1712
1713	gpu->drm = NULL;
1714	idr_destroy(&gpu->fence_idr);
1715
1716	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1717		thermal_cooling_device_unregister(gpu->cooling);
1718	gpu->cooling = NULL;
1719}
1720
1721static const struct component_ops gpu_ops = {
1722	.bind = etnaviv_gpu_bind,
1723	.unbind = etnaviv_gpu_unbind,
1724};
1725
1726static const struct of_device_id etnaviv_gpu_match[] = {
1727	{
1728		.compatible = "vivante,gc"
1729	},
1730	{ /* sentinel */ }
1731};
1732MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
1733
1734static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1735{
1736	struct device *dev = &pdev->dev;
1737	struct etnaviv_gpu *gpu;
1738	int err;
1739
1740	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1741	if (!gpu)
1742		return -ENOMEM;
1743
1744	gpu->dev = &pdev->dev;
1745	mutex_init(&gpu->lock);
1746	mutex_init(&gpu->fence_idr_lock);
1747
1748	/* Map registers: */
1749	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1750	if (IS_ERR(gpu->mmio))
1751		return PTR_ERR(gpu->mmio);
1752
1753	/* Get Interrupt: */
1754	gpu->irq = platform_get_irq(pdev, 0);
1755	if (gpu->irq < 0) {
1756		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
1757		return gpu->irq;
1758	}
1759
1760	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1761			       dev_name(gpu->dev), gpu);
1762	if (err) {
1763		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1764		return err;
1765	}
1766
1767	/* Get Clocks: */
1768	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
1769	DBG("clk_reg: %p", gpu->clk_reg);
1770	if (IS_ERR(gpu->clk_reg))
1771		gpu->clk_reg = NULL;
1772
1773	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1774	DBG("clk_bus: %p", gpu->clk_bus);
1775	if (IS_ERR(gpu->clk_bus))
1776		gpu->clk_bus = NULL;
1777
1778	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1779	DBG("clk_core: %p", gpu->clk_core);
1780	if (IS_ERR(gpu->clk_core))
1781		gpu->clk_core = NULL;
1782	gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1783
1784	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1785	DBG("clk_shader: %p", gpu->clk_shader);
1786	if (IS_ERR(gpu->clk_shader))
1787		gpu->clk_shader = NULL;
1788	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1789
1790	/* TODO: figure out max mapped size */
1791	dev_set_drvdata(dev, gpu);
1792
1793	/*
1794	 * We treat the device as initially suspended.  The runtime PM
1795	 * autosuspend delay is rather arbitrary: no measurements have
1796	 * yet been performed to determine an appropriate value.
1797	 */
1798	pm_runtime_use_autosuspend(gpu->dev);
1799	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1800	pm_runtime_enable(gpu->dev);
1801
1802	err = component_add(&pdev->dev, &gpu_ops);
1803	if (err < 0) {
1804		dev_err(&pdev->dev, "failed to register component: %d\n", err);
1805		return err;
1806	}
1807
1808	return 0;
1809}
1810
1811static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1812{
1813	component_del(&pdev->dev, &gpu_ops);
1814	pm_runtime_disable(&pdev->dev);
1815	return 0;
1816}
1817
1818#ifdef CONFIG_PM
1819static int etnaviv_gpu_rpm_suspend(struct device *dev)
1820{
1821	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1822	u32 idle, mask;
1823
1824	/* If we have outstanding fences, we're not idle */
1825	if (gpu->completed_fence != gpu->active_fence)
1826		return -EBUSY;
1827
1828	/* Check whether the hardware (except FE) is idle */
1829	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1830	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1831	if (idle != mask)
1832		return -EBUSY;
1833
1834	return etnaviv_gpu_hw_suspend(gpu);
1835}
1836
1837static int etnaviv_gpu_rpm_resume(struct device *dev)
1838{
1839	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1840	int ret;
1841
1842	ret = etnaviv_gpu_clk_enable(gpu);
1843	if (ret)
1844		return ret;
1845
1846	/* Re-initialise the basic hardware state */
1847	if (gpu->drm && gpu->buffer.suballoc) {
1848		ret = etnaviv_gpu_hw_resume(gpu);
1849		if (ret) {
1850			etnaviv_gpu_clk_disable(gpu);
1851			return ret;
1852		}
1853	}
1854
1855	return 0;
1856}
1857#endif
1858
1859static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1860	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1861			   NULL)
1862};
1863
1864struct platform_driver etnaviv_gpu_driver = {
1865	.driver = {
1866		.name = "etnaviv-gpu",
1867		.owner = THIS_MODULE,
1868		.pm = &etnaviv_gpu_pm_ops,
1869		.of_match_table = etnaviv_gpu_match,
1870	},
1871	.probe = etnaviv_gpu_platform_probe,
1872	.remove = etnaviv_gpu_platform_remove,
1873	.id_table = gpu_ids,
1874};