   1/*
   2 * Copyright (C) 2015 Etnaviv Project
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License version 2 as published by
   6 * the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program.  If not, see <http://www.gnu.org/licenses/>.
  15 */
  16
  17#include <linux/component.h>
  18#include <linux/fence.h>
  19#include <linux/moduleparam.h>
  20#include <linux/of_device.h>
  21#include "etnaviv_dump.h"
  22#include "etnaviv_gpu.h"
  23#include "etnaviv_gem.h"
  24#include "etnaviv_mmu.h"
  25#include "etnaviv_iommu.h"
  26#include "etnaviv_iommu_v2.h"
  27#include "common.xml.h"
  28#include "state.xml.h"
  29#include "state_hi.xml.h"
  30#include "cmdstream.xml.h"
  31
  32static const struct platform_device_id gpu_ids[] = {
  33	{ .name = "etnaviv-gpu,2d" },
  34	{ },
  35};
  36
  37static bool etnaviv_dump_core = true;
  38module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
  39
  40/*
  41 * Driver functions:
  42 */
  43
  44int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
  45{
  46	switch (param) {
  47	case ETNAVIV_PARAM_GPU_MODEL:
  48		*value = gpu->identity.model;
  49		break;
  50
  51	case ETNAVIV_PARAM_GPU_REVISION:
  52		*value = gpu->identity.revision;
  53		break;
  54
  55	case ETNAVIV_PARAM_GPU_FEATURES_0:
  56		*value = gpu->identity.features;
  57		break;
  58
  59	case ETNAVIV_PARAM_GPU_FEATURES_1:
  60		*value = gpu->identity.minor_features0;
  61		break;
  62
  63	case ETNAVIV_PARAM_GPU_FEATURES_2:
  64		*value = gpu->identity.minor_features1;
  65		break;
  66
  67	case ETNAVIV_PARAM_GPU_FEATURES_3:
  68		*value = gpu->identity.minor_features2;
  69		break;
  70
  71	case ETNAVIV_PARAM_GPU_FEATURES_4:
  72		*value = gpu->identity.minor_features3;
  73		break;
  74
  75	case ETNAVIV_PARAM_GPU_FEATURES_5:
  76		*value = gpu->identity.minor_features4;
  77		break;
  78
  79	case ETNAVIV_PARAM_GPU_FEATURES_6:
  80		*value = gpu->identity.minor_features5;
  81		break;
  82
  83	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
  84		*value = gpu->identity.stream_count;
  85		break;
  86
  87	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
  88		*value = gpu->identity.register_max;
  89		break;
  90
  91	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
  92		*value = gpu->identity.thread_count;
  93		break;
  94
  95	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
  96		*value = gpu->identity.vertex_cache_size;
  97		break;
  98
  99	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
 100		*value = gpu->identity.shader_core_count;
 101		break;
 102
 103	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
 104		*value = gpu->identity.pixel_pipes;
 105		break;
 106
 107	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
 108		*value = gpu->identity.vertex_output_buffer_size;
 109		break;
 110
 111	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
 112		*value = gpu->identity.buffer_size;
 113		break;
 114
 115	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
 116		*value = gpu->identity.instruction_count;
 117		break;
 118
 119	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
 120		*value = gpu->identity.num_constants;
 121		break;
 122
 123	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 124		*value = gpu->identity.varyings_count;
 125		break;
 126
 127	default:
 128		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
 129		return -EINVAL;
 130	}
 131
 132	return 0;
 133}
 134
 135
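/*
 * Descriptive note (added): etnaviv_is_model_rev() matches a specific core
 * model and revision, and etnaviv_field() extracts a register bitfield using
 * the __MASK/__SHIFT pairs from the generated register headers.
 */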
 136#define etnaviv_is_model_rev(gpu, mod, rev) \
 137	((gpu)->identity.model == chipModel_##mod && \
 138	 (gpu)->identity.revision == rev)
 139#define etnaviv_field(val, field) \
 140	(((val) & field##__MASK) >> field##__SHIFT)
 141
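/*
 * Descriptive note (added): read the HI_CHIP_SPECS registers where available
 * and fill in sane defaults for any fields the hardware reports as zero.
 */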
 142static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 143{
 144	if (gpu->identity.minor_features0 &
 145	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 146		u32 specs[4];
 147		unsigned int streams;
 148
 149		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 150		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
 151		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
 152		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
 153
 154		gpu->identity.stream_count = etnaviv_field(specs[0],
 155					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 156		gpu->identity.register_max = etnaviv_field(specs[0],
 157					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
 158		gpu->identity.thread_count = etnaviv_field(specs[0],
 159					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
 160		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
 161					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
 162		gpu->identity.shader_core_count = etnaviv_field(specs[0],
 163					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
 164		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
 165					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
 166		gpu->identity.vertex_output_buffer_size =
 167			etnaviv_field(specs[0],
 168				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
 169
 170		gpu->identity.buffer_size = etnaviv_field(specs[1],
 171					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
 172		gpu->identity.instruction_count = etnaviv_field(specs[1],
 173					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
 174		gpu->identity.num_constants = etnaviv_field(specs[1],
 175					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
 176
 177		gpu->identity.varyings_count = etnaviv_field(specs[2],
 178					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
 179
  180		/* This overrides the value from the older register if non-zero */
 181		streams = etnaviv_field(specs[3],
 182					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
 183		if (streams)
 184			gpu->identity.stream_count = streams;
 185	}
 186
 187	/* Fill in the stream count if not specified */
 188	if (gpu->identity.stream_count == 0) {
 189		if (gpu->identity.model >= 0x1000)
 190			gpu->identity.stream_count = 4;
 191		else
 192			gpu->identity.stream_count = 1;
 193	}
 194
 195	/* Convert the register max value */
 196	if (gpu->identity.register_max)
 197		gpu->identity.register_max = 1 << gpu->identity.register_max;
 198	else if (gpu->identity.model == chipModel_GC400)
 199		gpu->identity.register_max = 32;
 200	else
 201		gpu->identity.register_max = 64;
 202
 203	/* Convert thread count */
 204	if (gpu->identity.thread_count)
 205		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
 206	else if (gpu->identity.model == chipModel_GC400)
 207		gpu->identity.thread_count = 64;
 208	else if (gpu->identity.model == chipModel_GC500 ||
 209		 gpu->identity.model == chipModel_GC530)
 210		gpu->identity.thread_count = 128;
 211	else
 212		gpu->identity.thread_count = 256;
 213
 214	if (gpu->identity.vertex_cache_size == 0)
 215		gpu->identity.vertex_cache_size = 8;
 216
 217	if (gpu->identity.shader_core_count == 0) {
 218		if (gpu->identity.model >= 0x1000)
 219			gpu->identity.shader_core_count = 2;
 220		else
 221			gpu->identity.shader_core_count = 1;
 222	}
 223
 224	if (gpu->identity.pixel_pipes == 0)
 225		gpu->identity.pixel_pipes = 1;
 226
  227	/* Convert vertex buffer size */
 228	if (gpu->identity.vertex_output_buffer_size) {
 229		gpu->identity.vertex_output_buffer_size =
 230			1 << gpu->identity.vertex_output_buffer_size;
 231	} else if (gpu->identity.model == chipModel_GC400) {
 232		if (gpu->identity.revision < 0x4000)
 233			gpu->identity.vertex_output_buffer_size = 512;
 234		else if (gpu->identity.revision < 0x4200)
 235			gpu->identity.vertex_output_buffer_size = 256;
 236		else
 237			gpu->identity.vertex_output_buffer_size = 128;
 238	} else {
 239		gpu->identity.vertex_output_buffer_size = 512;
 240	}
 241
 242	switch (gpu->identity.instruction_count) {
 243	case 0:
 244		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 245		    gpu->identity.model == chipModel_GC880)
 246			gpu->identity.instruction_count = 512;
 247		else
 248			gpu->identity.instruction_count = 256;
 249		break;
 250
 251	case 1:
 252		gpu->identity.instruction_count = 1024;
 253		break;
 254
 255	case 2:
 256		gpu->identity.instruction_count = 2048;
 257		break;
 258
 259	default:
 260		gpu->identity.instruction_count = 256;
 261		break;
 262	}
 263
 264	if (gpu->identity.num_constants == 0)
 265		gpu->identity.num_constants = 168;
 266
 267	if (gpu->identity.varyings_count == 0) {
 268		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
 269			gpu->identity.varyings_count = 12;
 270		else
 271			gpu->identity.varyings_count = 8;
 272	}
 273
 274	/*
 275	 * For some cores, two varyings are consumed for position, so the
 276	 * maximum varying count needs to be reduced by one.
 277	 */
 278	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
 279	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 280	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
 281	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 282	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
 283	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
 284	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
 285	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 286	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
 287	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
 288	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
 289		gpu->identity.varyings_count -= 1;
 290}
 291
 292static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 293{
 294	u32 chipIdentity;
 295
 296	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 297
  298	/* Special case for older graphics cores. */
 299	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
 300		gpu->identity.model    = chipModel_GC500;
 301		gpu->identity.revision = etnaviv_field(chipIdentity,
 302					 VIVS_HI_CHIP_IDENTITY_REVISION);
 303	} else {
 304
 305		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 306		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
 307
 308		/*
 309		 * !!!! HACK ALERT !!!!
 310		 * Because people change device IDs without letting software
 311		 * know about it - here is the hack to make it all look the
 312		 * same.  Only for GC400 family.
 313		 */
 314		if ((gpu->identity.model & 0xff00) == 0x0400 &&
 315		    gpu->identity.model != chipModel_GC420) {
 316			gpu->identity.model = gpu->identity.model & 0x0400;
 317		}
 318
 319		/* Another special case */
 320		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
 321			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 322			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 323
 324			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
 325				/*
 326				 * This IP has an ECO; put the correct
 327				 * revision in it.
 328				 */
 329				gpu->identity.revision = 0x1051;
 330			}
 331		}
 332	}
 333
 334	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 335		 gpu->identity.model, gpu->identity.revision);
 336
 337	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 338
 339	/* Disable fast clear on GC700. */
 340	if (gpu->identity.model == chipModel_GC700)
 341		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 342
 343	if ((gpu->identity.model == chipModel_GC500 &&
 344	     gpu->identity.revision < 2) ||
 345	    (gpu->identity.model == chipModel_GC300 &&
 346	     gpu->identity.revision < 0x2000)) {
 347
 348		/*
  349		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
 350		 * registers.
 351		 */
 352		gpu->identity.minor_features0 = 0;
 353		gpu->identity.minor_features1 = 0;
 354		gpu->identity.minor_features2 = 0;
 355		gpu->identity.minor_features3 = 0;
 356		gpu->identity.minor_features4 = 0;
 357		gpu->identity.minor_features5 = 0;
 358	} else
 359		gpu->identity.minor_features0 =
 360				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
 361
 362	if (gpu->identity.minor_features0 &
 363	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 364		gpu->identity.minor_features1 =
 365				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
 366		gpu->identity.minor_features2 =
 367				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
 368		gpu->identity.minor_features3 =
 369				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
 370		gpu->identity.minor_features4 =
 371				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
 372		gpu->identity.minor_features5 =
 373				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
 374	}
 375
 376	/* GC600 idle register reports zero bits where modules aren't present */
 377	if (gpu->identity.model == chipModel_GC600) {
 378		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 379				 VIVS_HI_IDLE_STATE_RA |
 380				 VIVS_HI_IDLE_STATE_SE |
 381				 VIVS_HI_IDLE_STATE_PA |
 382				 VIVS_HI_IDLE_STATE_SH |
 383				 VIVS_HI_IDLE_STATE_PE |
 384				 VIVS_HI_IDLE_STATE_DE |
 385				 VIVS_HI_IDLE_STATE_FE;
 386	} else {
 387		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 388	}
 389
 390	etnaviv_hw_specs(gpu);
 391}
 392
 393static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 394{
 395	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
 396		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 397	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 398}
 399
 400static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 401{
 402	u32 control, idle;
 403	unsigned long timeout;
 404	bool failed = true;
 405
 406	/* TODO
 407	 *
 408	 * - clock gating
  409	 * - pulse eater
 410	 * - what about VG?
 411	 */
 412
 413	/* We hope that the GPU resets in under one second */
 414	timeout = jiffies + msecs_to_jiffies(1000);
 415
 416	while (time_is_after_jiffies(timeout)) {
 417		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 418			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 419
 420		/* enable clock */
 421		etnaviv_gpu_load_clock(gpu, control);
 422
 423		/* Wait for stable clock.  Vivante's code waited for 1ms */
 424		usleep_range(1000, 10000);
 425
 426		/* isolate the GPU. */
 427		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 428		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 429
 430		/* set soft reset. */
 431		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 432		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 433
 434		/* wait for reset. */
 435		msleep(1);
 436
 437		/* reset soft reset bit. */
 438		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 439		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 440
 441		/* reset GPU isolation. */
 442		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 443		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 444
 445		/* read idle register. */
 446		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 447
  448		/* try resetting again if the FE is not idle */
 449		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 450			dev_dbg(gpu->dev, "FE is not idle\n");
 451			continue;
 452		}
 453
 454		/* read reset register. */
 455		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 456
 457		/* is the GPU idle? */
 458		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
 459		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
 460			dev_dbg(gpu->dev, "GPU is not idle\n");
 461			continue;
 462		}
 463
 464		failed = false;
 465		break;
 466	}
 467
 468	if (failed) {
 469		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 470		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 471
 472		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
 473			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
 474			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
 475			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
 476
 477		return -EBUSY;
 478	}
 479
 480	/* We rely on the GPU running, so program the clock */
 481	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 482		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 483
 484	/* enable clock */
 485	etnaviv_gpu_load_clock(gpu, control);
 486
 487	return 0;
 488}
 489
 490static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 491{
 492	u16 prefetch;
 493
 494	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 495	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 496	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
 497		u32 mc_memory_debug;
 498
 499		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
 500
 501		if (gpu->identity.revision == 0x5007)
 502			mc_memory_debug |= 0x0c;
 503		else
 504			mc_memory_debug |= 0x08;
 505
 506		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 507	}
 508
 509	/*
  510	 * Update GPU AXI cache attribute to "cacheable, no allocate".
 511	 * This is necessary to prevent the iMX6 SoC locking up.
 512	 */
 513	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
 514		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
 515		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 516
 517	/* GC2000 rev 5108 needs a special bus config */
 518	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
 519		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
 520		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
 521				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
 522		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
 523			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
 524		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 525	}
 526
 527	/* set base addresses */
 528	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
 529	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
 530	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
 531	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
 532	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
 533
 534	/* setup the MMU page table pointers */
 535	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
 536
 537	/* Start command processor */
 538	prefetch = etnaviv_buffer_init(gpu);
 539
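	/*
	 * Note (added): unmask all interrupt sources and point the FE at the
	 * ring buffer; the command address is programmed relative to the
	 * linear window base (memory_base).
	 */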
 540	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 541	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
 542		  gpu->buffer->paddr - gpu->memory_base);
 543	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
 544		  VIVS_FE_COMMAND_CONTROL_ENABLE |
 545		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 546}
 547
 548int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 549{
 550	int ret, i;
 551	struct iommu_domain *iommu;
 552	enum etnaviv_iommu_version version;
 553	bool mmuv2;
 554
 555	ret = pm_runtime_get_sync(gpu->dev);
 556	if (ret < 0)
 557		return ret;
 558
 559	etnaviv_hw_identify(gpu);
 560
 561	if (gpu->identity.model == 0) {
 562		dev_err(gpu->dev, "Unknown GPU model\n");
 563		ret = -ENXIO;
 564		goto fail;
 565	}
 566
 567	/* Exclude VG cores with FE2.0 */
 568	if (gpu->identity.features & chipFeatures_PIPE_VG &&
 569	    gpu->identity.features & chipFeatures_FE20) {
 570		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
 571		ret = -ENXIO;
 572		goto fail;
 573	}
 574
 575	/*
 576	 * Set the GPU linear window to be at the end of the DMA window, where
 577	 * the CMA area is likely to reside. This ensures that we are able to
 578	 * map the command buffers while having the linear window overlap as
 579	 * much RAM as possible, so we can optimize mappings for other buffers.
 580	 *
 581	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
 582	 * to different views of the memory on the individual engines.
 583	 */
 584	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
 585	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
 586		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
 587		if (dma_mask < PHYS_OFFSET + SZ_2G)
 588			gpu->memory_base = PHYS_OFFSET;
 589		else
 590			gpu->memory_base = dma_mask - SZ_2G + 1;
 591	}
 592
 593	ret = etnaviv_hw_reset(gpu);
 594	if (ret)
 595		goto fail;
 596
 597	/* Setup IOMMU.. eventually we will (I think) do this once per context
 598	 * and have separate page tables per context.  For now, to keep things
 599	 * simple and to get something working, just use a single address space:
 600	 */
 601	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
 602	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
 603
 604	if (!mmuv2) {
 605		iommu = etnaviv_iommu_domain_alloc(gpu);
 606		version = ETNAVIV_IOMMU_V1;
 607	} else {
 608		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
 609		version = ETNAVIV_IOMMU_V2;
 610	}
 611
 612	if (!iommu) {
 613		ret = -ENOMEM;
 614		goto fail;
 615	}
 616
 617	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
 618	if (!gpu->mmu) {
 619		iommu_domain_free(iommu);
 620		ret = -ENOMEM;
 621		goto fail;
 622	}
 623
 624	/* Create buffer: */
 625	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
 626	if (!gpu->buffer) {
 627		ret = -ENOMEM;
 628		dev_err(gpu->dev, "could not create command buffer\n");
 629		goto destroy_iommu;
 630	}
 631	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
 632		ret = -EINVAL;
 633		dev_err(gpu->dev,
 634			"command buffer outside valid memory window\n");
 635		goto free_buffer;
 636	}
 637
 638	/* Setup event management */
 639	spin_lock_init(&gpu->event_spinlock);
 640	init_completion(&gpu->event_free);
 641	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 642		gpu->event[i].used = false;
 643		complete(&gpu->event_free);
 644	}
 645
 646	/* Now program the hardware */
 647	mutex_lock(&gpu->lock);
 648	etnaviv_gpu_hw_init(gpu);
 649	gpu->exec_state = -1;
 650	mutex_unlock(&gpu->lock);
 651
 652	pm_runtime_mark_last_busy(gpu->dev);
 653	pm_runtime_put_autosuspend(gpu->dev);
 654
 655	return 0;
 656
 657free_buffer:
 658	etnaviv_gpu_cmdbuf_free(gpu->buffer);
 659	gpu->buffer = NULL;
 660destroy_iommu:
 661	etnaviv_iommu_destroy(gpu->mmu);
 662	gpu->mmu = NULL;
 663fail:
 664	pm_runtime_mark_last_busy(gpu->dev);
 665	pm_runtime_put_autosuspend(gpu->dev);
 666
 667	return ret;
 668}
 669
 670#ifdef CONFIG_DEBUG_FS
 671struct dma_debug {
 672	u32 address[2];
 673	u32 state[2];
 674};
 675
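/*
 * Descriptive note (added): sample the FE DMA address and state, then re-read
 * them up to 500 times to see whether the front-end is still moving.
 */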
 676static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 677{
 678	u32 i;
 679
 680	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 681	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 682
 683	for (i = 0; i < 500; i++) {
 684		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 685		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 686
 687		if (debug->address[0] != debug->address[1])
 688			break;
 689
 690		if (debug->state[0] != debug->state[1])
 691			break;
 692	}
 693}
 694
 695int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 696{
 697	struct dma_debug debug;
 698	u32 dma_lo, dma_hi, axi, idle;
 699	int ret;
 700
 701	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
 702
 703	ret = pm_runtime_get_sync(gpu->dev);
 704	if (ret < 0)
 705		return ret;
 706
 707	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 708	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
 709	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
 710	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 711
 712	verify_dma(gpu, &debug);
 713
 714	seq_puts(m, "\tfeatures\n");
 715	seq_printf(m, "\t minor_features0: 0x%08x\n",
 716		   gpu->identity.minor_features0);
 717	seq_printf(m, "\t minor_features1: 0x%08x\n",
 718		   gpu->identity.minor_features1);
 719	seq_printf(m, "\t minor_features2: 0x%08x\n",
 720		   gpu->identity.minor_features2);
 721	seq_printf(m, "\t minor_features3: 0x%08x\n",
 722		   gpu->identity.minor_features3);
 723	seq_printf(m, "\t minor_features4: 0x%08x\n",
 724		   gpu->identity.minor_features4);
 725	seq_printf(m, "\t minor_features5: 0x%08x\n",
 726		   gpu->identity.minor_features5);
 727
 728	seq_puts(m, "\tspecs\n");
 729	seq_printf(m, "\t stream_count:  %d\n",
 730			gpu->identity.stream_count);
 731	seq_printf(m, "\t register_max: %d\n",
 732			gpu->identity.register_max);
 733	seq_printf(m, "\t thread_count: %d\n",
 734			gpu->identity.thread_count);
 735	seq_printf(m, "\t vertex_cache_size: %d\n",
 736			gpu->identity.vertex_cache_size);
 737	seq_printf(m, "\t shader_core_count: %d\n",
 738			gpu->identity.shader_core_count);
 739	seq_printf(m, "\t pixel_pipes: %d\n",
 740			gpu->identity.pixel_pipes);
 741	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
 742			gpu->identity.vertex_output_buffer_size);
 743	seq_printf(m, "\t buffer_size: %d\n",
 744			gpu->identity.buffer_size);
 745	seq_printf(m, "\t instruction_count: %d\n",
 746			gpu->identity.instruction_count);
 747	seq_printf(m, "\t num_constants: %d\n",
 748			gpu->identity.num_constants);
 749	seq_printf(m, "\t varyings_count: %d\n",
 750			gpu->identity.varyings_count);
 751
 752	seq_printf(m, "\taxi: 0x%08x\n", axi);
 753	seq_printf(m, "\tidle: 0x%08x\n", idle);
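	/* Report modules outside the idle mask (not present on this core) as idle, but never force the AXI LP bit */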
 754	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
 755	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
 756		seq_puts(m, "\t FE is not idle\n");
 757	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
 758		seq_puts(m, "\t DE is not idle\n");
 759	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
 760		seq_puts(m, "\t PE is not idle\n");
 761	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
 762		seq_puts(m, "\t SH is not idle\n");
 763	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
 764		seq_puts(m, "\t PA is not idle\n");
 765	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
 766		seq_puts(m, "\t SE is not idle\n");
 767	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
 768		seq_puts(m, "\t RA is not idle\n");
 769	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
 770		seq_puts(m, "\t TX is not idle\n");
 771	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
 772		seq_puts(m, "\t VG is not idle\n");
 773	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
 774		seq_puts(m, "\t IM is not idle\n");
 775	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
 776		seq_puts(m, "\t FP is not idle\n");
 777	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 778		seq_puts(m, "\t TS is not idle\n");
 779	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 780		seq_puts(m, "\t AXI low power mode\n");
 781
 782	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
 783		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
 784		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
 785		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
 786
 787		seq_puts(m, "\tMC\n");
 788		seq_printf(m, "\t read0: 0x%08x\n", read0);
 789		seq_printf(m, "\t read1: 0x%08x\n", read1);
 790		seq_printf(m, "\t write: 0x%08x\n", write);
 791	}
 792
 793	seq_puts(m, "\tDMA ");
 794
 795	if (debug.address[0] == debug.address[1] &&
 796	    debug.state[0] == debug.state[1]) {
 797		seq_puts(m, "seems to be stuck\n");
 798	} else if (debug.address[0] == debug.address[1]) {
  799		seq_puts(m, "address is constant\n");
 800	} else {
  801		seq_puts(m, "is running\n");
 802	}
 803
 804	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
 805	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
 806	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
 807	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 808	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 809		   dma_lo, dma_hi);
 810
 811	ret = 0;
 812
 813	pm_runtime_mark_last_busy(gpu->dev);
 814	pm_runtime_put_autosuspend(gpu->dev);
 815
 816	return ret;
 817}
 818#endif
 819
 820/*
 821 * Power Management:
 822 */
 823static int enable_clk(struct etnaviv_gpu *gpu)
 824{
 825	if (gpu->clk_core)
 826		clk_prepare_enable(gpu->clk_core);
 827	if (gpu->clk_shader)
 828		clk_prepare_enable(gpu->clk_shader);
 829
 830	return 0;
 831}
 832
 833static int disable_clk(struct etnaviv_gpu *gpu)
 834{
 835	if (gpu->clk_core)
 836		clk_disable_unprepare(gpu->clk_core);
 837	if (gpu->clk_shader)
 838		clk_disable_unprepare(gpu->clk_shader);
 839
 840	return 0;
 841}
 842
 843static int enable_axi(struct etnaviv_gpu *gpu)
 844{
 845	if (gpu->clk_bus)
 846		clk_prepare_enable(gpu->clk_bus);
 847
 848	return 0;
 849}
 850
 851static int disable_axi(struct etnaviv_gpu *gpu)
 852{
 853	if (gpu->clk_bus)
 854		clk_disable_unprepare(gpu->clk_bus);
 855
 856	return 0;
 857}
 858
 859/*
 860 * Hangcheck detection for locked gpu:
 861 */
 862static void recover_worker(struct work_struct *work)
 863{
 864	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
 865					       recover_work);
 866	unsigned long flags;
 867	unsigned int i;
 868
 869	dev_err(gpu->dev, "hangcheck recover!\n");
 870
 871	if (pm_runtime_get_sync(gpu->dev) < 0)
 872		return;
 873
 874	mutex_lock(&gpu->lock);
 875
 876	/* Only catch the first event, or when manually re-armed */
 877	if (etnaviv_dump_core) {
 878		etnaviv_core_dump(gpu);
 879		etnaviv_dump_core = false;
 880	}
 881
 882	etnaviv_hw_reset(gpu);
 883
 884	/* complete all events, the GPU won't do it after the reset */
 885	spin_lock_irqsave(&gpu->event_spinlock, flags);
 886	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 887		if (!gpu->event[i].used)
 888			continue;
 889		fence_signal(gpu->event[i].fence);
 890		gpu->event[i].fence = NULL;
 891		gpu->event[i].used = false;
 892		complete(&gpu->event_free);
 893	}
 894	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 895	gpu->completed_fence = gpu->active_fence;
 896
 897	etnaviv_gpu_hw_init(gpu);
 898	gpu->switch_context = true;
 899	gpu->exec_state = -1;
 900
 901	mutex_unlock(&gpu->lock);
 902	pm_runtime_mark_last_busy(gpu->dev);
 903	pm_runtime_put_autosuspend(gpu->dev);
 904
 905	/* Retire the buffer objects in a work */
 906	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
 907}
 908
 909static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
 910{
 911	DBG("%s", dev_name(gpu->dev));
 912	mod_timer(&gpu->hangcheck_timer,
 913		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
 914}
 915
 916static void hangcheck_handler(unsigned long data)
 917{
 918	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
 919	u32 fence = gpu->completed_fence;
 920	bool progress = false;
 921
 922	if (fence != gpu->hangcheck_fence) {
 923		gpu->hangcheck_fence = fence;
 924		progress = true;
 925	}
 926
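	/*
	 * Note (added): no fence completed since the last check, so fall back
	 * to checking whether the FE DMA address has moved outside the
	 * previous 16-byte window.
	 */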
 927	if (!progress) {
 928		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 929		int change = dma_addr - gpu->hangcheck_dma_addr;
 930
 931		if (change < 0 || change > 16) {
 932			gpu->hangcheck_dma_addr = dma_addr;
 933			progress = true;
 934		}
 935	}
 936
 937	if (!progress && fence_after(gpu->active_fence, fence)) {
 938		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
 939		dev_err(gpu->dev, "     completed fence: %u\n", fence);
 940		dev_err(gpu->dev, "     active fence: %u\n",
 941			gpu->active_fence);
 942		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
 943	}
 944
 945	/* if still more pending work, reset the hangcheck timer: */
 946	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
 947		hangcheck_timer_reset(gpu);
 948}
 949
 950static void hangcheck_disable(struct etnaviv_gpu *gpu)
 951{
 952	del_timer_sync(&gpu->hangcheck_timer);
 953	cancel_work_sync(&gpu->recover_work);
 954}
 955
 956/* fence object management */
 957struct etnaviv_fence {
 958	struct etnaviv_gpu *gpu;
 959	struct fence base;
 960};
 961
 962static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
 963{
 964	return container_of(fence, struct etnaviv_fence, base);
 965}
 966
 967static const char *etnaviv_fence_get_driver_name(struct fence *fence)
 968{
 969	return "etnaviv";
 970}
 971
 972static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
 973{
 974	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 975
 976	return dev_name(f->gpu->dev);
 977}
 978
 979static bool etnaviv_fence_enable_signaling(struct fence *fence)
 980{
 981	return true;
 982}
 983
 984static bool etnaviv_fence_signaled(struct fence *fence)
 985{
 986	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 987
 988	return fence_completed(f->gpu, f->base.seqno);
 989}
 990
 991static void etnaviv_fence_release(struct fence *fence)
 992{
 993	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 994
 995	kfree_rcu(f, base.rcu);
 996}
 997
 998static const struct fence_ops etnaviv_fence_ops = {
 999	.get_driver_name = etnaviv_fence_get_driver_name,
1000	.get_timeline_name = etnaviv_fence_get_timeline_name,
1001	.enable_signaling = etnaviv_fence_enable_signaling,
1002	.signaled = etnaviv_fence_signaled,
1003	.wait = fence_default_wait,
1004	.release = etnaviv_fence_release,
1005};
1006
1007static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1008{
1009	struct etnaviv_fence *f;
1010
1011	f = kzalloc(sizeof(*f), GFP_KERNEL);
1012	if (!f)
1013		return NULL;
1014
1015	f->gpu = gpu;
1016
1017	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1018		   gpu->fence_context, ++gpu->next_fence);
1019
1020	return &f->base;
1021}
1022
1023int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1024	unsigned int context, bool exclusive)
1025{
1026	struct reservation_object *robj = etnaviv_obj->resv;
1027	struct reservation_object_list *fobj;
1028	struct fence *fence;
1029	int i, ret;
1030
1031	if (!exclusive) {
1032		ret = reservation_object_reserve_shared(robj);
1033		if (ret)
1034			return ret;
1035	}
1036
1037	/*
1038	 * If we have any shared fences, then the exclusive fence
1039	 * should be ignored as it will already have been signalled.
1040	 */
1041	fobj = reservation_object_get_list(robj);
1042	if (!fobj || fobj->shared_count == 0) {
1043		/* Wait on any existing exclusive fence which isn't our own */
1044		fence = reservation_object_get_excl(robj);
1045		if (fence && fence->context != context) {
1046			ret = fence_wait(fence, true);
1047			if (ret)
1048				return ret;
1049		}
1050	}
1051
1052	if (!exclusive || !fobj)
1053		return 0;
1054
1055	for (i = 0; i < fobj->shared_count; i++) {
1056		fence = rcu_dereference_protected(fobj->shared[i],
1057						reservation_object_held(robj));
1058		if (fence->context != context) {
1059			ret = fence_wait(fence, true);
1060			if (ret)
1061				return ret;
1062		}
1063	}
1064
1065	return 0;
1066}
1067
1068/*
1069 * event management:
1070 */
1071
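/*
 * Descriptive note (added): gpu->event_free is used as a counting semaphore,
 * with one complete() posted per free event slot; event_alloc() waits on it
 * before scanning for and claiming a free slot.
 */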
1072static unsigned int event_alloc(struct etnaviv_gpu *gpu)
1073{
1074	unsigned long ret, flags;
1075	unsigned int i, event = ~0U;
1076
1077	ret = wait_for_completion_timeout(&gpu->event_free,
1078					  msecs_to_jiffies(10 * 10000));
1079	if (!ret)
1080		dev_err(gpu->dev, "wait_for_completion_timeout failed");
1081
1082	spin_lock_irqsave(&gpu->event_spinlock, flags);
1083
1084	/* find first free event */
1085	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
1086		if (gpu->event[i].used == false) {
1087			gpu->event[i].used = true;
1088			event = i;
1089			break;
1090		}
1091	}
1092
1093	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1094
1095	return event;
1096}
1097
1098static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1099{
1100	unsigned long flags;
1101
1102	spin_lock_irqsave(&gpu->event_spinlock, flags);
1103
1104	if (gpu->event[event].used == false) {
1105		dev_warn(gpu->dev, "event %u is already marked as free",
1106			 event);
1107		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1108	} else {
1109		gpu->event[event].used = false;
1110		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1111
1112		complete(&gpu->event_free);
1113	}
1114}
1115
1116/*
1117 * Cmdstream submission/retirement:
1118 */
1119
1120struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
1121	size_t nr_bos)
1122{
1123	struct etnaviv_cmdbuf *cmdbuf;
1124	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
1125				 sizeof(*cmdbuf));
1126
1127	cmdbuf = kzalloc(sz, GFP_KERNEL);
1128	if (!cmdbuf)
1129		return NULL;
1130
1131	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
1132				     GFP_KERNEL);
1133	if (!cmdbuf->vaddr) {
1134		kfree(cmdbuf);
1135		return NULL;
1136	}
1137
1138	cmdbuf->gpu = gpu;
1139	cmdbuf->size = size;
1140
1141	return cmdbuf;
1142}
1143
1144void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
1145{
1146	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
1147		    cmdbuf->paddr);
1148	kfree(cmdbuf);
1149}
1150
1151static void retire_worker(struct work_struct *work)
1152{
1153	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1154					       retire_work);
1155	u32 fence = gpu->completed_fence;
1156	struct etnaviv_cmdbuf *cmdbuf, *tmp;
1157	unsigned int i;
1158
1159	mutex_lock(&gpu->lock);
1160	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
1161		if (!fence_is_signaled(cmdbuf->fence))
1162			break;
1163
1164		list_del(&cmdbuf->node);
1165		fence_put(cmdbuf->fence);
1166
1167		for (i = 0; i < cmdbuf->nr_bos; i++) {
1168			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
1169			struct etnaviv_gem_object *etnaviv_obj = mapping->object;
1170
1171			atomic_dec(&etnaviv_obj->gpu_active);
1172			/* drop the refcount taken in etnaviv_gpu_submit */
1173			etnaviv_gem_mapping_unreference(mapping);
1174		}
1175
1176		etnaviv_gpu_cmdbuf_free(cmdbuf);
1177		/*
1178		 * We need to balance the runtime PM count caused by
1179		 * each submission.  Upon submission, we increment
1180		 * the runtime PM counter, and allocate one event.
1181		 * So here, we put the runtime PM count for each
1182		 * completed event.
1183		 */
1184		pm_runtime_put_autosuspend(gpu->dev);
1185	}
1186
1187	gpu->retired_fence = fence;
1188
1189	mutex_unlock(&gpu->lock);
1190
1191	wake_up_all(&gpu->fence_event);
1192}
1193
1194int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1195	u32 fence, struct timespec *timeout)
1196{
1197	int ret;
1198
1199	if (fence_after(fence, gpu->next_fence)) {
1200		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
1201				fence, gpu->next_fence);
1202		return -EINVAL;
1203	}
1204
1205	if (!timeout) {
1206		/* No timeout was requested: just test for completion */
1207		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
1208	} else {
1209		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1210
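		/*
		 * Note (added): wait_event_interruptible_timeout() returns the
		 * remaining jiffies on success, 0 on timeout, or -ERESTARTSYS
		 * if interrupted; this is mapped to 0/-ETIMEDOUT below.
		 */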
1211		ret = wait_event_interruptible_timeout(gpu->fence_event,
1212						fence_completed(gpu, fence),
1213						remaining);
1214		if (ret == 0) {
1215			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
1216				fence, gpu->retired_fence,
1217				gpu->completed_fence);
1218			ret = -ETIMEDOUT;
1219		} else if (ret != -ERESTARTSYS) {
1220			ret = 0;
1221		}
1222	}
1223
1224	return ret;
1225}
1226
1227/*
 1228 * Wait for an object to become inactive.  This, on its own, is not race
1229 * free: the object is moved by the retire worker off the active list, and
1230 * then the iova is put.  Moreover, the object could be re-submitted just
1231 * after we notice that it's become inactive.
1232 *
1233 * Although the retirement happens under the gpu lock, we don't want to hold
1234 * that lock in this function while waiting.
1235 */
1236int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1237	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1238{
1239	unsigned long remaining;
1240	long ret;
1241
1242	if (!timeout)
1243		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1244
1245	remaining = etnaviv_timeout_to_jiffies(timeout);
1246
1247	ret = wait_event_interruptible_timeout(gpu->fence_event,
1248					       !is_active(etnaviv_obj),
1249					       remaining);
1250	if (ret > 0) {
1251		struct etnaviv_drm_private *priv = gpu->drm->dev_private;
1252
1253		/* Synchronise with the retire worker */
1254		flush_workqueue(priv->wq);
1255		return 0;
1256	} else if (ret == -ERESTARTSYS) {
1257		return -ERESTARTSYS;
1258	} else {
1259		return -ETIMEDOUT;
1260	}
1261}
1262
1263int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
1264{
1265	return pm_runtime_get_sync(gpu->dev);
1266}
1267
1268void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
1269{
1270	pm_runtime_mark_last_busy(gpu->dev);
1271	pm_runtime_put_autosuspend(gpu->dev);
1272}
1273
1274/* add bo's to gpu's ring, and kick gpu: */
1275int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1276	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
1277{
1278	struct fence *fence;
1279	unsigned int event, i;
1280	int ret;
1281
1282	ret = etnaviv_gpu_pm_get_sync(gpu);
1283	if (ret < 0)
1284		return ret;
1285
1286	mutex_lock(&gpu->lock);
1287
1288	/*
1289	 * TODO
1290	 *
1291	 * - flush
1292	 * - data endian
1293	 * - prefetch
1294	 *
1295	 */
1296
1297	event = event_alloc(gpu);
1298	if (unlikely(event == ~0U)) {
1299		DRM_ERROR("no free event\n");
1300		ret = -EBUSY;
1301		goto out_unlock;
1302	}
1303
1304	fence = etnaviv_gpu_fence_alloc(gpu);
1305	if (!fence) {
1306		event_free(gpu, event);
1307		ret = -ENOMEM;
1308		goto out_unlock;
1309	}
1310
1311	gpu->event[event].fence = fence;
1312	submit->fence = fence->seqno;
1313	gpu->active_fence = submit->fence;
1314
1315	if (gpu->lastctx != cmdbuf->ctx) {
1316		gpu->mmu->need_flush = true;
1317		gpu->switch_context = true;
1318		gpu->lastctx = cmdbuf->ctx;
1319	}
1320
1321	etnaviv_buffer_queue(gpu, event, cmdbuf);
1322
1323	cmdbuf->fence = fence;
1324	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
1325
1326	/* We're committed to adding this command buffer, hold a PM reference */
1327	pm_runtime_get_noresume(gpu->dev);
1328
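	/*
	 * Note (added): attach the new fence to each BO's reservation object,
	 * exclusive for buffers written by this submit, shared for buffers
	 * that are only read.
	 */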
1329	for (i = 0; i < submit->nr_bos; i++) {
1330		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
1331
1332		/* Each cmdbuf takes a refcount on the mapping */
1333		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
1334		cmdbuf->bo_map[i] = submit->bos[i].mapping;
1335		atomic_inc(&etnaviv_obj->gpu_active);
1336
1337		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
1338			reservation_object_add_excl_fence(etnaviv_obj->resv,
1339							  fence);
1340		else
1341			reservation_object_add_shared_fence(etnaviv_obj->resv,
1342							    fence);
1343	}
1344	cmdbuf->nr_bos = submit->nr_bos;
1345	hangcheck_timer_reset(gpu);
1346	ret = 0;
1347
1348out_unlock:
1349	mutex_unlock(&gpu->lock);
1350
1351	etnaviv_gpu_pm_put(gpu);
1352
1353	return ret;
1354}
1355
1356/*
1357 * Init/Cleanup:
1358 */
1359static irqreturn_t irq_handler(int irq, void *data)
1360{
1361	struct etnaviv_gpu *gpu = data;
1362	irqreturn_t ret = IRQ_NONE;
1363
1364	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1365
1366	if (intr != 0) {
1367		int event;
1368
1369		pm_runtime_mark_last_busy(gpu->dev);
1370
1371		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1372
1373		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1374			dev_err(gpu->dev, "AXI bus error\n");
1375			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1376		}
1377
1378		while ((event = ffs(intr)) != 0) {
1379			struct fence *fence;
1380
1381			event -= 1;
1382
1383			intr &= ~(1 << event);
1384
1385			dev_dbg(gpu->dev, "event %u\n", event);
1386
1387			fence = gpu->event[event].fence;
1388			gpu->event[event].fence = NULL;
1389			fence_signal(fence);
1390
1391			/*
1392			 * Events can be processed out of order.  Eg,
1393			 * - allocate and queue event 0
1394			 * - allocate event 1
1395			 * - event 0 completes, we process it
1396			 * - allocate and queue event 0
1397			 * - event 1 and event 0 complete
1398			 * we can end up processing event 0 first, then 1.
1399			 */
1400			if (fence_after(fence->seqno, gpu->completed_fence))
1401				gpu->completed_fence = fence->seqno;
1402
1403			event_free(gpu, event);
1404		}
1405
1406		/* Retire the buffer objects in a work */
1407		etnaviv_queue_work(gpu->drm, &gpu->retire_work);
1408
1409		ret = IRQ_HANDLED;
1410	}
1411
1412	return ret;
1413}
1414
1415static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1416{
1417	int ret;
1418
1419	ret = enable_clk(gpu);
1420	if (ret)
1421		return ret;
1422
1423	ret = enable_axi(gpu);
1424	if (ret) {
1425		disable_clk(gpu);
1426		return ret;
1427	}
1428
1429	return 0;
1430}
1431
1432static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1433{
1434	int ret;
1435
1436	ret = disable_axi(gpu);
1437	if (ret)
1438		return ret;
1439
1440	ret = disable_clk(gpu);
1441	if (ret)
1442		return ret;
1443
1444	return 0;
1445}
1446
1447static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1448{
1449	if (gpu->buffer) {
1450		unsigned long timeout;
1451
1452		/* Replace the last WAIT with END */
1453		etnaviv_buffer_end(gpu);
1454
1455		/*
1456		 * We know that only the FE is busy here, this should
1457		 * happen quickly (as the WAIT is only 200 cycles).  If
1458		 * we fail, just warn and continue.
1459		 */
1460		timeout = jiffies + msecs_to_jiffies(100);
1461		do {
1462			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1463
1464			if ((idle & gpu->idle_mask) == gpu->idle_mask)
1465				break;
1466
1467			if (time_is_before_jiffies(timeout)) {
1468				dev_warn(gpu->dev,
1469					 "timed out waiting for idle: idle=0x%x\n",
1470					 idle);
1471				break;
1472			}
1473
1474			udelay(5);
1475		} while (1);
1476	}
1477
1478	return etnaviv_gpu_clk_disable(gpu);
1479}
1480
1481#ifdef CONFIG_PM
1482static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1483{
1484	u32 clock;
1485	int ret;
1486
1487	ret = mutex_lock_killable(&gpu->lock);
1488	if (ret)
1489		return ret;
1490
1491	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
1492		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
1493
1494	etnaviv_gpu_load_clock(gpu, clock);
1495	etnaviv_gpu_hw_init(gpu);
1496
1497	gpu->switch_context = true;
1498	gpu->exec_state = -1;
1499
1500	mutex_unlock(&gpu->lock);
1501
1502	return 0;
1503}
1504#endif
1505
1506static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1507	void *data)
1508{
1509	struct drm_device *drm = data;
1510	struct etnaviv_drm_private *priv = drm->dev_private;
1511	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1512	int ret;
1513
1514#ifdef CONFIG_PM
1515	ret = pm_runtime_get_sync(gpu->dev);
1516#else
1517	ret = etnaviv_gpu_clk_enable(gpu);
1518#endif
1519	if (ret < 0)
1520		return ret;
1521
1522	gpu->drm = drm;
1523	gpu->fence_context = fence_context_alloc(1);
1524	spin_lock_init(&gpu->fence_spinlock);
1525
1526	INIT_LIST_HEAD(&gpu->active_cmd_list);
1527	INIT_WORK(&gpu->retire_work, retire_worker);
1528	INIT_WORK(&gpu->recover_work, recover_worker);
1529	init_waitqueue_head(&gpu->fence_event);
1530
1531	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
1532			(unsigned long)gpu);
1533
1534	priv->gpu[priv->num_gpus++] = gpu;
1535
1536	pm_runtime_mark_last_busy(gpu->dev);
1537	pm_runtime_put_autosuspend(gpu->dev);
1538
1539	return 0;
1540}
1541
1542static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1543	void *data)
1544{
1545	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1546
1547	DBG("%s", dev_name(gpu->dev));
1548
1549	hangcheck_disable(gpu);
1550
1551#ifdef CONFIG_PM
1552	pm_runtime_get_sync(gpu->dev);
1553	pm_runtime_put_sync_suspend(gpu->dev);
1554#else
1555	etnaviv_gpu_hw_suspend(gpu);
1556#endif
1557
1558	if (gpu->buffer) {
1559		etnaviv_gpu_cmdbuf_free(gpu->buffer);
1560		gpu->buffer = NULL;
1561	}
1562
1563	if (gpu->mmu) {
1564		etnaviv_iommu_destroy(gpu->mmu);
1565		gpu->mmu = NULL;
1566	}
1567
1568	gpu->drm = NULL;
1569}
1570
1571static const struct component_ops gpu_ops = {
1572	.bind = etnaviv_gpu_bind,
1573	.unbind = etnaviv_gpu_unbind,
1574};
1575
1576static const struct of_device_id etnaviv_gpu_match[] = {
1577	{
1578		.compatible = "vivante,gc"
1579	},
1580	{ /* sentinel */ }
1581};
1582
1583static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1584{
1585	struct device *dev = &pdev->dev;
1586	struct etnaviv_gpu *gpu;
1587	int err = 0;
1588
1589	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1590	if (!gpu)
1591		return -ENOMEM;
1592
1593	gpu->dev = &pdev->dev;
1594	mutex_init(&gpu->lock);
1595
1596	/* Map registers: */
1597	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1598	if (IS_ERR(gpu->mmio))
1599		return PTR_ERR(gpu->mmio);
1600
1601	/* Get Interrupt: */
1602	gpu->irq = platform_get_irq(pdev, 0);
1603	if (gpu->irq < 0) {
1604		err = gpu->irq;
1605		dev_err(dev, "failed to get irq: %d\n", err);
1606		goto fail;
1607	}
1608
1609	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1610			       dev_name(gpu->dev), gpu);
1611	if (err) {
1612		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1613		goto fail;
1614	}
1615
1616	/* Get Clocks: */
1617	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1618	DBG("clk_bus: %p", gpu->clk_bus);
1619	if (IS_ERR(gpu->clk_bus))
1620		gpu->clk_bus = NULL;
1621
1622	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1623	DBG("clk_core: %p", gpu->clk_core);
1624	if (IS_ERR(gpu->clk_core))
1625		gpu->clk_core = NULL;
1626
1627	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1628	DBG("clk_shader: %p", gpu->clk_shader);
1629	if (IS_ERR(gpu->clk_shader))
1630		gpu->clk_shader = NULL;
1631
1632	/* TODO: figure out max mapped size */
1633	dev_set_drvdata(dev, gpu);
1634
1635	/*
1636	 * We treat the device as initially suspended.  The runtime PM
 1637	 * autosuspend delay is rather arbitrary: no measurements have
1638	 * yet been performed to determine an appropriate value.
1639	 */
1640	pm_runtime_use_autosuspend(gpu->dev);
1641	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1642	pm_runtime_enable(gpu->dev);
1643
1644	err = component_add(&pdev->dev, &gpu_ops);
1645	if (err < 0) {
1646		dev_err(&pdev->dev, "failed to register component: %d\n", err);
1647		goto fail;
1648	}
1649
1650	return 0;
1651
1652fail:
1653	return err;
1654}
1655
1656static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1657{
1658	component_del(&pdev->dev, &gpu_ops);
1659	pm_runtime_disable(&pdev->dev);
1660	return 0;
1661}
1662
1663#ifdef CONFIG_PM
1664static int etnaviv_gpu_rpm_suspend(struct device *dev)
1665{
1666	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1667	u32 idle, mask;
1668
1669	/* If we have outstanding fences, we're not idle */
1670	if (gpu->completed_fence != gpu->active_fence)
1671		return -EBUSY;
1672
1673	/* Check whether the hardware (except FE) is idle */
1674	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1675	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1676	if (idle != mask)
1677		return -EBUSY;
1678
1679	return etnaviv_gpu_hw_suspend(gpu);
1680}
1681
1682static int etnaviv_gpu_rpm_resume(struct device *dev)
1683{
1684	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1685	int ret;
1686
1687	ret = etnaviv_gpu_clk_enable(gpu);
1688	if (ret)
1689		return ret;
1690
1691	/* Re-initialise the basic hardware state */
1692	if (gpu->drm && gpu->buffer) {
1693		ret = etnaviv_gpu_hw_resume(gpu);
1694		if (ret) {
1695			etnaviv_gpu_clk_disable(gpu);
1696			return ret;
1697		}
1698	}
1699
1700	return 0;
1701}
1702#endif
1703
1704static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1705	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1706			   NULL)
1707};
1708
1709struct platform_driver etnaviv_gpu_driver = {
1710	.driver = {
1711		.name = "etnaviv-gpu",
1712		.owner = THIS_MODULE,
1713		.pm = &etnaviv_gpu_pm_ops,
1714		.of_match_table = etnaviv_gpu_match,
1715	},
1716	.probe = etnaviv_gpu_platform_probe,
1717	.remove = etnaviv_gpu_platform_remove,
1718	.id_table = gpu_ids,
1719};
v4.10.11
   1/*
   2 * Copyright (C) 2015 Etnaviv Project
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License version 2 as published by
   6 * the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program.  If not, see <http://www.gnu.org/licenses/>.
  15 */
  16
  17#include <linux/component.h>
  18#include <linux/dma-fence.h>
  19#include <linux/moduleparam.h>
  20#include <linux/of_device.h>
  21#include "etnaviv_dump.h"
  22#include "etnaviv_gpu.h"
  23#include "etnaviv_gem.h"
  24#include "etnaviv_mmu.h"
 
 
  25#include "common.xml.h"
  26#include "state.xml.h"
  27#include "state_hi.xml.h"
  28#include "cmdstream.xml.h"
  29
  30static const struct platform_device_id gpu_ids[] = {
  31	{ .name = "etnaviv-gpu,2d" },
  32	{ },
  33};
  34
  35static bool etnaviv_dump_core = true;
  36module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
  37
  38/*
  39 * Driver functions:
  40 */
  41
  42int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
  43{
  44	switch (param) {
  45	case ETNAVIV_PARAM_GPU_MODEL:
  46		*value = gpu->identity.model;
  47		break;
  48
  49	case ETNAVIV_PARAM_GPU_REVISION:
  50		*value = gpu->identity.revision;
  51		break;
  52
  53	case ETNAVIV_PARAM_GPU_FEATURES_0:
  54		*value = gpu->identity.features;
  55		break;
  56
  57	case ETNAVIV_PARAM_GPU_FEATURES_1:
  58		*value = gpu->identity.minor_features0;
  59		break;
  60
  61	case ETNAVIV_PARAM_GPU_FEATURES_2:
  62		*value = gpu->identity.minor_features1;
  63		break;
  64
  65	case ETNAVIV_PARAM_GPU_FEATURES_3:
  66		*value = gpu->identity.minor_features2;
  67		break;
  68
  69	case ETNAVIV_PARAM_GPU_FEATURES_4:
  70		*value = gpu->identity.minor_features3;
  71		break;
  72
  73	case ETNAVIV_PARAM_GPU_FEATURES_5:
  74		*value = gpu->identity.minor_features4;
  75		break;
  76
  77	case ETNAVIV_PARAM_GPU_FEATURES_6:
  78		*value = gpu->identity.minor_features5;
  79		break;
  80
  81	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
  82		*value = gpu->identity.stream_count;
  83		break;
  84
  85	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
  86		*value = gpu->identity.register_max;
  87		break;
  88
  89	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
  90		*value = gpu->identity.thread_count;
  91		break;
  92
  93	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
  94		*value = gpu->identity.vertex_cache_size;
  95		break;
  96
  97	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
  98		*value = gpu->identity.shader_core_count;
  99		break;
 100
 101	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
 102		*value = gpu->identity.pixel_pipes;
 103		break;
 104
 105	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
 106		*value = gpu->identity.vertex_output_buffer_size;
 107		break;
 108
 109	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
 110		*value = gpu->identity.buffer_size;
 111		break;
 112
 113	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
 114		*value = gpu->identity.instruction_count;
 115		break;
 116
 117	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
 118		*value = gpu->identity.num_constants;
 119		break;
 120
 121	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 122		*value = gpu->identity.varyings_count;
 123		break;
 124
 125	default:
 126		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
 127		return -EINVAL;
 128	}
 129
 130	return 0;
 131}
 132
 133
 134#define etnaviv_is_model_rev(gpu, mod, rev) \
 135	((gpu)->identity.model == chipModel_##mod && \
 136	 (gpu)->identity.revision == rev)
 137#define etnaviv_field(val, field) \
 138	(((val) & field##__MASK) >> field##__SHIFT)
 139
 140static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 141{
 142	if (gpu->identity.minor_features0 &
 143	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 144		u32 specs[4];
 145		unsigned int streams;
 146
 147		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 148		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
 149		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
 150		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
 151
 152		gpu->identity.stream_count = etnaviv_field(specs[0],
 153					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 154		gpu->identity.register_max = etnaviv_field(specs[0],
 155					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
 156		gpu->identity.thread_count = etnaviv_field(specs[0],
 157					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
 158		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
 159					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
 160		gpu->identity.shader_core_count = etnaviv_field(specs[0],
 161					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
 162		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
 163					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
 164		gpu->identity.vertex_output_buffer_size =
 165			etnaviv_field(specs[0],
 166				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
 167
 168		gpu->identity.buffer_size = etnaviv_field(specs[1],
 169					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
 170		gpu->identity.instruction_count = etnaviv_field(specs[1],
 171					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
 172		gpu->identity.num_constants = etnaviv_field(specs[1],
 173					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
 174
 175		gpu->identity.varyings_count = etnaviv_field(specs[2],
 176					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
 177
 178		/* This overrides the value from older register if non-zero */
 179		streams = etnaviv_field(specs[3],
 180					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
 181		if (streams)
 182			gpu->identity.stream_count = streams;
 183	}
 184
 185	/* Fill in the stream count if not specified */
 186	if (gpu->identity.stream_count == 0) {
 187		if (gpu->identity.model >= 0x1000)
 188			gpu->identity.stream_count = 4;
 189		else
 190			gpu->identity.stream_count = 1;
 191	}
 192
 193	/* Convert the register max value */
 194	if (gpu->identity.register_max)
 195		gpu->identity.register_max = 1 << gpu->identity.register_max;
 196	else if (gpu->identity.model == chipModel_GC400)
 197		gpu->identity.register_max = 32;
 198	else
 199		gpu->identity.register_max = 64;
 200
 201	/* Convert thread count */
 202	if (gpu->identity.thread_count)
 203		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
 204	else if (gpu->identity.model == chipModel_GC400)
 205		gpu->identity.thread_count = 64;
 206	else if (gpu->identity.model == chipModel_GC500 ||
 207		 gpu->identity.model == chipModel_GC530)
 208		gpu->identity.thread_count = 128;
 209	else
 210		gpu->identity.thread_count = 256;
 211
 212	if (gpu->identity.vertex_cache_size == 0)
 213		gpu->identity.vertex_cache_size = 8;
 214
 215	if (gpu->identity.shader_core_count == 0) {
 216		if (gpu->identity.model >= 0x1000)
 217			gpu->identity.shader_core_count = 2;
 218		else
 219			gpu->identity.shader_core_count = 1;
 220	}
 221
 222	if (gpu->identity.pixel_pipes == 0)
 223		gpu->identity.pixel_pipes = 1;
 224
  225	/* Convert vertex output buffer size */
 226	if (gpu->identity.vertex_output_buffer_size) {
 227		gpu->identity.vertex_output_buffer_size =
 228			1 << gpu->identity.vertex_output_buffer_size;
 229	} else if (gpu->identity.model == chipModel_GC400) {
 230		if (gpu->identity.revision < 0x4000)
 231			gpu->identity.vertex_output_buffer_size = 512;
 232		else if (gpu->identity.revision < 0x4200)
 233			gpu->identity.vertex_output_buffer_size = 256;
 234		else
 235			gpu->identity.vertex_output_buffer_size = 128;
 236	} else {
 237		gpu->identity.vertex_output_buffer_size = 512;
 238	}
 239
 240	switch (gpu->identity.instruction_count) {
 241	case 0:
 242		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 243		    gpu->identity.model == chipModel_GC880)
 244			gpu->identity.instruction_count = 512;
 245		else
 246			gpu->identity.instruction_count = 256;
 247		break;
 248
 249	case 1:
 250		gpu->identity.instruction_count = 1024;
 251		break;
 252
 253	case 2:
 254		gpu->identity.instruction_count = 2048;
 255		break;
 256
 257	default:
 258		gpu->identity.instruction_count = 256;
 259		break;
 260	}
 261
 262	if (gpu->identity.num_constants == 0)
 263		gpu->identity.num_constants = 168;
 264
 265	if (gpu->identity.varyings_count == 0) {
 266		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
 267			gpu->identity.varyings_count = 12;
 268		else
 269			gpu->identity.varyings_count = 8;
 270	}
 271
 272	/*
 273	 * For some cores, two varyings are consumed for position, so the
 274	 * maximum varying count needs to be reduced by one.
 275	 */
 276	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
 277	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 278	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
 279	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 280	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
 281	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
 282	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
 283	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 284	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
 285	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
 286	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
 287		gpu->identity.varyings_count -= 1;
 288}
 289
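     /*
      * Read the identity and feature registers, apply the various
      * model/revision fixups and derive the idle mask used by the rest of
      * the driver.
      */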
 290static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 291{
 292	u32 chipIdentity;
 293
 294	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 295
  296	/* Special case for older graphics cores. */
 297	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
 298		gpu->identity.model    = chipModel_GC500;
 299		gpu->identity.revision = etnaviv_field(chipIdentity,
 300					 VIVS_HI_CHIP_IDENTITY_REVISION);
 301	} else {
 302
 303		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 304		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
 305
 306		/*
 307		 * !!!! HACK ALERT !!!!
 308		 * Because people change device IDs without letting software
 309		 * know about it - here is the hack to make it all look the
 310		 * same.  Only for GC400 family.
 311		 */
 312		if ((gpu->identity.model & 0xff00) == 0x0400 &&
 313		    gpu->identity.model != chipModel_GC420) {
 314			gpu->identity.model = gpu->identity.model & 0x0400;
 315		}
 316
 317		/* Another special case */
 318		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
 319			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 320			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 321
 322			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
 323				/*
 324				 * This IP has an ECO; put the correct
 325				 * revision in it.
 326				 */
 327				gpu->identity.revision = 0x1051;
 328			}
 329		}
 330
 331		/*
 332		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
 333		 * reality it's just a re-branded GC3000. We can identify this
 334		 * core by the upper half of the revision register being all 1.
 335		 * Fix model/rev here, so all other places can refer to this
 336		 * core by its real identity.
 337		 */
 338		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
 339			gpu->identity.model = chipModel_GC3000;
 340			gpu->identity.revision &= 0xffff;
 341		}
 342	}
 343
 344	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 345		 gpu->identity.model, gpu->identity.revision);
 346
 347	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 348
 349	/* Disable fast clear on GC700. */
 350	if (gpu->identity.model == chipModel_GC700)
 351		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 352
 353	if ((gpu->identity.model == chipModel_GC500 &&
 354	     gpu->identity.revision < 2) ||
 355	    (gpu->identity.model == chipModel_GC300 &&
 356	     gpu->identity.revision < 0x2000)) {
 357
 358		/*
  359		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
 360		 * registers.
 361		 */
 362		gpu->identity.minor_features0 = 0;
 363		gpu->identity.minor_features1 = 0;
 364		gpu->identity.minor_features2 = 0;
 365		gpu->identity.minor_features3 = 0;
 366		gpu->identity.minor_features4 = 0;
 367		gpu->identity.minor_features5 = 0;
 368	} else
 369		gpu->identity.minor_features0 =
 370				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
 371
 372	if (gpu->identity.minor_features0 &
 373	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
 374		gpu->identity.minor_features1 =
 375				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
 376		gpu->identity.minor_features2 =
 377				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
 378		gpu->identity.minor_features3 =
 379				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
 380		gpu->identity.minor_features4 =
 381				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
 382		gpu->identity.minor_features5 =
 383				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
 384	}
 385
 386	/* GC600 idle register reports zero bits where modules aren't present */
 387	if (gpu->identity.model == chipModel_GC600) {
 388		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 389				 VIVS_HI_IDLE_STATE_RA |
 390				 VIVS_HI_IDLE_STATE_SE |
 391				 VIVS_HI_IDLE_STATE_PA |
 392				 VIVS_HI_IDLE_STATE_SH |
 393				 VIVS_HI_IDLE_STATE_PE |
 394				 VIVS_HI_IDLE_STATE_DE |
 395				 VIVS_HI_IDLE_STATE_FE;
 396	} else {
 397		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 398	}
 399
 400	etnaviv_hw_specs(gpu);
 401}
 402
 403static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 404{
 405	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
 406		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 407	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 408}
 409
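     /*
      * Soft-reset the GPU: isolate the core, pulse the soft reset bit and
      * retry for up to a second until the FE and the 2D/3D pipes report
      * idle, then reprogram the clock control register.
      */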
 410static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 411{
 412	u32 control, idle;
 413	unsigned long timeout;
 414	bool failed = true;
 415
 416	/* TODO
 417	 *
 418	 * - clock gating
  419	 * - pulse eater
 420	 * - what about VG?
 421	 */
 422
 423	/* We hope that the GPU resets in under one second */
 424	timeout = jiffies + msecs_to_jiffies(1000);
 425
 426	while (time_is_after_jiffies(timeout)) {
 427		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 428			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 429
 430		/* enable clock */
 431		etnaviv_gpu_load_clock(gpu, control);
 432
 433		/* Wait for stable clock.  Vivante's code waited for 1ms */
 434		usleep_range(1000, 10000);
 435
 436		/* isolate the GPU. */
 437		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 438		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 439
 440		/* set soft reset. */
 441		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 442		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 443
 444		/* wait for reset. */
 445		msleep(1);
 446
 447		/* reset soft reset bit. */
 448		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 449		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 450
 451		/* reset GPU isolation. */
 452		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 453		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 454
 455		/* read idle register. */
 456		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 457
  458		/* try resetting again if the FE is not idle */
 459		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 460			dev_dbg(gpu->dev, "FE is not idle\n");
 461			continue;
 462		}
 463
 464		/* read reset register. */
 465		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 466
 467		/* is the GPU idle? */
 468		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
 469		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
 470			dev_dbg(gpu->dev, "GPU is not idle\n");
 471			continue;
 472		}
 473
 474		failed = false;
 475		break;
 476	}
 477
 478	if (failed) {
 479		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 480		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 481
 482		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
 483			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
 484			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
 485			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
 486
 487		return -EBUSY;
 488	}
 489
 490	/* We rely on the GPU running, so program the clock */
 491	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
 492		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
 493
 494	/* enable clock */
 495	etnaviv_gpu_load_clock(gpu, control);
 496
 497	return 0;
 498}
 499
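     /*
      * Enable module-level clock gating, keeping gating disabled for the
      * modules known to be broken on particular models and revisions.
      */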
 500static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
 501{
 502	u32 pmc, ppc;
 503
 504	/* enable clock gating */
 505	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
 506	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
 507
 508	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
 509	if (gpu->identity.revision == 0x4301 ||
 510	    gpu->identity.revision == 0x4302)
 511		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
 512
 513	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
 514
 515	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
 516
 517	/* Disable PA clock gating for GC400+ except for GC420 */
 518	if (gpu->identity.model >= chipModel_GC400 &&
 519	    gpu->identity.model != chipModel_GC420)
 520		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
 521
 522	/*
 523	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
 524	 * present without a bug fix.
 525	 */
 526	if (gpu->identity.revision < 0x5000 &&
 527	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
 528	    !(gpu->identity.minor_features1 &
 529	      chipMinorFeatures1_DISABLE_PE_GATING))
 530		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
 531
 532	if (gpu->identity.revision < 0x5422)
 533		pmc |= BIT(15); /* Unknown bit */
 534
 535	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
 536	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 537
 538	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 539}
 540
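     /*
      * Point the FE at a command buffer and start it; 'prefetch' tells the
      * FE how much of the command stream to fetch.
      */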
 541void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 542{
 543	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
 544	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
 545		  VIVS_FE_COMMAND_CONTROL_ENABLE |
 546		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 547}
 548
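     /*
      * Bring the freshly reset core into a usable state: apply per-model
      * workarounds, enable clock gating, set the AXI cache attributes,
      * restore the MMU and start the FE on the kernel ring buffer.
      */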
 549static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 550{
 551	u16 prefetch;
 552
 553	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 554	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 555	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
 556		u32 mc_memory_debug;
 557
 558		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
 559
 560		if (gpu->identity.revision == 0x5007)
 561			mc_memory_debug |= 0x0c;
 562		else
 563			mc_memory_debug |= 0x08;
 564
 565		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 566	}
 567
 568	/* enable module-level clock gating */
 569	etnaviv_gpu_enable_mlcg(gpu);
 570
 571	/*
  572	 * Update the GPU AXI cache attributes to "cacheable, no allocate".
  573	 * This is necessary to prevent the i.MX6 SoC from locking up.
 574	 */
 575	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
 576		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
 577		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 578
 579	/* GC2000 rev 5108 needs a special bus config */
 580	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
 581		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
 582		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
 583				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
 584		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
 585			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
 586		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 587	}
 588
 589	/* setup the MMU */
 590	etnaviv_iommu_restore(gpu);
 591
 592	/* Start command processor */
 593	prefetch = etnaviv_buffer_init(gpu);
 594
 595	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 596	etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
 597			     prefetch);
 598}
 599
 600int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 601{
 602	int ret, i;
 603
 604	ret = pm_runtime_get_sync(gpu->dev);
 605	if (ret < 0) {
 606		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
 607		return ret;
 608	}
 609
 610	etnaviv_hw_identify(gpu);
 611
 612	if (gpu->identity.model == 0) {
 613		dev_err(gpu->dev, "Unknown GPU model\n");
 614		ret = -ENXIO;
 615		goto fail;
 616	}
 617
 618	/* Exclude VG cores with FE2.0 */
 619	if (gpu->identity.features & chipFeatures_PIPE_VG &&
 620	    gpu->identity.features & chipFeatures_FE20) {
 621		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
 622		ret = -ENXIO;
 623		goto fail;
 624	}
 625
 626	/*
 627	 * Set the GPU linear window to be at the end of the DMA window, where
 628	 * the CMA area is likely to reside. This ensures that we are able to
 629	 * map the command buffers while having the linear window overlap as
 630	 * much RAM as possible, so we can optimize mappings for other buffers.
 631	 *
 632	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
 633	 * to different views of the memory on the individual engines.
 634	 */
 635	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
 636	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
 637		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
 638		if (dma_mask < PHYS_OFFSET + SZ_2G)
 639			gpu->memory_base = PHYS_OFFSET;
 640		else
 641			gpu->memory_base = dma_mask - SZ_2G + 1;
 642	} else if (PHYS_OFFSET >= SZ_2G) {
 643		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
 644		gpu->memory_base = PHYS_OFFSET;
 645		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 646	}
 647
 648	ret = etnaviv_hw_reset(gpu);
 649	if (ret) {
 650		dev_err(gpu->dev, "GPU reset failed\n");
 651		goto fail;
 652	}
 653
 654	gpu->mmu = etnaviv_iommu_new(gpu);
 655	if (IS_ERR(gpu->mmu)) {
 656		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
 657		ret = PTR_ERR(gpu->mmu);
 658		goto fail;
 659	}
 660
 661	/* Create buffer: */
 662	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
 663	if (!gpu->buffer) {
 664		ret = -ENOMEM;
 665		dev_err(gpu->dev, "could not create command buffer\n");
 666		goto destroy_iommu;
 667	}
 668
 669	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
 670	    gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
 671		ret = -EINVAL;
 672		dev_err(gpu->dev,
 673			"command buffer outside valid memory window\n");
 674		goto free_buffer;
 675	}
 676
 677	/* Setup event management */
 678	spin_lock_init(&gpu->event_spinlock);
 679	init_completion(&gpu->event_free);
 680	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 681		gpu->event[i].used = false;
 682		complete(&gpu->event_free);
 683	}
 684
 685	/* Now program the hardware */
 686	mutex_lock(&gpu->lock);
 687	etnaviv_gpu_hw_init(gpu);
 688	gpu->exec_state = -1;
 689	mutex_unlock(&gpu->lock);
 690
 691	pm_runtime_mark_last_busy(gpu->dev);
 692	pm_runtime_put_autosuspend(gpu->dev);
 693
 694	return 0;
 695
 696free_buffer:
 697	etnaviv_gpu_cmdbuf_free(gpu->buffer);
 698	gpu->buffer = NULL;
 699destroy_iommu:
 700	etnaviv_iommu_destroy(gpu->mmu);
 701	gpu->mmu = NULL;
 702fail:
 703	pm_runtime_mark_last_busy(gpu->dev);
 704	pm_runtime_put_autosuspend(gpu->dev);
 705
 706	return ret;
 707}
 708
 709#ifdef CONFIG_DEBUG_FS
 710struct dma_debug {
 711	u32 address[2];
 712	u32 state[2];
 713};
 714
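     /*
      * Sample the FE DMA address and state twice (retrying up to 500 times)
      * so the caller can tell a wedged front end from one that is still
      * advancing.
      */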
 715static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 716{
 717	u32 i;
 718
 719	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 720	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 721
 722	for (i = 0; i < 500; i++) {
 723		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 724		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 725
 726		if (debug->address[0] != debug->address[1])
 727			break;
 728
 729		if (debug->state[0] != debug->state[1])
 730			break;
 731	}
 732}
 733
 734int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 735{
 736	struct dma_debug debug;
 737	u32 dma_lo, dma_hi, axi, idle;
 738	int ret;
 739
 740	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
 741
 742	ret = pm_runtime_get_sync(gpu->dev);
 743	if (ret < 0)
 744		return ret;
 745
 746	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 747	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
 748	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
 749	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 750
 751	verify_dma(gpu, &debug);
 752
 753	seq_puts(m, "\tfeatures\n");
 754	seq_printf(m, "\t minor_features0: 0x%08x\n",
 755		   gpu->identity.minor_features0);
 756	seq_printf(m, "\t minor_features1: 0x%08x\n",
 757		   gpu->identity.minor_features1);
 758	seq_printf(m, "\t minor_features2: 0x%08x\n",
 759		   gpu->identity.minor_features2);
 760	seq_printf(m, "\t minor_features3: 0x%08x\n",
 761		   gpu->identity.minor_features3);
 762	seq_printf(m, "\t minor_features4: 0x%08x\n",
 763		   gpu->identity.minor_features4);
 764	seq_printf(m, "\t minor_features5: 0x%08x\n",
 765		   gpu->identity.minor_features5);
 766
 767	seq_puts(m, "\tspecs\n");
 768	seq_printf(m, "\t stream_count:  %d\n",
 769			gpu->identity.stream_count);
 770	seq_printf(m, "\t register_max: %d\n",
 771			gpu->identity.register_max);
 772	seq_printf(m, "\t thread_count: %d\n",
 773			gpu->identity.thread_count);
 774	seq_printf(m, "\t vertex_cache_size: %d\n",
 775			gpu->identity.vertex_cache_size);
 776	seq_printf(m, "\t shader_core_count: %d\n",
 777			gpu->identity.shader_core_count);
 778	seq_printf(m, "\t pixel_pipes: %d\n",
 779			gpu->identity.pixel_pipes);
 780	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
 781			gpu->identity.vertex_output_buffer_size);
 782	seq_printf(m, "\t buffer_size: %d\n",
 783			gpu->identity.buffer_size);
 784	seq_printf(m, "\t instruction_count: %d\n",
 785			gpu->identity.instruction_count);
 786	seq_printf(m, "\t num_constants: %d\n",
 787			gpu->identity.num_constants);
 788	seq_printf(m, "\t varyings_count: %d\n",
 789			gpu->identity.varyings_count);
 790
 791	seq_printf(m, "\taxi: 0x%08x\n", axi);
 792	seq_printf(m, "\tidle: 0x%08x\n", idle);
 793	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
 794	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
 795		seq_puts(m, "\t FE is not idle\n");
 796	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
 797		seq_puts(m, "\t DE is not idle\n");
 798	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
 799		seq_puts(m, "\t PE is not idle\n");
 800	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
 801		seq_puts(m, "\t SH is not idle\n");
 802	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
 803		seq_puts(m, "\t PA is not idle\n");
 804	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
 805		seq_puts(m, "\t SE is not idle\n");
 806	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
 807		seq_puts(m, "\t RA is not idle\n");
 808	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
 809		seq_puts(m, "\t TX is not idle\n");
 810	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
 811		seq_puts(m, "\t VG is not idle\n");
 812	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
 813		seq_puts(m, "\t IM is not idle\n");
 814	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
 815		seq_puts(m, "\t FP is not idle\n");
 816	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 817		seq_puts(m, "\t TS is not idle\n");
 818	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 819		seq_puts(m, "\t AXI low power mode\n");
 820
 821	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
 822		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
 823		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
 824		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
 825
 826		seq_puts(m, "\tMC\n");
 827		seq_printf(m, "\t read0: 0x%08x\n", read0);
 828		seq_printf(m, "\t read1: 0x%08x\n", read1);
 829		seq_printf(m, "\t write: 0x%08x\n", write);
 830	}
 831
 832	seq_puts(m, "\tDMA ");
 833
 834	if (debug.address[0] == debug.address[1] &&
 835	    debug.state[0] == debug.state[1]) {
 836		seq_puts(m, "seems to be stuck\n");
 837	} else if (debug.address[0] == debug.address[1]) {
 838		seq_puts(m, "address is constant\n");
 839	} else {
 840		seq_puts(m, "is running\n");
 841	}
 842
 843	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
 844	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
 845	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
 846	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 847	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 848		   dma_lo, dma_hi);
 849
 850	ret = 0;
 851
 852	pm_runtime_mark_last_busy(gpu->dev);
 853	pm_runtime_put_autosuspend(gpu->dev);
 854
 855	return ret;
 856}
 857#endif
 858
 859/*
  860 * Hangcheck detection for a locked GPU:
 861 */
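     /*
      * Recovery worker: dump the core state once (re-armable via the
      * dump_core parameter), reset and re-initialise the hardware, and
      * signal all outstanding events so nobody waits forever on a hung GPU.
      */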
 862static void recover_worker(struct work_struct *work)
 863{
 864	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
 865					       recover_work);
 866	unsigned long flags;
 867	unsigned int i;
 868
 869	dev_err(gpu->dev, "hangcheck recover!\n");
 870
 871	if (pm_runtime_get_sync(gpu->dev) < 0)
 872		return;
 873
 874	mutex_lock(&gpu->lock);
 875
 876	/* Only catch the first event, or when manually re-armed */
 877	if (etnaviv_dump_core) {
 878		etnaviv_core_dump(gpu);
 879		etnaviv_dump_core = false;
 880	}
 881
 882	etnaviv_hw_reset(gpu);
 883
 884	/* complete all events, the GPU won't do it after the reset */
 885	spin_lock_irqsave(&gpu->event_spinlock, flags);
 886	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 887		if (!gpu->event[i].used)
 888			continue;
 889		dma_fence_signal(gpu->event[i].fence);
 890		gpu->event[i].fence = NULL;
 891		gpu->event[i].used = false;
 892		complete(&gpu->event_free);
 893	}
 894	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 895	gpu->completed_fence = gpu->active_fence;
 896
 897	etnaviv_gpu_hw_init(gpu);
 898	gpu->lastctx = NULL;
 899	gpu->exec_state = -1;
 900
 901	mutex_unlock(&gpu->lock);
 902	pm_runtime_mark_last_busy(gpu->dev);
 903	pm_runtime_put_autosuspend(gpu->dev);
 904
  905	/* Retire the buffer objects from a work item */
 906	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
 907}
 908
 909static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
 910{
 911	DBG("%s", dev_name(gpu->dev));
 912	mod_timer(&gpu->hangcheck_timer,
 913		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
 914}
 915
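     /*
      * Hangcheck timer: progress is inferred either from the completed
      * fence advancing or from the FE DMA address having moved outside a
      * small (16 byte) window; if neither changed while work is still
      * outstanding, schedule the recover worker.
      */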
 916static void hangcheck_handler(unsigned long data)
 917{
 918	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
 919	u32 fence = gpu->completed_fence;
 920	bool progress = false;
 921
 922	if (fence != gpu->hangcheck_fence) {
 923		gpu->hangcheck_fence = fence;
 924		progress = true;
 925	}
 926
 927	if (!progress) {
 928		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 929		int change = dma_addr - gpu->hangcheck_dma_addr;
 930
 931		if (change < 0 || change > 16) {
 932			gpu->hangcheck_dma_addr = dma_addr;
 933			progress = true;
 934		}
 935	}
 936
 937	if (!progress && fence_after(gpu->active_fence, fence)) {
 938		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
 939		dev_err(gpu->dev, "     completed fence: %u\n", fence);
 940		dev_err(gpu->dev, "     active fence: %u\n",
 941			gpu->active_fence);
 942		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
 943	}
 944
 945	/* if still more pending work, reset the hangcheck timer: */
 946	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
 947		hangcheck_timer_reset(gpu);
 948}
 949
 950static void hangcheck_disable(struct etnaviv_gpu *gpu)
 951{
 952	del_timer_sync(&gpu->hangcheck_timer);
 953	cancel_work_sync(&gpu->recover_work);
 954}
 955
 956/* fence object management */
 957struct etnaviv_fence {
 958	struct etnaviv_gpu *gpu;
 959	struct dma_fence base;
 960};
 961
 962static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
 963{
 964	return container_of(fence, struct etnaviv_fence, base);
 965}
 966
 967static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
 968{
 969	return "etnaviv";
 970}
 971
 972static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
 973{
 974	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 975
 976	return dev_name(f->gpu->dev);
 977}
 978
 979static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
 980{
 981	return true;
 982}
 983
 984static bool etnaviv_fence_signaled(struct dma_fence *fence)
 985{
 986	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 987
 988	return fence_completed(f->gpu, f->base.seqno);
 989}
 990
 991static void etnaviv_fence_release(struct dma_fence *fence)
 992{
 993	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 994
 995	kfree_rcu(f, base.rcu);
 996}
 997
 998static const struct dma_fence_ops etnaviv_fence_ops = {
 999	.get_driver_name = etnaviv_fence_get_driver_name,
1000	.get_timeline_name = etnaviv_fence_get_timeline_name,
1001	.enable_signaling = etnaviv_fence_enable_signaling,
1002	.signaled = etnaviv_fence_signaled,
1003	.wait = dma_fence_default_wait,
1004	.release = etnaviv_fence_release,
1005};
1006
1007static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1008{
1009	struct etnaviv_fence *f;
1010
1011	f = kzalloc(sizeof(*f), GFP_KERNEL);
1012	if (!f)
1013		return NULL;
1014
1015	f->gpu = gpu;
1016
1017	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1018		       gpu->fence_context, ++gpu->next_fence);
1019
1020	return &f->base;
1021}
1022
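     /*
      * Wait for the fences attached to the object's reservation that do not
      * belong to our own context: the exclusive fence when there are no
      * shared fences, plus every shared fence when exclusive access is
      * requested.
      */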
1023int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1024	unsigned int context, bool exclusive)
1025{
1026	struct reservation_object *robj = etnaviv_obj->resv;
1027	struct reservation_object_list *fobj;
1028	struct dma_fence *fence;
1029	int i, ret;
1030
1031	if (!exclusive) {
1032		ret = reservation_object_reserve_shared(robj);
1033		if (ret)
1034			return ret;
1035	}
1036
1037	/*
1038	 * If we have any shared fences, then the exclusive fence
1039	 * should be ignored as it will already have been signalled.
1040	 */
1041	fobj = reservation_object_get_list(robj);
1042	if (!fobj || fobj->shared_count == 0) {
1043		/* Wait on any existing exclusive fence which isn't our own */
1044		fence = reservation_object_get_excl(robj);
1045		if (fence && fence->context != context) {
1046			ret = dma_fence_wait(fence, true);
1047			if (ret)
1048				return ret;
1049		}
1050	}
1051
1052	if (!exclusive || !fobj)
1053		return 0;
1054
1055	for (i = 0; i < fobj->shared_count; i++) {
1056		fence = rcu_dereference_protected(fobj->shared[i],
1057						reservation_object_held(robj));
1058		if (fence->context != context) {
1059			ret = dma_fence_wait(fence, true);
1060			if (ret)
1061				return ret;
1062		}
1063	}
1064
1065	return 0;
1066}
1067
1068/*
1069 * event management:
1070 */
1071
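     /*
      * The event_free completion is completed once per free event slot, so
      * it acts as a counting semaphore: waiting on it throttles submission
      * when all slots are in use, and the actual slot is then claimed under
      * the event spinlock.
      */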
1072static unsigned int event_alloc(struct etnaviv_gpu *gpu)
1073{
1074	unsigned long ret, flags;
1075	unsigned int i, event = ~0U;
1076
1077	ret = wait_for_completion_timeout(&gpu->event_free,
1078					  msecs_to_jiffies(10 * 10000));
1079	if (!ret)
1080		dev_err(gpu->dev, "wait_for_completion_timeout failed");
1081
1082	spin_lock_irqsave(&gpu->event_spinlock, flags);
1083
1084	/* find first free event */
1085	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
1086		if (gpu->event[i].used == false) {
1087			gpu->event[i].used = true;
1088			event = i;
1089			break;
1090		}
1091	}
1092
1093	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1094
1095	return event;
1096}
1097
1098static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1099{
1100	unsigned long flags;
1101
1102	spin_lock_irqsave(&gpu->event_spinlock, flags);
1103
1104	if (gpu->event[event].used == false) {
1105		dev_warn(gpu->dev, "event %u is already marked as free",
1106			 event);
1107		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1108	} else {
1109		gpu->event[event].used = false;
1110		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1111
1112		complete(&gpu->event_free);
1113	}
1114}
1115
1116/*
1117 * Cmdstream submission/retirement:
1118 */
1119
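     /*
      * Allocate a kernel command buffer together with space to track nr_bos
      * mapping pointers.  The backing storage is write-combined DMA memory,
      * aligned up to 4K when an MMUv2 is in use.
      */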
1120struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
1121	size_t nr_bos)
1122{
1123	struct etnaviv_cmdbuf *cmdbuf;
1124	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
1125				 sizeof(*cmdbuf));
1126
1127	cmdbuf = kzalloc(sz, GFP_KERNEL);
1128	if (!cmdbuf)
1129		return NULL;
1130
1131	if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
1132		size = ALIGN(size, SZ_4K);
1133
1134	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
1135				     GFP_KERNEL);
1136	if (!cmdbuf->vaddr) {
1137		kfree(cmdbuf);
1138		return NULL;
1139	}
1140
1141	cmdbuf->gpu = gpu;
1142	cmdbuf->size = size;
1143
1144	return cmdbuf;
1145}
1146
1147void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
1148{
1149	etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
1150	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
1151		    cmdbuf->paddr);
1152	kfree(cmdbuf);
1153}
1154
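     /*
      * Retire worker: free every command buffer whose fence has signalled,
      * dropping the per-BO mapping references and the runtime PM count
      * taken at submit time, then wake anyone waiting on fence_event.
      */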
1155static void retire_worker(struct work_struct *work)
1156{
1157	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1158					       retire_work);
1159	u32 fence = gpu->completed_fence;
1160	struct etnaviv_cmdbuf *cmdbuf, *tmp;
1161	unsigned int i;
1162
1163	mutex_lock(&gpu->lock);
1164	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
1165		if (!dma_fence_is_signaled(cmdbuf->fence))
1166			break;
1167
1168		list_del(&cmdbuf->node);
1169		dma_fence_put(cmdbuf->fence);
1170
1171		for (i = 0; i < cmdbuf->nr_bos; i++) {
1172			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
1173			struct etnaviv_gem_object *etnaviv_obj = mapping->object;
1174
1175			atomic_dec(&etnaviv_obj->gpu_active);
1176			/* drop the refcount taken in etnaviv_gpu_submit */
1177			etnaviv_gem_mapping_unreference(mapping);
1178		}
1179
1180		etnaviv_gpu_cmdbuf_free(cmdbuf);
1181		/*
1182		 * We need to balance the runtime PM count caused by
1183		 * each submission.  Upon submission, we increment
1184		 * the runtime PM counter, and allocate one event.
1185		 * So here, we put the runtime PM count for each
1186		 * completed event.
1187		 */
1188		pm_runtime_put_autosuspend(gpu->dev);
1189	}
1190
1191	gpu->retired_fence = fence;
1192
1193	mutex_unlock(&gpu->lock);
1194
1195	wake_up_all(&gpu->fence_event);
1196}
1197
1198int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1199	u32 fence, struct timespec *timeout)
1200{
1201	int ret;
1202
1203	if (fence_after(fence, gpu->next_fence)) {
1204		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
1205				fence, gpu->next_fence);
1206		return -EINVAL;
1207	}
1208
1209	if (!timeout) {
1210		/* No timeout was requested: just test for completion */
1211		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
1212	} else {
1213		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1214
1215		ret = wait_event_interruptible_timeout(gpu->fence_event,
1216						fence_completed(gpu, fence),
1217						remaining);
1218		if (ret == 0) {
1219			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
1220				fence, gpu->retired_fence,
1221				gpu->completed_fence);
1222			ret = -ETIMEDOUT;
1223		} else if (ret != -ERESTARTSYS) {
1224			ret = 0;
1225		}
1226	}
1227
1228	return ret;
1229}
1230
1231/*
 1232 * Wait for an object to become inactive.  This, on its own, is not race
1233 * free: the object is moved by the retire worker off the active list, and
1234 * then the iova is put.  Moreover, the object could be re-submitted just
1235 * after we notice that it's become inactive.
1236 *
1237 * Although the retirement happens under the gpu lock, we don't want to hold
1238 * that lock in this function while waiting.
1239 */
1240int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1241	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1242{
1243	unsigned long remaining;
1244	long ret;
1245
1246	if (!timeout)
1247		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1248
1249	remaining = etnaviv_timeout_to_jiffies(timeout);
1250
1251	ret = wait_event_interruptible_timeout(gpu->fence_event,
1252					       !is_active(etnaviv_obj),
1253					       remaining);
1254	if (ret > 0) {
1255		struct etnaviv_drm_private *priv = gpu->drm->dev_private;
1256
1257		/* Synchronise with the retire worker */
1258		flush_workqueue(priv->wq);
1259		return 0;
1260	} else if (ret == -ERESTARTSYS) {
1261		return -ERESTARTSYS;
1262	} else {
1263		return -ETIMEDOUT;
1264	}
1265}
1266
1267int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
1268{
1269	return pm_runtime_get_sync(gpu->dev);
1270}
1271
1272void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
1273{
1274	pm_runtime_mark_last_busy(gpu->dev);
1275	pm_runtime_put_autosuspend(gpu->dev);
1276}
1277
 1278/* Add the submit's BOs to the GPU's ring and kick the GPU: */
1279int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1280	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
1281{
1282	struct dma_fence *fence;
1283	unsigned int event, i;
1284	int ret;
1285
1286	ret = etnaviv_gpu_pm_get_sync(gpu);
1287	if (ret < 0)
1288		return ret;
1289
1290	/*
1291	 * TODO
1292	 *
1293	 * - flush
1294	 * - data endian
1295	 * - prefetch
1296	 *
1297	 */
1298
1299	event = event_alloc(gpu);
1300	if (unlikely(event == ~0U)) {
1301		DRM_ERROR("no free event\n");
1302		ret = -EBUSY;
1303		goto out_pm_put;
1304	}
1305
1306	mutex_lock(&gpu->lock);
1307
1308	fence = etnaviv_gpu_fence_alloc(gpu);
1309	if (!fence) {
1310		event_free(gpu, event);
1311		ret = -ENOMEM;
1312		goto out_pm_put;
1313	}
1314
1315	gpu->event[event].fence = fence;
1316	submit->fence = fence->seqno;
1317	gpu->active_fence = submit->fence;
1318
1319	if (gpu->lastctx != cmdbuf->ctx) {
1320		gpu->mmu->need_flush = true;
1321		gpu->switch_context = true;
1322		gpu->lastctx = cmdbuf->ctx;
1323	}
1324
1325	etnaviv_buffer_queue(gpu, event, cmdbuf);
1326
1327	cmdbuf->fence = fence;
1328	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
1329
1330	/* We're committed to adding this command buffer, hold a PM reference */
1331	pm_runtime_get_noresume(gpu->dev);
1332
1333	for (i = 0; i < submit->nr_bos; i++) {
1334		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
1335
1336		/* Each cmdbuf takes a refcount on the mapping */
1337		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
1338		cmdbuf->bo_map[i] = submit->bos[i].mapping;
1339		atomic_inc(&etnaviv_obj->gpu_active);
1340
1341		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
1342			reservation_object_add_excl_fence(etnaviv_obj->resv,
1343							  fence);
1344		else
1345			reservation_object_add_shared_fence(etnaviv_obj->resv,
1346							    fence);
1347	}
1348	cmdbuf->nr_bos = submit->nr_bos;
1349	hangcheck_timer_reset(gpu);
1350	ret = 0;
1351
1352	mutex_unlock(&gpu->lock);
1353
1354out_pm_put:
1355	etnaviv_gpu_pm_put(gpu);
1356
1357	return ret;
1358}
1359
1360/*
1361 * Init/Cleanup:
1362 */
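     /*
      * Interrupt handler: acknowledge the pending sources, report AXI bus
      * errors and MMU faults, signal the fence of each completed event and
      * kick off the retire worker.
      */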
1363static irqreturn_t irq_handler(int irq, void *data)
1364{
1365	struct etnaviv_gpu *gpu = data;
1366	irqreturn_t ret = IRQ_NONE;
1367
1368	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1369
1370	if (intr != 0) {
1371		int event;
1372
1373		pm_runtime_mark_last_busy(gpu->dev);
1374
1375		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1376
1377		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1378			dev_err(gpu->dev, "AXI bus error\n");
1379			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1380		}
1381
1382		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
1383			int i;
1384
1385			dev_err_ratelimited(gpu->dev,
1386				"MMU fault status 0x%08x\n",
1387				gpu_read(gpu, VIVS_MMUv2_STATUS));
1388			for (i = 0; i < 4; i++) {
1389				dev_err_ratelimited(gpu->dev,
1390					"MMU %d fault addr 0x%08x\n",
1391					i, gpu_read(gpu,
1392					VIVS_MMUv2_EXCEPTION_ADDR(i)));
1393			}
1394			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1395		}
1396
1397		while ((event = ffs(intr)) != 0) {
1398			struct dma_fence *fence;
1399
1400			event -= 1;
1401
1402			intr &= ~(1 << event);
1403
1404			dev_dbg(gpu->dev, "event %u\n", event);
1405
1406			fence = gpu->event[event].fence;
1407			gpu->event[event].fence = NULL;
1408			dma_fence_signal(fence);
1409
1410			/*
 1411			 * Events can be processed out of order.  E.g.,
1412			 * - allocate and queue event 0
1413			 * - allocate event 1
1414			 * - event 0 completes, we process it
1415			 * - allocate and queue event 0
1416			 * - event 1 and event 0 complete
1417			 * we can end up processing event 0 first, then 1.
1418			 */
1419			if (fence_after(fence->seqno, gpu->completed_fence))
1420				gpu->completed_fence = fence->seqno;
1421
1422			event_free(gpu, event);
1423		}
1424
 1425		/* Retire the buffer objects from a work item */
1426		etnaviv_queue_work(gpu->drm, &gpu->retire_work);
1427
1428		ret = IRQ_HANDLED;
1429	}
1430
1431	return ret;
1432}
1433
1434static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1435{
1436	int ret;
1437
1438	if (gpu->clk_bus) {
1439		ret = clk_prepare_enable(gpu->clk_bus);
1440		if (ret)
1441			return ret;
1442	}
1443
1444	if (gpu->clk_core) {
1445		ret = clk_prepare_enable(gpu->clk_core);
1446		if (ret)
1447			goto disable_clk_bus;
1448	}
1449
1450	if (gpu->clk_shader) {
1451		ret = clk_prepare_enable(gpu->clk_shader);
1452		if (ret)
1453			goto disable_clk_core;
1454	}
1455
1456	return 0;
1457
1458disable_clk_core:
1459	if (gpu->clk_core)
1460		clk_disable_unprepare(gpu->clk_core);
1461disable_clk_bus:
1462	if (gpu->clk_bus)
1463		clk_disable_unprepare(gpu->clk_bus);
1464
1465	return ret;
1466}
1467
1468static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1469{
1470	if (gpu->clk_shader)
1471		clk_disable_unprepare(gpu->clk_shader);
1472	if (gpu->clk_core)
1473		clk_disable_unprepare(gpu->clk_core);
1474	if (gpu->clk_bus)
1475		clk_disable_unprepare(gpu->clk_bus);
1476
1477	return 0;
1478}
1479
1480int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1481{
1482	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1483
1484	do {
1485		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1486
1487		if ((idle & gpu->idle_mask) == gpu->idle_mask)
1488			return 0;
1489
1490		if (time_is_before_jiffies(timeout)) {
1491			dev_warn(gpu->dev,
1492				 "timed out waiting for idle: idle=0x%x\n",
1493				 idle);
1494			return -ETIMEDOUT;
1495		}
1496
1497		udelay(5);
1498	} while (1);
1499}
1500
1501static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1502{
1503	if (gpu->buffer) {
1504		/* Replace the last WAIT with END */
1505		etnaviv_buffer_end(gpu);
1506
1507		/*
1508		 * We know that only the FE is busy here, this should
1509		 * happen quickly (as the WAIT is only 200 cycles).  If
1510		 * we fail, just warn and continue.
1511		 */
1512		etnaviv_gpu_wait_idle(gpu, 100);
1513	}
1514
1515	return etnaviv_gpu_clk_disable(gpu);
1516}
1517
1518#ifdef CONFIG_PM
1519static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1520{
1521	u32 clock;
1522	int ret;
1523
1524	ret = mutex_lock_killable(&gpu->lock);
1525	if (ret)
1526		return ret;
1527
1528	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
1529		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
1530
1531	etnaviv_gpu_load_clock(gpu, clock);
1532	etnaviv_gpu_hw_init(gpu);
1533
1534	gpu->switch_context = true;
1535	gpu->exec_state = -1;
1536
1537	mutex_unlock(&gpu->lock);
1538
1539	return 0;
1540}
1541#endif
1542
1543static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1544	void *data)
1545{
1546	struct drm_device *drm = data;
1547	struct etnaviv_drm_private *priv = drm->dev_private;
1548	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1549	int ret;
1550
1551#ifdef CONFIG_PM
1552	ret = pm_runtime_get_sync(gpu->dev);
1553#else
1554	ret = etnaviv_gpu_clk_enable(gpu);
1555#endif
1556	if (ret < 0)
1557		return ret;
1558
1559	gpu->drm = drm;
1560	gpu->fence_context = dma_fence_context_alloc(1);
1561	spin_lock_init(&gpu->fence_spinlock);
1562
1563	INIT_LIST_HEAD(&gpu->active_cmd_list);
1564	INIT_WORK(&gpu->retire_work, retire_worker);
1565	INIT_WORK(&gpu->recover_work, recover_worker);
1566	init_waitqueue_head(&gpu->fence_event);
1567
1568	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
1569			       (unsigned long)gpu);
1570
1571	priv->gpu[priv->num_gpus++] = gpu;
1572
1573	pm_runtime_mark_last_busy(gpu->dev);
1574	pm_runtime_put_autosuspend(gpu->dev);
1575
1576	return 0;
1577}
1578
1579static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1580	void *data)
1581{
1582	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1583
1584	DBG("%s", dev_name(gpu->dev));
1585
1586	hangcheck_disable(gpu);
1587
1588#ifdef CONFIG_PM
1589	pm_runtime_get_sync(gpu->dev);
1590	pm_runtime_put_sync_suspend(gpu->dev);
1591#else
1592	etnaviv_gpu_hw_suspend(gpu);
1593#endif
1594
1595	if (gpu->buffer) {
1596		etnaviv_gpu_cmdbuf_free(gpu->buffer);
1597		gpu->buffer = NULL;
1598	}
1599
1600	if (gpu->mmu) {
1601		etnaviv_iommu_destroy(gpu->mmu);
1602		gpu->mmu = NULL;
1603	}
1604
1605	gpu->drm = NULL;
1606}
1607
1608static const struct component_ops gpu_ops = {
1609	.bind = etnaviv_gpu_bind,
1610	.unbind = etnaviv_gpu_unbind,
1611};
1612
1613static const struct of_device_id etnaviv_gpu_match[] = {
1614	{
1615		.compatible = "vivante,gc"
1616	},
1617	{ /* sentinel */ }
1618};
1619
1620static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1621{
1622	struct device *dev = &pdev->dev;
1623	struct etnaviv_gpu *gpu;
1624	int err;
1625
1626	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1627	if (!gpu)
1628		return -ENOMEM;
1629
1630	gpu->dev = &pdev->dev;
1631	mutex_init(&gpu->lock);
1632
1633	/* Map registers: */
1634	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1635	if (IS_ERR(gpu->mmio))
1636		return PTR_ERR(gpu->mmio);
1637
1638	/* Get Interrupt: */
1639	gpu->irq = platform_get_irq(pdev, 0);
1640	if (gpu->irq < 0) {
1641		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
1642		return gpu->irq;
1643	}
1644
1645	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1646			       dev_name(gpu->dev), gpu);
1647	if (err) {
1648		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1649		return err;
1650	}
1651
1652	/* Get Clocks: */
1653	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1654	DBG("clk_bus: %p", gpu->clk_bus);
1655	if (IS_ERR(gpu->clk_bus))
1656		gpu->clk_bus = NULL;
1657
1658	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1659	DBG("clk_core: %p", gpu->clk_core);
1660	if (IS_ERR(gpu->clk_core))
1661		gpu->clk_core = NULL;
1662
1663	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1664	DBG("clk_shader: %p", gpu->clk_shader);
1665	if (IS_ERR(gpu->clk_shader))
1666		gpu->clk_shader = NULL;
1667
1668	/* TODO: figure out max mapped size */
1669	dev_set_drvdata(dev, gpu);
1670
1671	/*
1672	 * We treat the device as initially suspended.  The runtime PM
 1673	 * autosuspend delay is rather arbitrary: no measurements have
1674	 * yet been performed to determine an appropriate value.
1675	 */
1676	pm_runtime_use_autosuspend(gpu->dev);
1677	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1678	pm_runtime_enable(gpu->dev);
1679
1680	err = component_add(&pdev->dev, &gpu_ops);
1681	if (err < 0) {
1682		dev_err(&pdev->dev, "failed to register component: %d\n", err);
1683		return err;
1684	}
1685
1686	return 0;
1687}
1688
1689static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1690{
1691	component_del(&pdev->dev, &gpu_ops);
1692	pm_runtime_disable(&pdev->dev);
1693	return 0;
1694}
1695
1696#ifdef CONFIG_PM
1697static int etnaviv_gpu_rpm_suspend(struct device *dev)
1698{
1699	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1700	u32 idle, mask;
1701
1702	/* If we have outstanding fences, we're not idle */
1703	if (gpu->completed_fence != gpu->active_fence)
1704		return -EBUSY;
1705
1706	/* Check whether the hardware (except FE) is idle */
1707	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1708	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1709	if (idle != mask)
1710		return -EBUSY;
1711
1712	return etnaviv_gpu_hw_suspend(gpu);
1713}
1714
1715static int etnaviv_gpu_rpm_resume(struct device *dev)
1716{
1717	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1718	int ret;
1719
1720	ret = etnaviv_gpu_clk_enable(gpu);
1721	if (ret)
1722		return ret;
1723
1724	/* Re-initialise the basic hardware state */
1725	if (gpu->drm && gpu->buffer) {
1726		ret = etnaviv_gpu_hw_resume(gpu);
1727		if (ret) {
1728			etnaviv_gpu_clk_disable(gpu);
1729			return ret;
1730		}
1731	}
1732
1733	return 0;
1734}
1735#endif
1736
1737static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1738	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1739			   NULL)
1740};
1741
1742struct platform_driver etnaviv_gpu_driver = {
1743	.driver = {
1744		.name = "etnaviv-gpu",
1745		.owner = THIS_MODULE,
1746		.pm = &etnaviv_gpu_pm_ops,
1747		.of_match_table = etnaviv_gpu_match,
1748	},
1749	.probe = etnaviv_gpu_platform_probe,
1750	.remove = etnaviv_gpu_platform_remove,
1751	.id_table = gpu_ids,
1752};