/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

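/*
 * When true, the next GPU hang produces a core dump (see recover_worker());
 * the flag is then cleared so only the first hang is captured. It can be
 * re-armed at runtime through the writable "dump_core" module parameter.
 */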
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Driver functions:
 */

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

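/*
 * The VIVS_HI identity registers pack several fields into each 32-bit word.
 * The generated headers (state_hi.xml.h etc.) provide FIELD__MASK and
 * FIELD__SHIFT pairs for every field, which etnaviv_field() below uses to
 * extract the raw value.
 */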
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

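	/*
	 * Several of the raw SPECS fields are stored as log2 of the real
	 * value, and a field of zero means the core predates that field.
	 * Decode the encoded values and fill in model-specific defaults
	 * for anything left at zero below.
	 */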
	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

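/*
 * Write the FSCALE divider to the clock control register. The new value
 * only takes effect after pulsing the FSCALE_CMD_LOAD bit, hence the
 * double write.
 */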
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* TODO
	 *
	 * - clock gating
	 * - pulse eater
	 * - what about VG?
	 */

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

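	/*
	 * Reset sequence, per iteration: force the clock on, isolate the
	 * GPU from the AXI bus, pulse the soft reset bit, de-isolate, and
	 * then check both the FE idle bit and the 3D/2D idle bits before
	 * declaring the reset successful.
	 */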
	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock. Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update the GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC from locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);

		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/*
	 * Set up the IOMMU. Eventually we will (I think) do this once per
	 * context and have separate page tables per context. For now, to
	 * keep things simple and to get something working, just use a
	 * single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		iommu_domain_free(iommu);
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

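/*
 * Sample the FE DMA address and state twice; if neither changes across up
 * to 500 polls, the front end is most likely stuck rather than merely busy.
 */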
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/*
 * Power Management:
 */
static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

/*
 * Hangcheck detection for a locked GPU:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}

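/*
 * Heuristic: the GPU counts as making progress when the completed fence
 * seqno has advanced, or when the FE DMA address has moved beyond the few
 * bytes of jitter seen while the FE spins in its WAIT/LINK loop. A lockup
 * is only reported while work is still outstanding, i.e. the active fence
 * is ahead of the completed one.
 */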
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, " completed fence: %u\n", fence);
		dev_err(gpu->dev, " active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};

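/*
 * Fences are allocated per submission: they share the GPU's fence context
 * and spinlock, and take their seqno from the monotonically increasing
 * next_fence counter. Release goes through kfree_rcu() since fence
 * pointers may still be dereferenced under RCU via reservation objects.
 */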
static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

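/*
 * Synchronise with fences already attached to the BO's reservation object.
 * Readers (exclusive == false) reserve a shared-fence slot for the fence
 * they are about to add and only wait on another context's exclusive
 * fence; writers must also wait for all shared fences, as they will
 * install a new exclusive fence in etnaviv_gpu_submit().
 */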
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * event management:
 */

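/*
 * The event_free completion is used as a counting semaphore over the fixed
 * pool of hardware event slots: one complete() per free slot, so
 * event_alloc() blocks (with a generous timeout) whenever all events are
 * in flight, and event_free() signals a slot back.
 */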
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */

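/*
 * Command buffers live in write-combined coherent DMA memory, so the FE
 * can fetch them without any cache maintenance. The struct itself carries
 * a variable-length bo_map[] tail sized for nr_bos, hence the
 * size_vstruct() allocation below.
 */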
struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
				 sizeof(*cmdbuf));

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
				     GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
		    cmdbuf->paddr);
	kfree(cmdbuf);
}

static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission. Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed event.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive. This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put. Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	/*
	 * TODO
	 *
	 * - flush
	 * - data endian
	 * - prefetch
	 *
	 */

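	/*
	 * Each submit pairs one hardware event slot with a new fence: the
	 * event is queued behind the command stream and, on completion,
	 * signals the fence from the interrupt handler.
	 */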
	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}

/*
 * Init/Cleanup:
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order. E.g.,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here; this should
		 * happen quickly (as the WAIT is only 200 cycles). If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended. The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015-2018 Etnaviv Project
4 */
5
6#include <linux/clk.h>
7#include <linux/component.h>
8#include <linux/delay.h>
9#include <linux/dma-fence.h>
10#include <linux/dma-mapping.h>
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/platform_device.h>
14#include <linux/pm_runtime.h>
15#include <linux/regulator/consumer.h>
16#include <linux/thermal.h>
17
18#include "etnaviv_cmdbuf.h"
19#include "etnaviv_dump.h"
20#include "etnaviv_gpu.h"
21#include "etnaviv_gem.h"
22#include "etnaviv_mmu.h"
23#include "etnaviv_perfmon.h"
24#include "etnaviv_sched.h"
25#include "common.xml.h"
26#include "state.xml.h"
27#include "state_hi.xml.h"
28#include "cmdstream.xml.h"
29
30#ifndef PHYS_OFFSET
31#define PHYS_OFFSET 0
32#endif
33
34static const struct platform_device_id gpu_ids[] = {
35 { .name = "etnaviv-gpu,2d" },
36 { },
37};
38
39/*
40 * Driver functions:
41 */
42
43int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
44{
45 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
46
47 switch (param) {
48 case ETNAVIV_PARAM_GPU_MODEL:
49 *value = gpu->identity.model;
50 break;
51
52 case ETNAVIV_PARAM_GPU_REVISION:
53 *value = gpu->identity.revision;
54 break;
55
56 case ETNAVIV_PARAM_GPU_FEATURES_0:
57 *value = gpu->identity.features;
58 break;
59
60 case ETNAVIV_PARAM_GPU_FEATURES_1:
61 *value = gpu->identity.minor_features0;
62 break;
63
64 case ETNAVIV_PARAM_GPU_FEATURES_2:
65 *value = gpu->identity.minor_features1;
66 break;
67
68 case ETNAVIV_PARAM_GPU_FEATURES_3:
69 *value = gpu->identity.minor_features2;
70 break;
71
72 case ETNAVIV_PARAM_GPU_FEATURES_4:
73 *value = gpu->identity.minor_features3;
74 break;
75
76 case ETNAVIV_PARAM_GPU_FEATURES_5:
77 *value = gpu->identity.minor_features4;
78 break;
79
80 case ETNAVIV_PARAM_GPU_FEATURES_6:
81 *value = gpu->identity.minor_features5;
82 break;
83
84 case ETNAVIV_PARAM_GPU_FEATURES_7:
85 *value = gpu->identity.minor_features6;
86 break;
87
88 case ETNAVIV_PARAM_GPU_FEATURES_8:
89 *value = gpu->identity.minor_features7;
90 break;
91
92 case ETNAVIV_PARAM_GPU_FEATURES_9:
93 *value = gpu->identity.minor_features8;
94 break;
95
96 case ETNAVIV_PARAM_GPU_FEATURES_10:
97 *value = gpu->identity.minor_features9;
98 break;
99
100 case ETNAVIV_PARAM_GPU_FEATURES_11:
101 *value = gpu->identity.minor_features10;
102 break;
103
104 case ETNAVIV_PARAM_GPU_FEATURES_12:
105 *value = gpu->identity.minor_features11;
106 break;
107
108 case ETNAVIV_PARAM_GPU_STREAM_COUNT:
109 *value = gpu->identity.stream_count;
110 break;
111
112 case ETNAVIV_PARAM_GPU_REGISTER_MAX:
113 *value = gpu->identity.register_max;
114 break;
115
116 case ETNAVIV_PARAM_GPU_THREAD_COUNT:
117 *value = gpu->identity.thread_count;
118 break;
119
120 case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
121 *value = gpu->identity.vertex_cache_size;
122 break;
123
124 case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
125 *value = gpu->identity.shader_core_count;
126 break;
127
128 case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
129 *value = gpu->identity.pixel_pipes;
130 break;
131
132 case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
133 *value = gpu->identity.vertex_output_buffer_size;
134 break;
135
136 case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
137 *value = gpu->identity.buffer_size;
138 break;
139
140 case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
141 *value = gpu->identity.instruction_count;
142 break;
143
144 case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
145 *value = gpu->identity.num_constants;
146 break;
147
148 case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
149 *value = gpu->identity.varyings_count;
150 break;
151
152 case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
153 if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
154 *value = ETNAVIV_SOFTPIN_START_ADDRESS;
155 else
156 *value = ~0ULL;
157 break;
158
159 default:
160 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
161 return -EINVAL;
162 }
163
164 return 0;
165}
166
167
168#define etnaviv_is_model_rev(gpu, mod, rev) \
169 ((gpu)->identity.model == chipModel_##mod && \
170 (gpu)->identity.revision == rev)
171#define etnaviv_field(val, field) \
172 (((val) & field##__MASK) >> field##__SHIFT)
173
174static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
175{
176 if (gpu->identity.minor_features0 &
177 chipMinorFeatures0_MORE_MINOR_FEATURES) {
178 u32 specs[4];
179 unsigned int streams;
180
181 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
182 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
183 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
184 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
185
186 gpu->identity.stream_count = etnaviv_field(specs[0],
187 VIVS_HI_CHIP_SPECS_STREAM_COUNT);
188 gpu->identity.register_max = etnaviv_field(specs[0],
189 VIVS_HI_CHIP_SPECS_REGISTER_MAX);
190 gpu->identity.thread_count = etnaviv_field(specs[0],
191 VIVS_HI_CHIP_SPECS_THREAD_COUNT);
192 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
193 VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
				VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
				VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

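/*
 * Read the chip identity registers and normalise the result: a few cores
 * misreport their model/revision, so the fixups below make all other code
 * see the real identity. Feature and spec words are filled in afterwards.
 */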
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420)
			gpu->identity.model = gpu->identity.model & 0x0400;

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else {
		gpu->identity.minor_features0 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
	}

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600)
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;

	etnaviv_hw_specs(gpu);
}

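/*
 * Latch a new clock scaler value: the FSCALE_CMD_LOAD bit is raised and
 * then cleared again so the hardware picks up the new FSCALE_VAL divider.
 */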
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

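/*
 * Apply the current frequency scale: cores with dynamic frequency scaling
 * adjust the core/shader clock rates directly, all others fall back to the
 * FSCALE divider in the clock control register.
 */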
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}
}

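/*
 * Reset the GPU: isolate it from the bus, assert soft reset (or the MMUv2
 * AHB reset when the kernel owns the security state), then keep retrying
 * until the FE and the 3D/2D engines report idle or the deadline expires.
 */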
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		unsigned int fscale = 1 << (6 - gpu->freq_scale);

		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
				  VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

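/*
 * Enable module-level clock gating (MLCG), then selectively disable gating
 * again for individual modules on cores with known errata.
 */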
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

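/*
 * Point the FE at a command buffer and start fetching; in kernel-secure
 * mode the secure command control register needs the same kick.
 */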
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}
}

static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
{
	u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
				&gpu->mmu_context->cmdbuf_mapping);
	u16 prefetch;

	/* setup the MMU */
	etnaviv_iommu_restore(gpu, gpu->mmu_context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	etnaviv_gpu_start_fe(gpu, address, prefetch);
}

static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from the Vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222))
		pulse_eater |= BIT(23);

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the i.MX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);

		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * On cores with security features supported, we claim control over the
	 * security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	ret = etnaviv_iommu_global_init(gpu);
	if (ret)
		goto fail;

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);

		if (dma_mask < PHYS_OFFSET + SZ_2G)
			priv->mmu_global->memory_base = PHYS_OFFSET;
		else
			priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		priv->mmu_global->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	gpu->initialized = true;

	return 0;

fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

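/*
 * Sample the FE DMA address/state registers twice, re-reading up to 500
 * times: if neither value ever changes, the FE DMA engine is most likely
 * stuck.
 */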
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
	unsigned int i = 0;

	dev_err(gpu->dev, "recover hung GPU!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock(&gpu->event_spinlock);
	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		complete(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	spin_unlock(&gpu->event_spinlock);

	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	gpu->mmu_context = NULL;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.signaled = etnaviv_fence_signaled,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

/*
 * event management:
 */

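/*
 * The event_free completion is used as a counting semaphore over the fixed
 * pool of hardware events: acquire nr_events slots (with a timeout), then
 * pick free bit indices from event_bitmap under the spinlock. On timeout,
 * all already-acquired slots are returned.
 */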
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	unsigned long timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0;

	for (i = 0; i < nr_events; i++) {
		unsigned long ret;

		ret = wait_for_completion_timeout(&gpu->event_free, timeout);

		if (!ret) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed\n");
			goto out;
		}

		acquired++;
		timeout = ret;
	}

	spin_lock(&gpu->event_spinlock);

	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);

		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}

	spin_unlock(&gpu->event_spinlock);

	return 0;

out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);

	return -EBUSY;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	if (!test_bit(event, gpu->event_bitmap)) {
		dev_warn(gpu->dev, "event %u is already marked as free\n",
			 event);
	} else {
		clear_bit(event, gpu->event_bitmap);
		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct timespec *timeout)
{
	struct dma_fence *fence;
	int ret;

	/*
	 * Look up the fence and take a reference. We might still find a fence
	 * whose refcount has already dropped to zero. dma_fence_get_rcu
	 * pretends we didn't find a fence in that case.
	 */
	rcu_read_lock();
	fence = idr_find(&gpu->fence_idr, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	if (!fence)
		return 0;

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}

	dma_fence_put(fence);
	return ret;
}

/*
 * Wait for an object to become inactive. This, on its own, is not race
 * free: the object is moved by the scheduler off the active list, and
 * then the iova is put. Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0)
		return 0;
	else if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	else
		return -ETIMEDOUT;
}

static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}

static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;

	/* disable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);

	/* enable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}

static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		*pmr->bo_vma = pmr->sequence;
	}

	/* disable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	/* enable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
}

/* Add BOs to the GPU's ring, and kick the GPU: */
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;

	if (!submit->runtime_resumed) {
		ret = pm_runtime_get_sync(gpu->dev);
		if (ret < 0)
			return NULL;
		submit->runtime_resumed = true;
	}

	/*
	 * if there are performance monitor requests we need to have
	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
	 *   requests.
	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST
	 *   requests and update the sequence number for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;

	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		return NULL;
	}

	mutex_lock(&gpu->lock);

	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);

		goto out_unlock;
	}

	if (!gpu->mmu_context) {
		etnaviv_iommu_context_get(submit->mmu_context);
		gpu->mmu_context = submit->mmu_context;
		etnaviv_gpu_start_fe_idleloop(gpu);
	} else {
		etnaviv_iommu_context_get(gpu->mmu_context);
		submit->prev_mmu_context = gpu->mmu_context;
	}

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}

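	/*
	 * event[0] carries the out-fence for this submit; the perfmon sync
	 * points (event[1] before, event[2] after) bracket the actual
	 * command buffer.
	 */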
	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
			     event[0], &submit->cmdbuf);

	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}

out_unlock:
	mutex_unlock(&gpu->lock);

	return gpu_fence;
}

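/*
 * Worker servicing a sync point event: run the sync point callback, drop
 * the submit reference taken at queue time and release the event before
 * restarting the FE.
 */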
static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);

	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}

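/*
 * Decode an MMU fault: read the (secure or non-secure) status register and
 * print the faulting address for each MMU that has an exception flagged in
 * its 4-bit status field.
 */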
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	u32 status_reg, status;
	int i;

	if (gpu->sec_mode == ETNA_SEC_NONE)
		status_reg = VIVS_MMUv2_STATUS;
	else
		status_reg = VIVS_MMUv2_SEC_STATUS;

	status = gpu_read(gpu, status_reg);
	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

	for (i = 0; i < 4; i++) {
		u32 address_reg;

		if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
			continue;

		if (gpu->sec_mode == ETNA_SEC_NONE)
			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
		else
			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;

		dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
				    gpu_read(gpu, address_reg));
	}
}

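/*
 * Interrupt handler: reading VIVS_HI_INTR_ACKNOWLEDGE also acknowledges
 * the pending bits. Bus errors and MMU exceptions are reported; every
 * remaining bit corresponds to a completed event, which either queues
 * sync point work or signals and retires its fence.
 */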
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}

			fence = gpu->event[event].fence;
			if (!fence)
				continue;

			gpu->event[event].fence = NULL;

			/*
			 * Events can be processed out of order. Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);

			event_free(gpu, event);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_reg) {
		ret = clk_prepare_enable(gpu->clk_reg);
		if (ret)
			return ret;
	}

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			goto disable_clk_reg;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return 0;
}

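/*
 * Poll the idle state register until all modules in the idle mask report
 * idle, giving up after timeout_ms.
 */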
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->initialized && gpu->mmu_context) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles). If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);

		etnaviv_iommu_context_put(gpu->mmu_context);
		gpu->mmu_context = NULL;
	}

	gpu->exec_state = -1;

	return etnaviv_gpu_clk_disable(gpu);
}

#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

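/*
 * Thermal cooling device: the cooling state is simply the freq_scale
 * shift, so each state above 0 halves the effective GPU clock, down to
 * 1/64 of the base rate at the maximum state of 6.
 */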
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}

	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		goto out_sched;

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	idr_init(&gpu->fence_idr);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

out_sched:
	etnaviv_sched_fini(gpu);

out_workqueue:
	destroy_workqueue(gpu->wq);

out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);

	return ret;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	flush_workqueue(gpu->wq);
	destroy_workqueue(gpu->wq);

	etnaviv_sched_fini(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->initialized) {
		etnaviv_cmdbuf_free(&gpu->buffer);
		etnaviv_iommu_global_fini(gpu);
		gpu->initialized = false;
	}

	gpu->drm = NULL;
	idr_destroy(&gpu->fence_idr);

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->fence_lock);

	/* Map registers: */
	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		gpu->clk_reg = NULL;

	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended. The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
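/*
 * Runtime suspend only proceeds when the scheduler has no jobs queued in
 * hardware and every module except the FE reports idle; the FE is exempt
 * because it legitimately spins in the WAIT-LINK loop.
 */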
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If there are any jobs in the HW queue, we're not idle */
	if (atomic_read(&gpu->sched.hw_rq_count))
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->initialized) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};