drivers/gpu/drm/msm/adreno/adreno_gpu.c at v4.6
 
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  6 *
  7 * This program is free software; you can redistribute it and/or modify it
  8 * under the terms of the GNU General Public License version 2 as published by
  9 * the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful, but WITHOUT
 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14 * more details.
 15 *
 16 * You should have received a copy of the GNU General Public License along with
 17 * this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#include "adreno_gpu.h"
 21#include "msm_gem.h"
 22#include "msm_mmu.h"
 23
 24#define RB_SIZE    SZ_32K
 25#define RB_BLKSIZE 16
 26
 27int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 28{
 29	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 30
 31	switch (param) {
 32	case MSM_PARAM_GPU_ID:
 33		*value = adreno_gpu->info->revn;
 34		return 0;
 35	case MSM_PARAM_GMEM_SIZE:
 36		*value = adreno_gpu->gmem;
 37		return 0;
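	/*
	 * MSM_PARAM_CHIP_ID packs the revision one byte per field:
	 * core in bits 31:24, major in 23:16, minor in 15:8, patchid in 7:0.
	 */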
 38	case MSM_PARAM_CHIP_ID:
 39		*value = adreno_gpu->rev.patchid |
 40				(adreno_gpu->rev.minor << 8) |
 41				(adreno_gpu->rev.major << 16) |
 42				(adreno_gpu->rev.core << 24);
 43		return 0;
 44	case MSM_PARAM_MAX_FREQ:
 45		*value = adreno_gpu->base.fast_rate;
 46		return 0;
 47	case MSM_PARAM_TIMESTAMP:
 48		if (adreno_gpu->funcs->get_timestamp)
 49			return adreno_gpu->funcs->get_timestamp(gpu, value);
 50		return -EINVAL;
 51	default:
 52		DBG("%s: invalid param: %u", gpu->name, param);
 53		return -EINVAL;
 54	}
 55}
 56
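/*
 * rbmemptr() yields the GPU (iova) address of a member of the shared
 * adreno_rbmemptrs page, so the CP can write rptr/fence values back to
 * memory where the driver can read them without register accesses.
 */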
 57#define rbmemptr(adreno_gpu, member)  \
 58	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
 59
 60int adreno_hw_init(struct msm_gpu *gpu)
 61{
 62	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 63	int ret;
 64
 65	DBG("%s", gpu->name);
 66
 67	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 68	if (ret) {
 69		gpu->rb_iova = 0;
 70		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
 71		return ret;
 72	}
 73
 74	/* Setup REG_CP_RB_CNTL: */
 75	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
 76			/* size is log2(quad-words): */
 77			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
 78			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
 79			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
 80
 81	/* Setup ringbuffer address: */
 82	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
 83
 84	if (!adreno_is_a430(adreno_gpu))
 85		adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
 86						rbmemptr(adreno_gpu, rptr));
 87
 88	return 0;
 89}
 90
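/*
 * The write pointer is expressed as a dword offset from the start of the
 * ringbuffer; this is the value programmed into REG_ADRENO_CP_RB_WPTR.
 */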
 91static uint32_t get_wptr(struct msm_ringbuffer *ring)
 92{
 93	return ring->cur - ring->start;
 94}
 95
 96/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
 97static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
 98{
 99	if (adreno_is_a430(adreno_gpu))
100		return adreno_gpu->memptrs->rptr = adreno_gpu_read(
101			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
102	else
103		return adreno_gpu->memptrs->rptr;
104}
105
106uint32_t adreno_last_fence(struct msm_gpu *gpu)
107{
108	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
109	return adreno_gpu->memptrs->fence;
110}
111
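/*
 * Recovery after a hang: power the GPU down, rewind the ringbuffer, mark
 * everything submitted so far as completed (discarding pending work), then
 * power back up and re-run hw_init.
 */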
112void adreno_recover(struct msm_gpu *gpu)
113{
114	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
115	struct drm_device *dev = gpu->dev;
116	int ret;
117
118	gpu->funcs->pm_suspend(gpu);
119
120	/* reset ringbuffer: */
121	gpu->rb->cur = gpu->rb->start;
122
123	/* reset completed fence seqno, just discard anything pending: */
124	adreno_gpu->memptrs->fence = gpu->submitted_fence;
125	adreno_gpu->memptrs->rptr  = 0;
126	adreno_gpu->memptrs->wptr  = 0;
127
128	gpu->funcs->pm_resume(gpu);
129	ret = gpu->funcs->hw_init(gpu);
130	if (ret) {
131		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
132		/* hmm, oh well? */
133	}
134}
135
136int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
137		struct msm_file_private *ctx)
138{
139	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140	struct msm_drm_private *priv = gpu->dev->dev_private;
141	struct msm_ringbuffer *ring = gpu->rb;
142	unsigned i, ibs = 0;
143
144	for (i = 0; i < submit->nr_cmds; i++) {
145		switch (submit->cmd[i].type) {
146		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
147			/* ignore IB-targets */
148			break;
149		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
150			/* ignore if there has not been a ctx switch: */
151			if (priv->lastctx == ctx)
152				break;
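			/* otherwise fall through and emit the IB like a normal buffer: */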
153		case MSM_SUBMIT_CMD_BUF:
154			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
155				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
156			OUT_RING(ring, submit->cmd[i].iova);
157			OUT_RING(ring, submit->cmd[i].size);
158			ibs++;
159			break;
160		}
161	}
162
163	/* on a320, at least, we seem to need to pad things out to an
164	 * even number of qwords to avoid issue w/ CP hanging on wrap-
165	 * around:
166	 */
167	if (ibs % 2)
168		OUT_PKT2(ring);
169
170	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
171	OUT_RING(ring, submit->fence);
172
173	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
174		/* Flush HLSQ lazy updates to make sure there is nothing
175		 * pending for indirect loads after the timestamp has
176		 * passed:
177		 */
178		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
179		OUT_RING(ring, HLSQ_FLUSH);
180
181		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
182		OUT_RING(ring, 0x00000000);
183	}
184
185	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
186	OUT_RING(ring, CACHE_FLUSH_TS);
187	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
188	OUT_RING(ring, submit->fence);
189
190	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
191	OUT_PKT3(ring, CP_INTERRUPT, 1);
192	OUT_RING(ring, 0x80000000);
193
194	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
195	 * root cause is a platform issue or some a306 quirk, but this
196	 * keeps things humming along:
197	 */
198	if (adreno_is_a306(adreno_gpu)) {
199		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
200		OUT_RING(ring, 0x00000000);
201		OUT_PKT3(ring, CP_INTERRUPT, 1);
202		OUT_RING(ring, 0x80000000);
203	}
204
205#if 0
206	if (adreno_is_a3xx(adreno_gpu)) {
207		/* Dummy set-constant to trigger context rollover */
208		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
209		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
210		OUT_RING(ring, 0x00000000);
211	}
212#endif
213
214	gpu->funcs->flush(gpu);
215
216	return 0;
217}
218
219void adreno_flush(struct msm_gpu *gpu)
220{
221	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
222	uint32_t wptr = get_wptr(gpu->rb);
223
224	/* ensure writes to ringbuffer have hit system memory: */
225	mb();
226
227	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
228}
229
230void adreno_idle(struct msm_gpu *gpu)
231{
232	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
233	uint32_t wptr = get_wptr(gpu->rb);
234	int ret;
235
236	/* wait for CP to drain ringbuffer: */
237	ret = spin_until(get_rptr(adreno_gpu) == wptr);
238
239	if (ret)
240		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
241
242	/* TODO maybe we need to reset GPU here to recover from hang? */
243}
244
245#ifdef CONFIG_DEBUG_FS
246void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
247{
248	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
249	int i;
250
251	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
252			adreno_gpu->info->revn, adreno_gpu->rev.core,
253			adreno_gpu->rev.major, adreno_gpu->rev.minor,
254			adreno_gpu->rev.patchid);
255
256	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
257			gpu->submitted_fence);
258	seq_printf(m, "rptr:     %d\n", get_rptr(adreno_gpu));
259	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
260	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
261
262	gpu->funcs->pm_resume(gpu);
263
264	/* dump these out in a form that can be parsed by demsm: */
265	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
266	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
267		uint32_t start = adreno_gpu->registers[i];
268		uint32_t end   = adreno_gpu->registers[i+1];
269		uint32_t addr;
270
271		for (addr = start; addr <= end; addr++) {
272			uint32_t val = gpu_read(gpu, addr);
273			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
274		}
275	}
276
277	gpu->funcs->pm_suspend(gpu);
278}
279#endif
280
281/* Dump common gpu status and scratch registers on any hang, to make
282 * the hangcheck logs more useful.  The scratch registers seem always
283 * safe to read when GPU has hung (unlike some other regs, depending
284 * on how the GPU hung), and they are useful to match up to cmdstream
285 * dumps when debugging hangs:
286 */
287void adreno_dump_info(struct msm_gpu *gpu)
288{
289	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
290	int i;
291
292	printk("revision: %d (%d.%d.%d.%d)\n",
293			adreno_gpu->info->revn, adreno_gpu->rev.core,
294			adreno_gpu->rev.major, adreno_gpu->rev.minor,
295			adreno_gpu->rev.patchid);
296
297	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
298			gpu->submitted_fence);
299	printk("rptr:     %d\n", get_rptr(adreno_gpu));
300	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
301	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
302
303	for (i = 0; i < 8; i++) {
304		printk("CP_SCRATCH_REG%d: %u\n", i,
305			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
306	}
307}
308
309/* would be nice to not have to duplicate the _show() stuff with printk(): */
310void adreno_dump(struct msm_gpu *gpu)
311{
312	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
313	int i;
314
315	/* dump these out in a form that can be parsed by demsm: */
316	printk("IO:region %s 00000000 00020000\n", gpu->name);
317	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
318		uint32_t start = adreno_gpu->registers[i];
319		uint32_t end   = adreno_gpu->registers[i+1];
320		uint32_t addr;
321
322		for (addr = start; addr <= end; addr++) {
323			uint32_t val = gpu_read(gpu, addr);
324			printk("IO:R %08x %08x\n", addr<<2, val);
325		}
326	}
327}
328
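/*
 * Free space in the ring, in dwords.  One slot is always kept unused so
 * that wptr can never catch up to rptr, which would be indistinguishable
 * from an empty ring.
 */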
329static uint32_t ring_freewords(struct msm_gpu *gpu)
330{
331	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
332	uint32_t size = gpu->rb->size / 4;
333	uint32_t wptr = get_wptr(gpu->rb);
334	uint32_t rptr = get_rptr(adreno_gpu);
335	return (rptr + (size - 1) - wptr) % size;
336}
337
338void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
339{
340	if (spin_until(ring_freewords(gpu) >= ndwords))
341		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
342}
343
344static const char *iommu_ports[] = {
345		"gfx3d_user", "gfx3d_priv",
346		"gfx3d1_user", "gfx3d1_priv",
347};
348
349int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
350		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
351{
352	struct adreno_platform_config *config = pdev->dev.platform_data;
353	struct msm_gpu *gpu = &adreno_gpu->base;
354	struct msm_mmu *mmu;
355	int ret;
356
357	adreno_gpu->funcs = funcs;
358	adreno_gpu->info = adreno_info(config->rev);
359	adreno_gpu->gmem = adreno_gpu->info->gmem;
360	adreno_gpu->revn = adreno_gpu->info->revn;
361	adreno_gpu->rev = config->rev;
362
363	gpu->fast_rate = config->fast_rate;
364	gpu->slow_rate = config->slow_rate;
365	gpu->bus_freq  = config->bus_freq;
366#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
367	gpu->bus_scale_table = config->bus_scale_table;
368#endif
369
370	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
371			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
372
373	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
374			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
375			RB_SIZE);
376	if (ret)
377		return ret;
378
379	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
380	if (ret) {
381		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
382				adreno_gpu->info->pm4fw, ret);
383		return ret;
384	}
385
386	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
387	if (ret) {
388		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
389				adreno_gpu->info->pfpfw, ret);
390		return ret;
391	}
392
393	mmu = gpu->mmu;
394	if (mmu) {
395		ret = mmu->funcs->attach(mmu, iommu_ports,
396				ARRAY_SIZE(iommu_ports));
397		if (ret)
398			return ret;
399	}
400
401	mutex_lock(&drm->struct_mutex);
402	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
403			MSM_BO_UNCACHED);
404	mutex_unlock(&drm->struct_mutex);
405	if (IS_ERR(adreno_gpu->memptrs_bo)) {
406		ret = PTR_ERR(adreno_gpu->memptrs_bo);
407		adreno_gpu->memptrs_bo = NULL;
408		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
409		return ret;
410	}
411
412	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
413	if (!adreno_gpu->memptrs) {
414		dev_err(drm->dev, "could not vmap memptrs\n");
415		return -ENOMEM;
416	}
417
418	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
419			&adreno_gpu->memptrs_iova);
420	if (ret) {
421		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
422		return ret;
423	}
424
425	return 0;
426}
427
428void adreno_gpu_cleanup(struct adreno_gpu *gpu)
429{
430	if (gpu->memptrs_bo) {
431		if (gpu->memptrs_iova)
432			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
433		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
434	}
435	release_firmware(gpu->pm4);
436	release_firmware(gpu->pfp);
437	msm_gpu_cleanup(&gpu->base);
438}
drivers/gpu/drm/msm/adreno/adreno_gpu.c at v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 *
   6 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
   7 */
   8
   9#include <linux/ascii85.h>
  10#include <linux/interconnect.h>
  11#include <linux/qcom_scm.h>
  12#include <linux/kernel.h>
  13#include <linux/of_address.h>
  14#include <linux/pm_opp.h>
  15#include <linux/slab.h>
  16#include <linux/soc/qcom/mdt_loader.h>
  17#include <soc/qcom/ocmem.h>
  18#include "adreno_gpu.h"
  19#include "msm_gem.h"
  20#include "msm_mmu.h"
  21
  22static bool zap_available = true;
  23
  24static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
  25		u32 pasid)
  26{
  27	struct device *dev = &gpu->pdev->dev;
  28	const struct firmware *fw;
  29	const char *signed_fwname = NULL;
  30	struct device_node *np, *mem_np;
  31	struct resource r;
  32	phys_addr_t mem_phys;
  33	ssize_t mem_size;
  34	void *mem_region = NULL;
  35	int ret;
  36
  37	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
  38		zap_available = false;
  39		return -EINVAL;
  40	}
  41
  42	np = of_get_child_by_name(dev->of_node, "zap-shader");
  43	if (!np) {
  44		zap_available = false;
  45		return -ENODEV;
  46	}
  47
  48	mem_np = of_parse_phandle(np, "memory-region", 0);
  49	of_node_put(np);
  50	if (!mem_np) {
  51		zap_available = false;
  52		return -EINVAL;
  53	}
  54
  55	ret = of_address_to_resource(mem_np, 0, &r);
  56	of_node_put(mem_np);
  57	if (ret)
  58		return ret;
  59
  60	mem_phys = r.start;
  61
  62	/*
  63	 * Check for a firmware-name property.  This is the new scheme
  64	 * to handle firmware that may be signed with device specific
  65	 * keys, allowing us to have a different zap fw path for different
  66	 * devices.
  67	 *
  68	 * If the firmware-name property is found, we bypass the
  69	 * adreno_request_fw() mechanism, because we don't need to handle
  70	 * the /lib/firmware/qcom/... vs /lib/firmware/... case.
  71	 *
  72	 * If the firmware-name property is not found, for backwards
  73	 * compatibility we fall back to the fwname from the gpulist
  74	 * table.
  75	 */
  76	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
  77	if (signed_fwname) {
  78		fwname = signed_fwname;
  79		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
  80		if (ret)
  81			fw = ERR_PTR(ret);
  82	} else if (fwname) {
  83		/* Request the MDT file from the default location: */
  84		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
  85	} else {
  86		/*
  87		 * For new targets, we require the firmware-name property,
  88		 * if a zap-shader is required, rather than falling back
  89		 * to a firmware name specified in gpulist.
  90		 *
  91		 * Because the firmware is signed with a (potentially)
  92		 * device specific key, having the name come from gpulist
  93		 * was a bad idea, and is only provided for backwards
  94		 * compatibility for older targets.
  95		 */
  96		return -ENODEV;
  97	}
  98
  99	if (IS_ERR(fw)) {
 100		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
 101		return PTR_ERR(fw);
 102	}
 103
 104	/* Figure out how much memory we need */
 105	mem_size = qcom_mdt_get_size(fw);
 106	if (mem_size < 0) {
 107		ret = mem_size;
 108		goto out;
 109	}
 110
 111	if (mem_size > resource_size(&r)) {
 112		DRM_DEV_ERROR(dev,
 113			"memory region is too small to load the MDT\n");
 114		ret = -E2BIG;
 115		goto out;
 116	}
 117
 118	/* Allocate memory for the firmware image */
 119	mem_region = memremap(mem_phys, mem_size,  MEMREMAP_WC);
 120	if (!mem_region) {
 121		ret = -ENOMEM;
 122		goto out;
 123	}
 124
 125	/*
 126	 * Load the rest of the MDT
 127	 *
 128	 * Note that we could be dealing with two different paths, since
 129	 * with upstream linux-firmware it would be in a qcom/ subdir..
 130	 * adreno_request_fw() handles this, but qcom_mdt_load() does
 131	 * not.  But since we've already gotten through adreno_request_fw()
 132	 * we know which of the two cases it is:
 133	 */
 134	if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
 135		ret = qcom_mdt_load(dev, fw, fwname, pasid,
 136				mem_region, mem_phys, mem_size, NULL);
 137	} else {
 138		char *newname;
 139
 140		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
 141
 142		ret = qcom_mdt_load(dev, fw, newname, pasid,
 143				mem_region, mem_phys, mem_size, NULL);
 144		kfree(newname);
 145	}
 146	if (ret)
 147		goto out;
 148
 149	/* Send the image to the secure world */
 150	ret = qcom_scm_pas_auth_and_reset(pasid);
 151
 152	/*
 153	 * If the scm call returns -EOPNOTSUPP we assume that this target
 154	 * doesn't need/support the zap shader so quietly fail
 155	 */
 156	if (ret == -EOPNOTSUPP)
 157		zap_available = false;
 158	else if (ret)
 159		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 160
 161out:
 162	if (mem_region)
 163		memunmap(mem_region);
 164
 165	release_firmware(fw);
 166
 167	return ret;
 168}
 169
 170int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
 171{
 172	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 173	struct platform_device *pdev = gpu->pdev;
 174
 175	/* Short cut if we determine the zap shader isn't available/needed */
 176	if (!zap_available)
 177		return -ENODEV;
 178
 179	/* We need SCM to be able to load the firmware */
 180	if (!qcom_scm_is_available()) {
 181		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
 182		return -EPROBE_DEFER;
 183	}
 184
 185	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 186}
 187
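/*
 * Create the GPU address space on top of a plain IOMMU domain.  The range
 * runs from 16MB up to 4GB-16MB; the low 16MB is presumably left unmapped
 * so that stray accesses near iova 0 fault instead of hitting valid
 * mappings.
 */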
 188struct msm_gem_address_space *
 189adreno_iommu_create_address_space(struct msm_gpu *gpu,
 190		struct platform_device *pdev)
 191{
 192	struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
 193	struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
 194	struct msm_gem_address_space *aspace;
 195
 196	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
 197		0xffffffff - SZ_16M);
 198
 199	if (IS_ERR(aspace) && !IS_ERR(mmu))
 200		mmu->funcs->destroy(mmu);
 201
 202	return aspace;
 203}
 204
 205int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 206{
 207	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 208
 209	switch (param) {
 210	case MSM_PARAM_GPU_ID:
 211		*value = adreno_gpu->info->revn;
 212		return 0;
 213	case MSM_PARAM_GMEM_SIZE:
 214		*value = adreno_gpu->gmem;
 215		return 0;
 216	case MSM_PARAM_GMEM_BASE:
 217		*value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
 218		return 0;
 219	case MSM_PARAM_CHIP_ID:
 220		*value = adreno_gpu->rev.patchid |
 221				(adreno_gpu->rev.minor << 8) |
 222				(adreno_gpu->rev.major << 16) |
 223				(adreno_gpu->rev.core << 24);
 224		return 0;
 225	case MSM_PARAM_MAX_FREQ:
 226		*value = adreno_gpu->base.fast_rate;
 227		return 0;
 228	case MSM_PARAM_TIMESTAMP:
 229		if (adreno_gpu->funcs->get_timestamp) {
 230			int ret;
 231
 232			pm_runtime_get_sync(&gpu->pdev->dev);
 233			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
 234			pm_runtime_put_autosuspend(&gpu->pdev->dev);
 235
 236			return ret;
 237		}
 238		return -EINVAL;
 239	case MSM_PARAM_NR_RINGS:
 240		*value = gpu->nr_rings;
 241		return 0;
 242	case MSM_PARAM_PP_PGTABLE:
 243		*value = 0;
 244		return 0;
 245	case MSM_PARAM_FAULTS:
 246		*value = gpu->global_faults;
 247		return 0;
 248	default:
 249		DBG("%s: invalid param: %u", gpu->name, param);
 250		return -EINVAL;
 251	}
 252}
 253
 254const struct firmware *
 255adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 256{
 257	struct drm_device *drm = adreno_gpu->base.dev;
 258	const struct firmware *fw = NULL;
 259	char *newname;
 260	int ret;
 261
 262	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
 263	if (!newname)
 264		return ERR_PTR(-ENOMEM);
 265
 266	/*
 267	 * Try first to load from qcom/$fwfile using a direct load (to avoid
 268	 * a potential timeout waiting for usermode helper)
 269	 */
 270	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
 271	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
 272
 273		ret = request_firmware_direct(&fw, newname, drm->dev);
 274		if (!ret) {
 275			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
 276				newname);
 277			adreno_gpu->fwloc = FW_LOCATION_NEW;
 278			goto out;
 279		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 280			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 281				newname, ret);
 282			fw = ERR_PTR(ret);
 283			goto out;
 284		}
 285	}
 286
 287	/*
 288	 * Then try the legacy location without qcom/ prefix
 289	 */
 290	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
 291	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
 292
 293		ret = request_firmware_direct(&fw, fwname, drm->dev);
 294		if (!ret) {
 295			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
 296				newname);
 297			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
 298			goto out;
 299		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 300			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 301				fwname, ret);
 302			fw = ERR_PTR(ret);
 303			goto out;
 304		}
 305	}
 306
 307	/*
 308	 * Finally fall back to request_firmware() for cases where the
 309	 * usermode helper is needed (I think mainly android)
 310	 */
 311	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
 312	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
 313
 314		ret = request_firmware(&fw, newname, drm->dev);
 315		if (!ret) {
 316			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
 317				newname);
 318			adreno_gpu->fwloc = FW_LOCATION_HELPER;
 319			goto out;
 320		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 321			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 322				newname, ret);
 323			fw = ERR_PTR(ret);
 324			goto out;
 325		}
 326	}
 327
 328	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
 329	fw = ERR_PTR(-ENOENT);
 330out:
 331	kfree(newname);
 332	return fw;
 333}
 334
 335int adreno_load_fw(struct adreno_gpu *adreno_gpu)
 336{
 337	int i;
 338
 339	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
 340		const struct firmware *fw;
 341
 342		if (!adreno_gpu->info->fw[i])
 343			continue;
 344
 345		/* Skip if the firmware has already been loaded */
 346		if (adreno_gpu->fw[i])
 347			continue;
 348
 349		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
 350		if (IS_ERR(fw))
 351			return PTR_ERR(fw);
 352
 353		adreno_gpu->fw[i] = fw;
 354	}
 355
 356	return 0;
 357}
 358
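/*
 * Copy firmware into a GPU-readable (and GPU-read-only) BO.  Note that the
 * first dword of the firmware file is skipped; only fw->data[4..] is
 * uploaded.
 */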
 359struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
 360		const struct firmware *fw, u64 *iova)
 361{
 362	struct drm_gem_object *bo;
 363	void *ptr;
 364
 365	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
 366		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 367
 368	if (IS_ERR(ptr))
 369		return ERR_CAST(ptr);
 370
 371	memcpy(ptr, &fw->data[4], fw->size - 4);
 372
 373	msm_gem_put_vaddr(bo);
 374
 375	return bo;
 376}
 377
 378int adreno_hw_init(struct msm_gpu *gpu)
 379{
 380	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 381	int ret, i;
 382
 383	DBG("%s", gpu->name);
 384
 385	ret = adreno_load_fw(adreno_gpu);
 386	if (ret)
 387		return ret;
 388
 389	for (i = 0; i < gpu->nr_rings; i++) {
 390		struct msm_ringbuffer *ring = gpu->rb[i];
 391
 392		if (!ring)
 393			continue;
 394
 395		ring->cur = ring->start;
 396		ring->next = ring->start;
 397
 398		/* reset completed fence seqno: */
 399		ring->memptrs->fence = ring->fctx->completed_fence;
 400		ring->memptrs->rptr = 0;
 401	}
 402
 403	return 0;
 404}
 405
 406/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
 407static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
 408		struct msm_ringbuffer *ring)
 409{
 410	return ring->memptrs->rptr = adreno_gpu_read(
 411		adreno_gpu, REG_ADRENO_CP_RB_RPTR);
 412}
 413
 414struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
 415{
 416	return gpu->rb[0];
 417}
 418
 419void adreno_recover(struct msm_gpu *gpu)
 420{
 421	struct drm_device *dev = gpu->dev;
 422	int ret;
 423
 424	// XXX pm-runtime??  we *need* the device to be off after this
 425	// so maybe continuing to call ->pm_suspend/resume() is better?
 426
 427	gpu->funcs->pm_suspend(gpu);
 428	gpu->funcs->pm_resume(gpu);
 429
 430	ret = msm_gpu_hw_init(gpu);
 431	if (ret) {
 432		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 433		/* hmm, oh well? */
 434	}
 435}
 436
 437void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 438		struct msm_file_private *ctx)
 439{
 440	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 441	struct msm_drm_private *priv = gpu->dev->dev_private;
 442	struct msm_ringbuffer *ring = submit->ring;
 443	unsigned i;
 444
 445	for (i = 0; i < submit->nr_cmds; i++) {
 446		switch (submit->cmd[i].type) {
 447		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 448			/* ignore IB-targets */
 449			break;
 450		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 451			/* ignore if there has not been a ctx switch: */
 452			if (priv->lastctx == ctx)
 453				break;
 454			fallthrough;
 455		case MSM_SUBMIT_CMD_BUF:
 456			OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
 457				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
 458			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 459			OUT_RING(ring, submit->cmd[i].size);
 460			OUT_PKT2(ring);
 461			break;
 462		}
 463	}
 464
 465	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
 466	OUT_RING(ring, submit->seqno);
 467
 468	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
 469		/* Flush HLSQ lazy updates to make sure there is nothing
 470		 * pending for indirect loads after the timestamp has
 471		 * passed:
 472		 */
 473		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
 474		OUT_RING(ring, HLSQ_FLUSH);
 475	}
 476
 477	/* wait for idle before cache flush/interrupt */
 478	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
 479	OUT_RING(ring, 0x00000000);
 480
 481	if (!adreno_is_a2xx(adreno_gpu)) {
 482		/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
 483		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 484		OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
 485		OUT_RING(ring, rbmemptr(ring, fence));
 486		OUT_RING(ring, submit->seqno);
 487	} else {
 488		/* BIT(31) means something else on a2xx */
 489		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 490		OUT_RING(ring, CACHE_FLUSH_TS);
 491		OUT_RING(ring, rbmemptr(ring, fence));
 492		OUT_RING(ring, submit->seqno);
 493		OUT_PKT3(ring, CP_INTERRUPT, 1);
 494		OUT_RING(ring, 0x80000000);
 495	}
 496
 497#if 0
 498	if (adreno_is_a3xx(adreno_gpu)) {
 499		/* Dummy set-constant to trigger context rollover */
 500		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
 501		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
 502		OUT_RING(ring, 0x00000000);
 503	}
 504#endif
 505
 506	gpu->funcs->flush(gpu, ring);
 507}
 508
 509void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 510{
 511	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 512	uint32_t wptr;
 513
 514	/* Copy the shadow to the actual register */
 515	ring->cur = ring->next;
 516
 517	/*
 518	 * Mask wptr value that we calculate to fit in the HW range. This is
 519	 * to account for the possibility that the last command fit exactly into
 520	 * the ringbuffer and rb->next hasn't wrapped to zero yet
 521	 */
 522	wptr = get_wptr(ring);
 523
 524	/* ensure writes to ringbuffer have hit system memory: */
 525	mb();
 526
 527	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
 528}
 529
 530bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 531{
 532	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 533	uint32_t wptr = get_wptr(ring);
 534
 535	/* wait for CP to drain ringbuffer: */
 536	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
 537		return true;
 538
 539	/* TODO maybe we need to reset GPU here to recover from hang? */
 540	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
 541		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
 542
 543	return false;
 544}
 545
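/*
 * Capture a GPU crash state: per-ring fence/rptr/wptr plus ring contents
 * (trimmed at the last non-zero dword), and a dump of all registers listed
 * in adreno_gpu->registers.
 */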
 546int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 547{
 548	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 549	int i, count = 0;
 550
 551	kref_init(&state->ref);
 552
 553	ktime_get_real_ts64(&state->time);
 554
 555	for (i = 0; i < gpu->nr_rings; i++) {
 556		int size = 0, j;
 557
 558		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
 559		state->ring[i].iova = gpu->rb[i]->iova;
 560		state->ring[i].seqno = gpu->rb[i]->seqno;
 561		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
 562		state->ring[i].wptr = get_wptr(gpu->rb[i]);
 563
 564		/* Copy at least 'wptr' dwords of the data */
 565		size = state->ring[i].wptr;
 566
 567		/* After wptr find the last non zero dword to save space */
 568		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
 569			if (gpu->rb[i]->start[j])
 570				size = j + 1;
 571
 572		if (size) {
 573			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
 574			if (state->ring[i].data) {
 575				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
 576				state->ring[i].data_size = size << 2;
 577			}
 578		}
 579	}
 580
 581	/* Some targets prefer to collect their own registers */
 582	if (!adreno_gpu->registers)
 583		return 0;
 584
 585	/* Count the number of registers */
 586	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
 587		count += adreno_gpu->registers[i + 1] -
 588			adreno_gpu->registers[i] + 1;
 589
 590	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
 591	if (state->registers) {
 592		int pos = 0;
 593
 594		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
 595			u32 start = adreno_gpu->registers[i];
 596			u32 end   = adreno_gpu->registers[i + 1];
 597			u32 addr;
 598
 599			for (addr = start; addr <= end; addr++) {
 600				state->registers[pos++] = addr;
 601				state->registers[pos++] = gpu_read(gpu, addr);
 602			}
 603		}
 604
 605		state->nr_registers = count;
 606	}
 607
 608	return 0;
 609}
 610
 611void adreno_gpu_state_destroy(struct msm_gpu_state *state)
 612{
 613	int i;
 614
 615	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
 616		kvfree(state->ring[i].data);
 617
 618	for (i = 0; state->bos && i < state->nr_bos; i++)
 619		kvfree(state->bos[i].data);
 620
 621	kfree(state->bos);
 622	kfree(state->comm);
 623	kfree(state->cmd);
 624	kfree(state->registers);
 625}
 626
 627static void adreno_gpu_state_kref_destroy(struct kref *kref)
 628{
 629	struct msm_gpu_state *state = container_of(kref,
 630		struct msm_gpu_state, ref);
 631
 632	adreno_gpu_state_destroy(state);
 633	kfree(state);
 634}
 635
 636int adreno_gpu_state_put(struct msm_gpu_state *state)
 637{
 638	if (IS_ERR_OR_NULL(state))
 639		return 1;
 640
 641	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
 642}
 643
 644#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 645
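/*
 * Ring and BO contents in the crash state are emitted as ascii85 text so
 * the dump stays printable; this helper encodes a buffer of dwords into a
 * newly allocated string.
 */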
 646static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
 647{
 648	void *buf;
 649	size_t buf_itr = 0, buffer_size;
 650	char out[ASCII85_BUFSZ];
 651	long l;
 652	int i;
 653
 654	if (!src || !len)
 655		return NULL;
 656
 657	l = ascii85_encode_len(len);
 658
 659	/*
 660	 * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
 661	 * account for the worst case of 5 bytes per dword plus the 1 for '\0'
 662	 */
 663	buffer_size = (l * 5) + 1;
 664
 665	buf = kvmalloc(buffer_size, GFP_KERNEL);
 666	if (!buf)
 667		return NULL;
 668
 669	for (i = 0; i < l; i++)
 670		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
 671				ascii85_encode(src[i], out));
 672
 673	return buf;
 674}
 675
 676/* len is expected to be in bytes */
 677static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
 678		bool *encoded)
 679{
 680	if (!*ptr || !len)
 681		return;
 682
 683	if (!*encoded) {
 684		long datalen, i;
 685		u32 *buf = *ptr;
 686
 687		/*
 688		 * Only dump the non-zero part of the buffer - rarely will
 689		 * any data completely fill the entire allocated size of
 690		 * the buffer.
 691		 */
 692		for (datalen = 0, i = 0; i < len >> 2; i++)
 693			if (buf[i])
 694				datalen = ((i + 1) << 2);
 695
 696		/*
 697		 * If we reach here, then the originally captured binary buffer
 698		 * will be replaced with the ascii85 encoded string
 699		 */
 700		*ptr = adreno_gpu_ascii85_encode(buf, datalen);
 701
 702		kvfree(buf);
 703
 704		*encoded = true;
 705	}
 706
 707	if (!*ptr)
 708		return;
 709
 710	drm_puts(p, "    data: !!ascii85 |\n");
 711	drm_puts(p, "     ");
 712
 713	drm_puts(p, *ptr);
 714
 715	drm_puts(p, "\n");
 716}
 717
 718void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 719		struct drm_printer *p)
 720{
 721	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 722	int i;
 723
 724	if (IS_ERR_OR_NULL(state))
 725		return;
 726
 727	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
 728			adreno_gpu->info->revn, adreno_gpu->rev.core,
 729			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 730			adreno_gpu->rev.patchid);
 731
 732	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
 733
 734	drm_puts(p, "ringbuffer:\n");
 735
 736	for (i = 0; i < gpu->nr_rings; i++) {
 737		drm_printf(p, "  - id: %d\n", i);
 738		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
 739		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
 740		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
 741		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
 742		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
 743		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);
 744
 745		adreno_show_object(p, &state->ring[i].data,
 746			state->ring[i].data_size, &state->ring[i].encoded);
 747	}
 748
 749	if (state->bos) {
 750		drm_puts(p, "bos:\n");
 751
 752		for (i = 0; i < state->nr_bos; i++) {
 753			drm_printf(p, "  - iova: 0x%016llx\n",
 754				state->bos[i].iova);
 755			drm_printf(p, "    size: %zd\n", state->bos[i].size);
 756
 757			adreno_show_object(p, &state->bos[i].data,
 758				state->bos[i].size, &state->bos[i].encoded);
 759		}
 760	}
 761
 762	if (state->nr_registers) {
 763		drm_puts(p, "registers:\n");
 764
 765		for (i = 0; i < state->nr_registers; i++) {
 766			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
 767				state->registers[i * 2] << 2,
 768				state->registers[(i * 2) + 1]);
 769		}
 770	}
 771}
 772#endif
 773
 774/* Dump common gpu status and scratch registers on any hang, to make
 775 * the hangcheck logs more useful.  The scratch registers seem always
 776 * safe to read when GPU has hung (unlike some other regs, depending
 777 * on how the GPU hung), and they are useful to match up to cmdstream
 778 * dumps when debugging hangs:
 779 */
 780void adreno_dump_info(struct msm_gpu *gpu)
 781{
 782	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 783	int i;
 784
 785	printk("revision: %d (%d.%d.%d.%d)\n",
 786			adreno_gpu->info->revn, adreno_gpu->rev.core,
 787			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 788			adreno_gpu->rev.patchid);
 789
 790	for (i = 0; i < gpu->nr_rings; i++) {
 791		struct msm_ringbuffer *ring = gpu->rb[i];
 792
 793		printk("rb %d: fence:    %d/%d\n", i,
 794			ring->memptrs->fence,
 795			ring->seqno);
 796
 797		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
 798		printk("rb wptr:  %d\n", get_wptr(ring));
 799	}
 800}
 801
 802/* would be nice to not have to duplicate the _show() stuff with printk(): */
 803void adreno_dump(struct msm_gpu *gpu)
 804{
 805	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 806	int i;
 807
 808	if (!adreno_gpu->registers)
 809		return;
 810
 811	/* dump these out in a form that can be parsed by demsm: */
 812	printk("IO:region %s 00000000 00020000\n", gpu->name);
 813	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
 814		uint32_t start = adreno_gpu->registers[i];
 815		uint32_t end   = adreno_gpu->registers[i+1];
 816		uint32_t addr;
 817
 818		for (addr = start; addr <= end; addr++) {
 819			uint32_t val = gpu_read(gpu, addr);
 820			printk("IO:R %08x %08x\n", addr<<2, val);
 821		}
 822	}
 823}
 824
 825static uint32_t ring_freewords(struct msm_ringbuffer *ring)
 826{
 827	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
 828	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
 829	/* Use ring->next to calculate free size */
 830	uint32_t wptr = ring->next - ring->start;
 831	uint32_t rptr = get_rptr(adreno_gpu, ring);
 832	return (rptr + (size - 1) - wptr) % size;
 833}
 834
 835void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
 836{
 837	if (spin_until(ring_freewords(ring) >= ndwords))
 838		DRM_DEV_ERROR(ring->gpu->dev->dev,
 839			"timeout waiting for space in ringbuffer %d\n",
 840			ring->id);
 841}
 842
 843/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
 844static int adreno_get_legacy_pwrlevels(struct device *dev)
 845{
 846	struct device_node *child, *node;
 847	int ret;
 848
 849	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
 850	if (!node) {
 851		DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
 852		return -ENXIO;
 853	}
 854
 855	for_each_child_of_node(node, child) {
 856		unsigned int val;
 857
 858		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
 859		if (ret)
 860			continue;
 861
 862		/*
 863		 * Skip the intentionally bogus clock value found at the bottom
 864		 * of most legacy frequency tables
 865		 */
 866		if (val != 27000000)
 867			dev_pm_opp_add(dev, val, 0);
 868	}
 869
 870	of_node_put(node);
 871
 872	return 0;
 873}
 874
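/*
 * Populate the OPP table, either from an operating-points-v2 node or from
 * the legacy qcom,gpu-pwrlevels entries, and pick the highest listed
 * frequency as fast_rate (falling back to 200MHz if nothing is found).
 */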
 875static void adreno_get_pwrlevels(struct device *dev,
 876		struct msm_gpu *gpu)
 877{
 878	unsigned long freq = ULONG_MAX;
 879	struct dev_pm_opp *opp;
 880	int ret;
 881
 882	gpu->fast_rate = 0;
 883
 884	/* You down with OPP? */
 885	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
 886		ret = adreno_get_legacy_pwrlevels(dev);
 887	else {
 888		ret = dev_pm_opp_of_add_table(dev);
 889		if (ret)
 890			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
 891	}
 892
 893	if (!ret) {
 894		/* Find the fastest defined rate */
 895		opp = dev_pm_opp_find_freq_floor(dev, &freq);
 896		if (!IS_ERR(opp)) {
 897			gpu->fast_rate = freq;
 898			dev_pm_opp_put(opp);
 899		}
 900	}
 901
 902	if (!gpu->fast_rate) {
 903		dev_warn(dev,
 904			"Could not find a clock rate. Using a reasonable default\n");
 905		/* Pick a suitably safe clock speed for any target */
 906		gpu->fast_rate = 200000000;
 907	}
 908
 909	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
 910}
 911
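/*
 * On targets that use OCMEM, GMEM lives in on-chip memory: allocate it
 * here and update adreno_gpu->gmem to the size actually handed back.
 */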
 912int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
 913			  struct adreno_ocmem *adreno_ocmem)
 914{
 915	struct ocmem_buf *ocmem_hdl;
 916	struct ocmem *ocmem;
 917
 918	ocmem = of_get_ocmem(dev);
 919	if (IS_ERR(ocmem)) {
 920		if (PTR_ERR(ocmem) == -ENODEV) {
 921			/*
 922			 * Return success since either the ocmem property was
 923			 * not specified in device tree, or ocmem support is
 924			 * not compiled into the kernel.
 925			 */
 926			return 0;
 927		}
 928
 929		return PTR_ERR(ocmem);
 930	}
 931
 932	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
 933	if (IS_ERR(ocmem_hdl))
 934		return PTR_ERR(ocmem_hdl);
 935
 936	adreno_ocmem->ocmem = ocmem;
 937	adreno_ocmem->base = ocmem_hdl->addr;
 938	adreno_ocmem->hdl = ocmem_hdl;
 939	adreno_gpu->gmem = ocmem_hdl->len;
 940
 941	return 0;
 942}
 943
 944void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
 945{
 946	if (adreno_ocmem && adreno_ocmem->base)
 947		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
 948			   adreno_ocmem->hdl);
 949}
 950
 951int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 952		struct adreno_gpu *adreno_gpu,
 953		const struct adreno_gpu_funcs *funcs, int nr_rings)
 954{
 955	struct device *dev = &pdev->dev;
 956	struct adreno_platform_config *config = dev->platform_data;
 957	struct msm_gpu_config adreno_gpu_config  = { 0 };
 958	struct msm_gpu *gpu = &adreno_gpu->base;
 959	int ret;
 960
 961	adreno_gpu->funcs = funcs;
 962	adreno_gpu->info = adreno_info(config->rev);
 963	adreno_gpu->gmem = adreno_gpu->info->gmem;
 964	adreno_gpu->revn = adreno_gpu->info->revn;
 965	adreno_gpu->rev = config->rev;
 966
 967	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
 968
 969	adreno_gpu_config.nr_rings = nr_rings;
 970
 971	adreno_get_pwrlevels(dev, gpu);
 972
 973	pm_runtime_set_autosuspend_delay(dev,
 974		adreno_gpu->info->inactive_period);
 975	pm_runtime_use_autosuspend(dev);
 976	pm_runtime_enable(dev);
 977
 978	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 979			adreno_gpu->info->name, &adreno_gpu_config);
 980	if (ret)
 981		return ret;
 982
 983	/*
 984	 * The legacy case, before "interconnect-names", only has a
 985	 * single interconnect path which is equivalent to "gfx-mem"
 986	 */
 987	if (!of_find_property(dev->of_node, "interconnect-names", NULL)) {
 988		gpu->icc_path = of_icc_get(dev, NULL);
 989	} else {
 990		gpu->icc_path = of_icc_get(dev, "gfx-mem");
 991		gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
 992	}
 993
 994	if (IS_ERR(gpu->icc_path)) {
 995		ret = PTR_ERR(gpu->icc_path);
 996		gpu->icc_path = NULL;
 997		return ret;
 998	}
 999
1000	if (IS_ERR(gpu->ocmem_icc_path)) {
1001		ret = PTR_ERR(gpu->ocmem_icc_path);
1002		gpu->ocmem_icc_path = NULL;
1003		/* allow -ENODATA, ocmem icc is optional */
1004		if (ret != -ENODATA)
1005			return ret;
1006	}
1007
1008	return 0;
1009}
1010
1011void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
1012{
1013	struct msm_gpu *gpu = &adreno_gpu->base;
1014	struct msm_drm_private *priv = gpu->dev->dev_private;
1015	unsigned int i;
1016
1017	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
1018		release_firmware(adreno_gpu->fw[i]);
1019
1020	pm_runtime_disable(&priv->gpu_pdev->dev);
1021
1022	msm_gpu_cleanup(&adreno_gpu->base);
1023
1024	icc_put(gpu->icc_path);
1025	icc_put(gpu->ocmem_icc_path);
1026}