Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include "drm/drm_drv.h"
   8
   9#include "msm_gpu.h"
  10#include "msm_gem.h"
  11#include "msm_mmu.h"
  12#include "msm_fence.h"
  13#include "msm_gpu_trace.h"
  14#include "adreno/adreno_gpu.h"
  15
  16#include <generated/utsrelease.h>
  17#include <linux/string_helpers.h>
 
 
  18#include <linux/devcoredump.h>
  19#include <linux/sched/task.h>
  20
  21/*
  22 * Power Management:
  23 */
  24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  25static int enable_pwrrail(struct msm_gpu *gpu)
  26{
  27	struct drm_device *dev = gpu->dev;
  28	int ret = 0;
  29
  30	if (gpu->gpu_reg) {
  31		ret = regulator_enable(gpu->gpu_reg);
  32		if (ret) {
  33			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
  34			return ret;
  35		}
  36	}
  37
  38	if (gpu->gpu_cx) {
  39		ret = regulator_enable(gpu->gpu_cx);
  40		if (ret) {
  41			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
  42			return ret;
  43		}
  44	}
  45
  46	return 0;
  47}
  48
  49static int disable_pwrrail(struct msm_gpu *gpu)
  50{
  51	if (gpu->gpu_cx)
  52		regulator_disable(gpu->gpu_cx);
  53	if (gpu->gpu_reg)
  54		regulator_disable(gpu->gpu_reg);
  55	return 0;
  56}
  57
  58static int enable_clk(struct msm_gpu *gpu)
  59{
  60	if (gpu->core_clk && gpu->fast_rate)
  61		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);
  62
  63	/* Set the RBBM timer rate to 19.2Mhz */
  64	if (gpu->rbbmtimer_clk)
  65		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
  66
  67	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
  68}
  69
  70static int disable_clk(struct msm_gpu *gpu)
  71{
  72	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
  73
  74	/*
  75	 * Set the clock to a deliberately low rate. On older targets the clock
  76	 * speed had to be non zero to avoid problems. On newer targets this
  77	 * will be rounded down to zero anyway so it all works out.
  78	 */
  79	if (gpu->core_clk)
  80		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);
  81
  82	if (gpu->rbbmtimer_clk)
  83		clk_set_rate(gpu->rbbmtimer_clk, 0);
  84
  85	return 0;
  86}
  87
  88static int enable_axi(struct msm_gpu *gpu)
  89{
  90	return clk_prepare_enable(gpu->ebi1_clk);
  91}
  92
  93static int disable_axi(struct msm_gpu *gpu)
  94{
  95	clk_disable_unprepare(gpu->ebi1_clk);
  96	return 0;
  97}
  98
 
 
 
 
 
 
 
 
  99int msm_gpu_pm_resume(struct msm_gpu *gpu)
 100{
 101	int ret;
 102
 103	DBG("%s", gpu->name);
 104	trace_msm_gpu_resume(0);
 105
 106	ret = enable_pwrrail(gpu);
 107	if (ret)
 108		return ret;
 109
 110	ret = enable_clk(gpu);
 111	if (ret)
 112		return ret;
 113
 114	ret = enable_axi(gpu);
 115	if (ret)
 116		return ret;
 117
 118	msm_devfreq_resume(gpu);
 119
 120	gpu->needs_hw_init = true;
 121
 122	return 0;
 123}
 124
 125int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 126{
 127	int ret;
 128
 129	DBG("%s", gpu->name);
 130	trace_msm_gpu_suspend(0);
 131
 132	msm_devfreq_suspend(gpu);
 133
 134	ret = disable_axi(gpu);
 135	if (ret)
 136		return ret;
 137
 138	ret = disable_clk(gpu);
 139	if (ret)
 140		return ret;
 141
 142	ret = disable_pwrrail(gpu);
 143	if (ret)
 144		return ret;
 145
 146	gpu->suspend_count++;
 147
 148	return 0;
 149}
 150
 151void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
 152			 struct drm_printer *p)
 153{
 154	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
 155	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
 156	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
 157}
 158
 159int msm_gpu_hw_init(struct msm_gpu *gpu)
 160{
 161	int ret;
 162
 163	WARN_ON(!mutex_is_locked(&gpu->lock));
 164
 165	if (!gpu->needs_hw_init)
 166		return 0;
 167
 168	disable_irq(gpu->irq);
 169	ret = gpu->funcs->hw_init(gpu);
 170	if (!ret)
 171		gpu->needs_hw_init = false;
 172	enable_irq(gpu->irq);
 173
 174	return ret;
 175}
 176
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 177#ifdef CONFIG_DEV_COREDUMP
 178static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 179		size_t count, void *data, size_t datalen)
 180{
 181	struct msm_gpu *gpu = data;
 182	struct drm_print_iterator iter;
 183	struct drm_printer p;
 184	struct msm_gpu_state *state;
 185
 186	state = msm_gpu_crashstate_get(gpu);
 187	if (!state)
 188		return 0;
 189
 190	iter.data = buffer;
 191	iter.offset = 0;
 192	iter.start = offset;
 193	iter.remain = count;
 194
 195	p = drm_coredump_printer(&iter);
 196
 197	drm_printf(&p, "---\n");
 198	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
 199	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
 200	drm_printf(&p, "time: %lld.%09ld\n",
 201		state->time.tv_sec, state->time.tv_nsec);
 202	if (state->comm)
 203		drm_printf(&p, "comm: %s\n", state->comm);
 204	if (state->cmd)
 205		drm_printf(&p, "cmdline: %s\n", state->cmd);
 206
 207	gpu->funcs->show(gpu, state, &p);
 208
 209	msm_gpu_crashstate_put(gpu);
 210
 211	return count - iter.remain;
 212}
 213
 214static void msm_gpu_devcoredump_free(void *data)
 215{
 216	struct msm_gpu *gpu = data;
 217
 218	msm_gpu_crashstate_put(gpu);
 219}
 220
 221static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 222		struct drm_gem_object *obj, u64 iova, bool full)
 223{
 224	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 225
 226	/* Don't record write only objects */
 227	state_bo->size = obj->size;
 228	state_bo->iova = iova;
 229
 230	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));
 231
 232	memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));
 233
 234	if (full) {
 235		void *ptr;
 236
 237		state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
 238		if (!state_bo->data)
 239			goto out;
 240
 241		msm_gem_lock(obj);
 242		ptr = msm_gem_get_vaddr_active(obj);
 243		msm_gem_unlock(obj);
 244		if (IS_ERR(ptr)) {
 245			kvfree(state_bo->data);
 246			state_bo->data = NULL;
 247			goto out;
 248		}
 249
 250		memcpy(state_bo->data, ptr, obj->size);
 251		msm_gem_put_vaddr(obj);
 252	}
 253out:
 254	state->nr_bos++;
 255}
 256
 257static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 258		struct msm_gem_submit *submit, char *comm, char *cmd)
 259{
 260	struct msm_gpu_state *state;
 261
 262	/* Check if the target supports capturing crash state */
 263	if (!gpu->funcs->gpu_state_get)
 264		return;
 265
 266	/* Only save one crash state at a time */
 267	if (gpu->crashstate)
 268		return;
 269
 270	state = gpu->funcs->gpu_state_get(gpu);
 271	if (IS_ERR_OR_NULL(state))
 272		return;
 273
 274	/* Fill in the additional crash state information */
 275	state->comm = kstrdup(comm, GFP_KERNEL);
 276	state->cmd = kstrdup(cmd, GFP_KERNEL);
 277	state->fault_info = gpu->fault_info;
 278
 279	if (submit) {
 280		int i;
 
 
 
 
 
 
 
 
 
 281
 282		state->bos = kcalloc(submit->nr_bos,
 283			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 284
 285		for (i = 0; state->bos && i < submit->nr_bos; i++) {
 286			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
 287						  submit->bos[i].iova,
 288						  should_dump(submit, i));
 
 
 
 
 
 
 
 
 
 
 289		}
 290	}
 291
 292	/* Set the active crash state to be dumped on failure */
 293	gpu->crashstate = state;
 294
 295	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 
 296		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
 297}
 298#else
 299static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 300		struct msm_gem_submit *submit, char *comm, char *cmd)
 301{
 302}
 303#endif
 304
 305/*
 306 * Hangcheck detection for locked gpu:
 307 */
 308
 309static struct msm_gem_submit *
 310find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 311{
 312	struct msm_gem_submit *submit;
 313	unsigned long flags;
 314
 315	spin_lock_irqsave(&ring->submit_lock, flags);
 316	list_for_each_entry(submit, &ring->submits, node) {
 317		if (submit->seqno == fence) {
 318			spin_unlock_irqrestore(&ring->submit_lock, flags);
 319			return submit;
 320		}
 321	}
 322	spin_unlock_irqrestore(&ring->submit_lock, flags);
 323
 324	return NULL;
 325}
 326
 327static void retire_submits(struct msm_gpu *gpu);
 328
 329static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
 330{
 331	struct msm_file_private *ctx = submit->queue->ctx;
 332	struct task_struct *task;
 333
 334	WARN_ON(!mutex_is_locked(&submit->gpu->lock));
 335
 336	/* Note that kstrdup will return NULL if argument is NULL: */
 337	*comm = kstrdup(ctx->comm, GFP_KERNEL);
 338	*cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
 339
 340	task = get_pid_task(submit->pid, PIDTYPE_PID);
 341	if (!task)
 342		return;
 343
 344	if (!*comm)
 345		*comm = kstrdup(task->comm, GFP_KERNEL);
 346
 347	if (!*cmd)
 348		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 349
 350	put_task_struct(task);
 351}
 352
 353static void recover_worker(struct kthread_work *work)
 354{
 355	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 356	struct drm_device *dev = gpu->dev;
 357	struct msm_drm_private *priv = dev->dev_private;
 358	struct msm_gem_submit *submit;
 359	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 360	char *comm = NULL, *cmd = NULL;
 361	int i;
 362
 363	mutex_lock(&gpu->lock);
 364
 365	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 366
 367	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 
 
 368
 369	/*
 370	 * If the submit retired while we were waiting for the worker to run,
 371	 * or waiting to acquire the gpu lock, then nothing more to do.
 372	 */
 373	if (!submit)
 374		goto out_unlock;
 
 
 
 
 375
 376	/* Increment the fault counts */
 377	submit->queue->faults++;
 378	if (submit->aspace)
 379		submit->aspace->faults++;
 380
 381	get_comm_cmdline(submit, &comm, &cmd);
 382
 383	if (comm && cmd) {
 384		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 385			      gpu->name, comm, cmd);
 386
 387		msm_rd_dump_submit(priv->hangrd, submit,
 388				   "offending task: %s (%s)", comm, cmd);
 389	} else {
 390		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
 391
 392		msm_rd_dump_submit(priv->hangrd, submit, NULL);
 
 393	}
 394
 395	/* Record the crash state */
 396	pm_runtime_get_sync(&gpu->pdev->dev);
 397	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 
 398
 399	kfree(cmd);
 400	kfree(comm);
 401
 402	/*
 403	 * Update all the rings with the latest and greatest fence.. this
 404	 * needs to happen after msm_rd_dump_submit() to ensure that the
 405	 * bo's referenced by the offending submit are still around.
 406	 */
 407	for (i = 0; i < gpu->nr_rings; i++) {
 408		struct msm_ringbuffer *ring = gpu->rb[i];
 409
 410		uint32_t fence = ring->memptrs->fence;
 411
 412		/*
 413		 * For the current (faulting?) ring/submit advance the fence by
 414		 * one more to clear the faulting submit
 415		 */
 416		if (ring == cur_ring)
 417			ring->memptrs->fence = ++fence;
 418
 419		msm_update_fence(ring->fctx, fence);
 420	}
 421
 422	if (msm_gpu_active(gpu)) {
 423		/* retire completed submits, plus the one that hung: */
 424		retire_submits(gpu);
 425
 
 426		gpu->funcs->recover(gpu);
 
 427
 428		/*
 429		 * Replay all remaining submits starting with highest priority
 430		 * ring
 431		 */
 432		for (i = 0; i < gpu->nr_rings; i++) {
 433			struct msm_ringbuffer *ring = gpu->rb[i];
 434			unsigned long flags;
 435
 436			spin_lock_irqsave(&ring->submit_lock, flags);
 437			list_for_each_entry(submit, &ring->submits, node)
 438				gpu->funcs->submit(gpu, submit);
 439			spin_unlock_irqrestore(&ring->submit_lock, flags);
 440		}
 441	}
 442
 443	pm_runtime_put(&gpu->pdev->dev);
 444
 445out_unlock:
 446	mutex_unlock(&gpu->lock);
 447
 448	msm_gpu_retire(gpu);
 449}
 450
 451static void fault_worker(struct kthread_work *work)
 452{
 453	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
 
 454	struct msm_gem_submit *submit;
 455	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 456	char *comm = NULL, *cmd = NULL;
 457
 458	mutex_lock(&gpu->lock);
 459
 460	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 461	if (submit && submit->fault_dumped)
 462		goto resume_smmu;
 463
 464	if (submit) {
 465		get_comm_cmdline(submit, &comm, &cmd);
 
 
 
 
 
 
 
 466
 467		/*
 468		 * When we get GPU iova faults, we can get 1000s of them,
 469		 * but we really only want to log the first one.
 470		 */
 471		submit->fault_dumped = true;
 472	}
 473
 474	/* Record the crash state */
 475	pm_runtime_get_sync(&gpu->pdev->dev);
 476	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 477	pm_runtime_put_sync(&gpu->pdev->dev);
 478
 479	kfree(cmd);
 480	kfree(comm);
 481
 482resume_smmu:
 483	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
 484	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 485
 486	mutex_unlock(&gpu->lock);
 487}
 488
 489static void hangcheck_timer_reset(struct msm_gpu *gpu)
 490{
 491	struct msm_drm_private *priv = gpu->dev->dev_private;
 492	mod_timer(&gpu->hangcheck_timer,
 493			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
 494}
 495
 496static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 497{
 498	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
 499		return false;
 500
 501	if (!gpu->funcs->progress)
 502		return false;
 503
 504	if (!gpu->funcs->progress(gpu, ring))
 505		return false;
 506
 507	ring->hangcheck_progress_retries++;
 508	return true;
 509}
 510
 511static void hangcheck_handler(struct timer_list *t)
 512{
 513	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 514	struct drm_device *dev = gpu->dev;
 515	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 516	uint32_t fence = ring->memptrs->fence;
 517
 518	if (fence != ring->hangcheck_fence) {
 519		/* some progress has been made.. ya! */
 520		ring->hangcheck_fence = fence;
 521		ring->hangcheck_progress_retries = 0;
 522	} else if (fence_before(fence, ring->fctx->last_fence) &&
 523			!made_progress(gpu, ring)) {
 524		/* no progress and not done.. hung! */
 525		ring->hangcheck_fence = fence;
 526		ring->hangcheck_progress_retries = 0;
 527		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 528				gpu->name, ring->id);
 529		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 530				gpu->name, fence);
 531		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 532				gpu->name, ring->fctx->last_fence);
 533
 534		kthread_queue_work(gpu->worker, &gpu->recover_work);
 535	}
 536
 537	/* if still more pending work, reset the hangcheck timer: */
 538	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
 539		hangcheck_timer_reset(gpu);
 540
 541	/* workaround for missing irq: */
 542	msm_gpu_retire(gpu);
 543}
 544
 545/*
 546 * Performance Counters:
 547 */
 548
 549/* called under perf_lock */
 550static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
 551{
 552	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
 553	int i, n = min(ncntrs, gpu->num_perfcntrs);
 554
 555	/* read current values: */
 556	for (i = 0; i < gpu->num_perfcntrs; i++)
 557		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
 558
 559	/* update cntrs: */
 560	for (i = 0; i < n; i++)
 561		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
 562
 563	/* save current values: */
 564	for (i = 0; i < gpu->num_perfcntrs; i++)
 565		gpu->last_cntrs[i] = current_cntrs[i];
 566
 567	return n;
 568}
 569
 570static void update_sw_cntrs(struct msm_gpu *gpu)
 571{
 572	ktime_t time;
 573	uint32_t elapsed;
 574	unsigned long flags;
 575
 576	spin_lock_irqsave(&gpu->perf_lock, flags);
 577	if (!gpu->perfcntr_active)
 578		goto out;
 579
 580	time = ktime_get();
 581	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
 582
 583	gpu->totaltime += elapsed;
 584	if (gpu->last_sample.active)
 585		gpu->activetime += elapsed;
 586
 587	gpu->last_sample.active = msm_gpu_active(gpu);
 588	gpu->last_sample.time = time;
 589
 590out:
 591	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 592}
 593
 594void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 595{
 596	unsigned long flags;
 597
 598	pm_runtime_get_sync(&gpu->pdev->dev);
 599
 600	spin_lock_irqsave(&gpu->perf_lock, flags);
 601	/* we could dynamically enable/disable perfcntr registers too.. */
 602	gpu->last_sample.active = msm_gpu_active(gpu);
 603	gpu->last_sample.time = ktime_get();
 604	gpu->activetime = gpu->totaltime = 0;
 605	gpu->perfcntr_active = true;
 606	update_hw_cntrs(gpu, 0, NULL);
 607	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 608}
 609
 610void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 611{
 612	gpu->perfcntr_active = false;
 613	pm_runtime_put_sync(&gpu->pdev->dev);
 614}
 615
 616/* returns -errno or # of cntrs sampled */
 617int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 618		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
 619{
 620	unsigned long flags;
 621	int ret;
 622
 623	spin_lock_irqsave(&gpu->perf_lock, flags);
 624
 625	if (!gpu->perfcntr_active) {
 626		ret = -EINVAL;
 627		goto out;
 628	}
 629
 630	*activetime = gpu->activetime;
 631	*totaltime = gpu->totaltime;
 632
 633	gpu->activetime = gpu->totaltime = 0;
 634
 635	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
 636
 637out:
 638	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 639
 640	return ret;
 641}
 642
 643/*
 644 * Cmdstream submission/retirement:
 645 */
 646
 647static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 648		struct msm_gem_submit *submit)
 649{
 650	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 651	volatile struct msm_gpu_submit_stats *stats;
 652	u64 elapsed, clock = 0, cycles;
 653	unsigned long flags;
 654
 655	stats = &ring->memptrs->stats[index];
 656	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
 657	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
 658	do_div(elapsed, 192);
 659
 660	cycles = stats->cpcycles_end - stats->cpcycles_start;
 661
 662	/* Calculate the clock frequency from the number of CP cycles */
 663	if (elapsed) {
 664		clock = cycles * 1000;
 665		do_div(clock, elapsed);
 666	}
 667
 668	submit->queue->ctx->elapsed_ns += elapsed;
 669	submit->queue->ctx->cycles     += cycles;
 670
 671	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 672		stats->alwayson_start, stats->alwayson_end);
 673
 674	msm_submit_retire(submit);
 
 
 
 
 
 
 
 
 675
 676	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 
 677
 678	spin_lock_irqsave(&ring->submit_lock, flags);
 679	list_del(&submit->node);
 680	spin_unlock_irqrestore(&ring->submit_lock, flags);
 681
 682	/* Update devfreq on transition from active->idle: */
 683	mutex_lock(&gpu->active_lock);
 684	gpu->active_submits--;
 685	WARN_ON(gpu->active_submits < 0);
 686	if (!gpu->active_submits) {
 687		msm_devfreq_idle(gpu);
 688		pm_runtime_put_autosuspend(&gpu->pdev->dev);
 689	}
 690
 691	mutex_unlock(&gpu->active_lock);
 692
 693	msm_gem_submit_put(submit);
 694}
 695
 696static void retire_submits(struct msm_gpu *gpu)
 697{
 698	int i;
 699
 700	/* Retire the commits starting with highest priority */
 701	for (i = 0; i < gpu->nr_rings; i++) {
 702		struct msm_ringbuffer *ring = gpu->rb[i];
 703
 704		while (true) {
 705			struct msm_gem_submit *submit = NULL;
 706			unsigned long flags;
 707
 708			spin_lock_irqsave(&ring->submit_lock, flags);
 709			submit = list_first_entry_or_null(&ring->submits,
 710					struct msm_gem_submit, node);
 711			spin_unlock_irqrestore(&ring->submit_lock, flags);
 712
 713			/*
 714			 * If no submit, we are done.  If submit->fence hasn't
 715			 * been signalled, then later submits are not signalled
 716			 * either, so we are also done.
 717			 */
 718			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
 719				retire_submit(gpu, ring, submit);
 720			} else {
 721				break;
 722			}
 723		}
 724	}
 725
 726	wake_up_all(&gpu->retire_event);
 727}
 728
 729static void retire_worker(struct kthread_work *work)
 730{
 731	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 
 
 
 
 732
 733	retire_submits(gpu);
 734}
 735
 736/* call from irq handler to schedule work to retire bo's */
 737void msm_gpu_retire(struct msm_gpu *gpu)
 738{
 739	int i;
 740
 741	for (i = 0; i < gpu->nr_rings; i++)
 742		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 743
 744	kthread_queue_work(gpu->worker, &gpu->retire_work);
 745	update_sw_cntrs(gpu);
 746}
 747
 748/* add bo's to gpu's ring, and kick gpu: */
 749void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 750{
 
 
 751	struct msm_ringbuffer *ring = submit->ring;
 752	unsigned long flags;
 753
 754	WARN_ON(!mutex_is_locked(&gpu->lock));
 755
 756	pm_runtime_get_sync(&gpu->pdev->dev);
 757
 758	msm_gpu_hw_init(gpu);
 759
 760	submit->seqno = submit->hw_fence->seqno;
 
 
 761
 762	update_sw_cntrs(gpu);
 763
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 764	/*
 765	 * ring->submits holds a ref to the submit, to deal with the case
 766	 * that a submit completes before msm_ioctl_gem_submit() returns.
 767	 */
 768	msm_gem_submit_get(submit);
 769
 770	spin_lock_irqsave(&ring->submit_lock, flags);
 771	list_add_tail(&submit->node, &ring->submits);
 772	spin_unlock_irqrestore(&ring->submit_lock, flags);
 773
 774	/* Update devfreq on transition from idle->active: */
 775	mutex_lock(&gpu->active_lock);
 776	if (!gpu->active_submits) {
 777		pm_runtime_get(&gpu->pdev->dev);
 778		msm_devfreq_active(gpu);
 779	}
 780	gpu->active_submits++;
 781	mutex_unlock(&gpu->active_lock);
 782
 783	gpu->funcs->submit(gpu, submit);
 784	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 785
 786	pm_runtime_put(&gpu->pdev->dev);
 787	hangcheck_timer_reset(gpu);
 788}
 789
 790/*
 791 * Init/Cleanup:
 792 */
 793
 794static irqreturn_t irq_handler(int irq, void *data)
 795{
 796	struct msm_gpu *gpu = data;
 797	return gpu->funcs->irq(gpu);
 798}
 799
 800static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 801{
 802	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 803
 804	if (ret < 1) {
 805		gpu->nr_clocks = 0;
 806		return ret;
 807	}
 808
 809	gpu->nr_clocks = ret;
 810
 811	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 812		gpu->nr_clocks, "core");
 813
 814	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 815		gpu->nr_clocks, "rbbmtimer");
 816
 817	return 0;
 818}
 819
 820/* Return a new address space for a msm_drm_private instance */
 821struct msm_gem_address_space *
 822msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 823{
 824	struct msm_gem_address_space *aspace = NULL;
 825	if (!gpu)
 826		return NULL;
 827
 828	/*
 829	 * If the target doesn't support private address spaces then return
 830	 * the global one
 831	 */
 832	if (gpu->funcs->create_private_address_space) {
 833		aspace = gpu->funcs->create_private_address_space(gpu);
 834		if (!IS_ERR(aspace))
 835			aspace->pid = get_pid(task_pid(task));
 836	}
 837
 838	if (IS_ERR_OR_NULL(aspace))
 839		aspace = msm_gem_address_space_get(gpu->aspace);
 840
 841	return aspace;
 842}
 843
 844int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 845		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 846		const char *name, struct msm_gpu_config *config)
 847{
 848	struct msm_drm_private *priv = drm->dev_private;
 849	int i, ret, nr_rings = config->nr_rings;
 850	void *memptrs;
 851	uint64_t memptrs_iova;
 852
 853	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 854		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
 855
 856	gpu->dev = drm;
 857	gpu->funcs = funcs;
 858	gpu->name = name;
 859
 860	gpu->worker = kthread_create_worker(0, "gpu-worker");
 861	if (IS_ERR(gpu->worker)) {
 862		ret = PTR_ERR(gpu->worker);
 863		gpu->worker = NULL;
 864		goto fail;
 865	}
 866
 867	sched_set_fifo_low(gpu->worker->task);
 868
 869	mutex_init(&gpu->active_lock);
 870	mutex_init(&gpu->lock);
 871	init_waitqueue_head(&gpu->retire_event);
 872	kthread_init_work(&gpu->retire_work, retire_worker);
 873	kthread_init_work(&gpu->recover_work, recover_worker);
 874	kthread_init_work(&gpu->fault_work, fault_worker);
 875
 876	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
 877
 878	/*
 879	 * If progress detection is supported, halve the hangcheck timer
 880	 * duration, as it takes two iterations of the hangcheck handler
 881	 * to detect a hang.
 882	 */
 883	if (funcs->progress)
 884		priv->hangcheck_period /= 2;
 885
 886	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 887
 888	spin_lock_init(&gpu->perf_lock);
 889
 890
 891	/* Map registers: */
 892	gpu->mmio = msm_ioremap(pdev, config->ioname);
 893	if (IS_ERR(gpu->mmio)) {
 894		ret = PTR_ERR(gpu->mmio);
 895		goto fail;
 896	}
 897
 898	/* Get Interrupt: */
 899	gpu->irq = platform_get_irq(pdev, 0);
 900	if (gpu->irq < 0) {
 901		ret = gpu->irq;
 
 902		goto fail;
 903	}
 904
 905	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 906			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
 907	if (ret) {
 908		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 909		goto fail;
 910	}
 911
 912	ret = get_clocks(pdev, gpu);
 913	if (ret)
 914		goto fail;
 915
 916	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 917	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 918	if (IS_ERR(gpu->ebi1_clk))
 919		gpu->ebi1_clk = NULL;
 920
 921	/* Acquire regulators: */
 922	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
 923	DBG("gpu_reg: %p", gpu->gpu_reg);
 924	if (IS_ERR(gpu->gpu_reg))
 925		gpu->gpu_reg = NULL;
 926
 927	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
 928	DBG("gpu_cx: %p", gpu->gpu_cx);
 929	if (IS_ERR(gpu->gpu_cx))
 930		gpu->gpu_cx = NULL;
 931
 932	gpu->pdev = pdev;
 933	platform_set_drvdata(pdev, &gpu->adreno_smmu);
 934
 935	msm_devfreq_init(gpu);
 936
 937
 938	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
 939
 940	if (gpu->aspace == NULL)
 941		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 942	else if (IS_ERR(gpu->aspace)) {
 943		ret = PTR_ERR(gpu->aspace);
 944		goto fail;
 945	}
 946
 947	memptrs = msm_gem_kernel_new(drm,
 948		sizeof(struct msm_rbmemptrs) * nr_rings,
 949		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
 950		&memptrs_iova);
 951
 952	if (IS_ERR(memptrs)) {
 953		ret = PTR_ERR(memptrs);
 954		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 955		goto fail;
 956	}
 957
 958	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
 959
 960	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 961		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
 962			ARRAY_SIZE(gpu->rb));
 963		nr_rings = ARRAY_SIZE(gpu->rb);
 964	}
 965
 966	/* Create ringbuffer(s): */
 967	for (i = 0; i < nr_rings; i++) {
 968		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 969
 970		if (IS_ERR(gpu->rb[i])) {
 971			ret = PTR_ERR(gpu->rb[i]);
 972			DRM_DEV_ERROR(drm->dev,
 973				"could not create ringbuffer %d: %d\n", i, ret);
 974			goto fail;
 975		}
 976
 977		memptrs += sizeof(struct msm_rbmemptrs);
 978		memptrs_iova += sizeof(struct msm_rbmemptrs);
 979	}
 980
 981	gpu->nr_rings = nr_rings;
 982
 983	refcount_set(&gpu->sysprof_active, 1);
 984
 985	return 0;
 986
 987fail:
 988	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
 989		msm_ringbuffer_destroy(gpu->rb[i]);
 990		gpu->rb[i] = NULL;
 991	}
 992
 993	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
 994
 995	platform_set_drvdata(pdev, NULL);
 996	return ret;
 997}
 998
 999void msm_gpu_cleanup(struct msm_gpu *gpu)
1000{
1001	int i;
1002
1003	DBG("%s", gpu->name);
1004
 
 
1005	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
1006		msm_ringbuffer_destroy(gpu->rb[i]);
1007		gpu->rb[i] = NULL;
1008	}
1009
1010	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
1011
1012	if (!IS_ERR_OR_NULL(gpu->aspace)) {
1013		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
1014		msm_gem_address_space_put(gpu->aspace);
1015	}
1016
1017	if (gpu->worker) {
1018		kthread_destroy_worker(gpu->worker);
1019	}
1020
1021	msm_devfreq_cleanup(gpu);
1022
1023	platform_set_drvdata(gpu->pdev, NULL);
1024}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
 
 
   7#include "msm_gpu.h"
   8#include "msm_gem.h"
   9#include "msm_mmu.h"
  10#include "msm_fence.h"
  11#include "msm_gpu_trace.h"
  12#include "adreno/adreno_gpu.h"
  13
  14#include <generated/utsrelease.h>
  15#include <linux/string_helpers.h>
  16#include <linux/devfreq.h>
  17#include <linux/devfreq_cooling.h>
  18#include <linux/devcoredump.h>
  19#include <linux/sched/task.h>
  20
  21/*
  22 * Power Management:
  23 */
  24
  25static int msm_devfreq_target(struct device *dev, unsigned long *freq,
  26		u32 flags)
  27{
  28	struct msm_gpu *gpu = dev_to_gpu(dev);
  29	struct dev_pm_opp *opp;
  30
  31	opp = devfreq_recommended_opp(dev, freq, flags);
  32
  33	if (IS_ERR(opp))
  34		return PTR_ERR(opp);
  35
  36	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
  37
  38	if (gpu->funcs->gpu_set_freq)
  39		gpu->funcs->gpu_set_freq(gpu, opp);
  40	else
  41		clk_set_rate(gpu->core_clk, *freq);
  42
  43	dev_pm_opp_put(opp);
  44
  45	return 0;
  46}
  47
  48static int msm_devfreq_get_dev_status(struct device *dev,
  49		struct devfreq_dev_status *status)
  50{
  51	struct msm_gpu *gpu = dev_to_gpu(dev);
  52	ktime_t time;
  53
  54	if (gpu->funcs->gpu_get_freq)
  55		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
  56	else
  57		status->current_frequency = clk_get_rate(gpu->core_clk);
  58
  59	status->busy_time = gpu->funcs->gpu_busy(gpu);
  60
  61	time = ktime_get();
  62	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
  63	gpu->devfreq.time = time;
  64
  65	return 0;
  66}
  67
  68static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
  69{
  70	struct msm_gpu *gpu = dev_to_gpu(dev);
  71
  72	if (gpu->funcs->gpu_get_freq)
  73		*freq = gpu->funcs->gpu_get_freq(gpu);
  74	else
  75		*freq = clk_get_rate(gpu->core_clk);
  76
  77	return 0;
  78}
  79
  80static struct devfreq_dev_profile msm_devfreq_profile = {
  81	.polling_ms = 10,
  82	.target = msm_devfreq_target,
  83	.get_dev_status = msm_devfreq_get_dev_status,
  84	.get_cur_freq = msm_devfreq_get_cur_freq,
  85};
  86
  87static void msm_devfreq_init(struct msm_gpu *gpu)
  88{
  89	/* We need target support to do devfreq */
  90	if (!gpu->funcs->gpu_busy)
  91		return;
  92
  93	msm_devfreq_profile.initial_freq = gpu->fast_rate;
  94
  95	/*
  96	 * Don't set the freq_table or max_state and let devfreq build the table
  97	 * from OPP
  98	 * After a deferred probe, these may have be left to non-zero values,
  99	 * so set them back to zero before creating the devfreq device
 100	 */
 101	msm_devfreq_profile.freq_table = NULL;
 102	msm_devfreq_profile.max_state = 0;
 103
 104	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
 105			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
 106			NULL);
 107
 108	if (IS_ERR(gpu->devfreq.devfreq)) {
 109		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
 110		gpu->devfreq.devfreq = NULL;
 111		return;
 112	}
 113
 114	devfreq_suspend_device(gpu->devfreq.devfreq);
 115
 116	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
 117			gpu->devfreq.devfreq);
 118	if (IS_ERR(gpu->cooling)) {
 119		DRM_DEV_ERROR(&gpu->pdev->dev,
 120				"Couldn't register GPU cooling device\n");
 121		gpu->cooling = NULL;
 122	}
 123}
 124
 125static int enable_pwrrail(struct msm_gpu *gpu)
 126{
 127	struct drm_device *dev = gpu->dev;
 128	int ret = 0;
 129
 130	if (gpu->gpu_reg) {
 131		ret = regulator_enable(gpu->gpu_reg);
 132		if (ret) {
 133			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
 134			return ret;
 135		}
 136	}
 137
 138	if (gpu->gpu_cx) {
 139		ret = regulator_enable(gpu->gpu_cx);
 140		if (ret) {
 141			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
 142			return ret;
 143		}
 144	}
 145
 146	return 0;
 147}
 148
 149static int disable_pwrrail(struct msm_gpu *gpu)
 150{
 151	if (gpu->gpu_cx)
 152		regulator_disable(gpu->gpu_cx);
 153	if (gpu->gpu_reg)
 154		regulator_disable(gpu->gpu_reg);
 155	return 0;
 156}
 157
 158static int enable_clk(struct msm_gpu *gpu)
 159{
 160	if (gpu->core_clk && gpu->fast_rate)
 161		clk_set_rate(gpu->core_clk, gpu->fast_rate);
 162
 163	/* Set the RBBM timer rate to 19.2Mhz */
 164	if (gpu->rbbmtimer_clk)
 165		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
 166
 167	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
 168}
 169
 170static int disable_clk(struct msm_gpu *gpu)
 171{
 172	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
 173
 174	/*
 175	 * Set the clock to a deliberately low rate. On older targets the clock
 176	 * speed had to be non zero to avoid problems. On newer targets this
 177	 * will be rounded down to zero anyway so it all works out.
 178	 */
 179	if (gpu->core_clk)
 180		clk_set_rate(gpu->core_clk, 27000000);
 181
 182	if (gpu->rbbmtimer_clk)
 183		clk_set_rate(gpu->rbbmtimer_clk, 0);
 184
 185	return 0;
 186}
 187
 188static int enable_axi(struct msm_gpu *gpu)
 189{
 190	return clk_prepare_enable(gpu->ebi1_clk);
 191}
 192
 193static int disable_axi(struct msm_gpu *gpu)
 194{
 195	clk_disable_unprepare(gpu->ebi1_clk);
 196	return 0;
 197}
 198
 199void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
 200{
 201	gpu->devfreq.busy_cycles = 0;
 202	gpu->devfreq.time = ktime_get();
 203
 204	devfreq_resume_device(gpu->devfreq.devfreq);
 205}
 206
 207int msm_gpu_pm_resume(struct msm_gpu *gpu)
 208{
 209	int ret;
 210
 211	DBG("%s", gpu->name);
 212	trace_msm_gpu_resume(0);
 213
 214	ret = enable_pwrrail(gpu);
 215	if (ret)
 216		return ret;
 217
 218	ret = enable_clk(gpu);
 219	if (ret)
 220		return ret;
 221
 222	ret = enable_axi(gpu);
 223	if (ret)
 224		return ret;
 225
 226	msm_gpu_resume_devfreq(gpu);
 227
 228	gpu->needs_hw_init = true;
 229
 230	return 0;
 231}
 232
 233int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 234{
 235	int ret;
 236
 237	DBG("%s", gpu->name);
 238	trace_msm_gpu_suspend(0);
 239
 240	devfreq_suspend_device(gpu->devfreq.devfreq);
 241
 242	ret = disable_axi(gpu);
 243	if (ret)
 244		return ret;
 245
 246	ret = disable_clk(gpu);
 247	if (ret)
 248		return ret;
 249
 250	ret = disable_pwrrail(gpu);
 251	if (ret)
 252		return ret;
 253
 254	gpu->suspend_count++;
 255
 256	return 0;
 257}
 258
 
 
 
 
 
 
 
 
 259int msm_gpu_hw_init(struct msm_gpu *gpu)
 260{
 261	int ret;
 262
 263	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
 264
 265	if (!gpu->needs_hw_init)
 266		return 0;
 267
 268	disable_irq(gpu->irq);
 269	ret = gpu->funcs->hw_init(gpu);
 270	if (!ret)
 271		gpu->needs_hw_init = false;
 272	enable_irq(gpu->irq);
 273
 274	return ret;
 275}
 276
 277static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 278		uint32_t fence)
 279{
 280	struct msm_gem_submit *submit;
 281
 282	spin_lock(&ring->submit_lock);
 283	list_for_each_entry(submit, &ring->submits, node) {
 284		if (submit->seqno > fence)
 285			break;
 286
 287		msm_update_fence(submit->ring->fctx,
 288			submit->fence->seqno);
 289	}
 290	spin_unlock(&ring->submit_lock);
 291}
 292
 293#ifdef CONFIG_DEV_COREDUMP
 294static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 295		size_t count, void *data, size_t datalen)
 296{
 297	struct msm_gpu *gpu = data;
 298	struct drm_print_iterator iter;
 299	struct drm_printer p;
 300	struct msm_gpu_state *state;
 301
 302	state = msm_gpu_crashstate_get(gpu);
 303	if (!state)
 304		return 0;
 305
 306	iter.data = buffer;
 307	iter.offset = 0;
 308	iter.start = offset;
 309	iter.remain = count;
 310
 311	p = drm_coredump_printer(&iter);
 312
 313	drm_printf(&p, "---\n");
 314	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
 315	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
 316	drm_printf(&p, "time: %lld.%09ld\n",
 317		state->time.tv_sec, state->time.tv_nsec);
 318	if (state->comm)
 319		drm_printf(&p, "comm: %s\n", state->comm);
 320	if (state->cmd)
 321		drm_printf(&p, "cmdline: %s\n", state->cmd);
 322
 323	gpu->funcs->show(gpu, state, &p);
 324
 325	msm_gpu_crashstate_put(gpu);
 326
 327	return count - iter.remain;
 328}
 329
 330static void msm_gpu_devcoredump_free(void *data)
 331{
 332	struct msm_gpu *gpu = data;
 333
 334	msm_gpu_crashstate_put(gpu);
 335}
 336
 337static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 338		struct msm_gem_object *obj, u64 iova, u32 flags)
 339{
 340	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 341
 342	/* Don't record write only objects */
 343	state_bo->size = obj->base.size;
 344	state_bo->iova = iova;
 345
 346	/* Only store data for non imported buffer objects marked for read */
 347	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
 
 
 
 348		void *ptr;
 349
 350		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
 351		if (!state_bo->data)
 352			goto out;
 353
 354		msm_gem_lock(&obj->base);
 355		ptr = msm_gem_get_vaddr_active(&obj->base);
 356		msm_gem_unlock(&obj->base);
 357		if (IS_ERR(ptr)) {
 358			kvfree(state_bo->data);
 359			state_bo->data = NULL;
 360			goto out;
 361		}
 362
 363		memcpy(state_bo->data, ptr, obj->base.size);
 364		msm_gem_put_vaddr(&obj->base);
 365	}
 366out:
 367	state->nr_bos++;
 368}
 369
 370static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 371		struct msm_gem_submit *submit, char *comm, char *cmd)
 372{
 373	struct msm_gpu_state *state;
 374
 375	/* Check if the target supports capturing crash state */
 376	if (!gpu->funcs->gpu_state_get)
 377		return;
 378
 379	/* Only save one crash state at a time */
 380	if (gpu->crashstate)
 381		return;
 382
 383	state = gpu->funcs->gpu_state_get(gpu);
 384	if (IS_ERR_OR_NULL(state))
 385		return;
 386
 387	/* Fill in the additional crash state information */
 388	state->comm = kstrdup(comm, GFP_KERNEL);
 389	state->cmd = kstrdup(cmd, GFP_KERNEL);
 390	state->fault_info = gpu->fault_info;
 391
 392	if (submit) {
 393		int i, nr = 0;
 394
 395		/* count # of buffers to dump: */
 396		for (i = 0; i < submit->nr_bos; i++)
 397			if (should_dump(submit, i))
 398				nr++;
 399		/* always dump cmd bo's, but don't double count them: */
 400		for (i = 0; i < submit->nr_cmds; i++)
 401			if (!should_dump(submit, submit->cmd[i].idx))
 402				nr++;
 403
 404		state->bos = kcalloc(nr,
 405			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 406
 407		for (i = 0; i < submit->nr_bos; i++) {
 408			if (should_dump(submit, i)) {
 409				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
 410					submit->bos[i].iova, submit->bos[i].flags);
 411			}
 412		}
 413
 414		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
 415			int idx = submit->cmd[i].idx;
 416
 417			if (!should_dump(submit, submit->cmd[i].idx)) {
 418				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
 419					submit->bos[idx].iova, submit->bos[idx].flags);
 420			}
 421		}
 422	}
 423
 424	/* Set the active crash state to be dumped on failure */
 425	gpu->crashstate = state;
 426
 427	/* FIXME: Release the crashstate if this errors out? */
 428	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 429		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
 430}
 431#else
 432static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 433		struct msm_gem_submit *submit, char *comm, char *cmd)
 434{
 435}
 436#endif
 437
 438/*
 439 * Hangcheck detection for locked gpu:
 440 */
 441
 442static struct msm_gem_submit *
 443find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 444{
 445	struct msm_gem_submit *submit;
 
 446
 447	spin_lock(&ring->submit_lock);
 448	list_for_each_entry(submit, &ring->submits, node) {
 449		if (submit->seqno == fence) {
 450			spin_unlock(&ring->submit_lock);
 451			return submit;
 452		}
 453	}
 454	spin_unlock(&ring->submit_lock);
 455
 456	return NULL;
 457}
 458
 459static void retire_submits(struct msm_gpu *gpu);
 460
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 461static void recover_worker(struct kthread_work *work)
 462{
 463	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 464	struct drm_device *dev = gpu->dev;
 465	struct msm_drm_private *priv = dev->dev_private;
 466	struct msm_gem_submit *submit;
 467	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 468	char *comm = NULL, *cmd = NULL;
 469	int i;
 470
 471	mutex_lock(&dev->struct_mutex);
 472
 473	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 474
 475	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 476	if (submit) {
 477		struct task_struct *task;
 478
 479		/* Increment the fault counts */
 480		gpu->global_faults++;
 481		submit->queue->faults++;
 482
 483		task = get_pid_task(submit->pid, PIDTYPE_PID);
 484		if (task) {
 485			comm = kstrdup(task->comm, GFP_KERNEL);
 486			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 487			put_task_struct(task);
 488		}
 489
 490		/* msm_rd_dump_submit() needs bo locked to dump: */
 491		for (i = 0; i < submit->nr_bos; i++)
 492			msm_gem_lock(&submit->bos[i].obj->base);
 493
 494		if (comm && cmd) {
 495			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 496				gpu->name, comm, cmd);
 497
 498			msm_rd_dump_submit(priv->hangrd, submit,
 499				"offending task: %s (%s)", comm, cmd);
 500		} else {
 501			msm_rd_dump_submit(priv->hangrd, submit, NULL);
 502		}
 
 
 503
 504		for (i = 0; i < submit->nr_bos; i++)
 505			msm_gem_unlock(&submit->bos[i].obj->base);
 506	}
 507
 508	/* Record the crash state */
 509	pm_runtime_get_sync(&gpu->pdev->dev);
 510	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 511	pm_runtime_put_sync(&gpu->pdev->dev);
 512
 513	kfree(cmd);
 514	kfree(comm);
 515
 516	/*
 517	 * Update all the rings with the latest and greatest fence.. this
 518	 * needs to happen after msm_rd_dump_submit() to ensure that the
 519	 * bo's referenced by the offending submit are still around.
 520	 */
 521	for (i = 0; i < gpu->nr_rings; i++) {
 522		struct msm_ringbuffer *ring = gpu->rb[i];
 523
 524		uint32_t fence = ring->memptrs->fence;
 525
 526		/*
 527		 * For the current (faulting?) ring/submit advance the fence by
 528		 * one more to clear the faulting submit
 529		 */
 530		if (ring == cur_ring)
 531			fence++;
 532
 533		update_fences(gpu, ring, fence);
 534	}
 535
 536	if (msm_gpu_active(gpu)) {
 537		/* retire completed submits, plus the one that hung: */
 538		retire_submits(gpu);
 539
 540		pm_runtime_get_sync(&gpu->pdev->dev);
 541		gpu->funcs->recover(gpu);
 542		pm_runtime_put_sync(&gpu->pdev->dev);
 543
 544		/*
 545		 * Replay all remaining submits starting with highest priority
 546		 * ring
 547		 */
 548		for (i = 0; i < gpu->nr_rings; i++) {
 549			struct msm_ringbuffer *ring = gpu->rb[i];
 
 550
 551			spin_lock(&ring->submit_lock);
 552			list_for_each_entry(submit, &ring->submits, node)
 553				gpu->funcs->submit(gpu, submit);
 554			spin_unlock(&ring->submit_lock);
 555		}
 556	}
 557
 558	mutex_unlock(&dev->struct_mutex);
 
 
 
 559
 560	msm_gpu_retire(gpu);
 561}
 562
 563static void fault_worker(struct kthread_work *work)
 564{
 565	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
 566	struct drm_device *dev = gpu->dev;
 567	struct msm_gem_submit *submit;
 568	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 569	char *comm = NULL, *cmd = NULL;
 570
 571	mutex_lock(&dev->struct_mutex);
 572
 573	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 574	if (submit && submit->fault_dumped)
 575		goto resume_smmu;
 576
 577	if (submit) {
 578		struct task_struct *task;
 579
 580		task = get_pid_task(submit->pid, PIDTYPE_PID);
 581		if (task) {
 582			comm = kstrdup(task->comm, GFP_KERNEL);
 583			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 584			put_task_struct(task);
 585		}
 586
 587		/*
 588		 * When we get GPU iova faults, we can get 1000s of them,
 589		 * but we really only want to log the first one.
 590		 */
 591		submit->fault_dumped = true;
 592	}
 593
 594	/* Record the crash state */
 595	pm_runtime_get_sync(&gpu->pdev->dev);
 596	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 597	pm_runtime_put_sync(&gpu->pdev->dev);
 598
 599	kfree(cmd);
 600	kfree(comm);
 601
 602resume_smmu:
 603	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
 604	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 605
 606	mutex_unlock(&dev->struct_mutex);
 607}
 608
 609static void hangcheck_timer_reset(struct msm_gpu *gpu)
 610{
 611	struct msm_drm_private *priv = gpu->dev->dev_private;
 612	mod_timer(&gpu->hangcheck_timer,
 613			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
 614}
 615
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 616static void hangcheck_handler(struct timer_list *t)
 617{
 618	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 619	struct drm_device *dev = gpu->dev;
 620	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 621	uint32_t fence = ring->memptrs->fence;
 622
 623	if (fence != ring->hangcheck_fence) {
 624		/* some progress has been made.. ya! */
 625		ring->hangcheck_fence = fence;
 626	} else if (fence < ring->seqno) {
 
 
 627		/* no progress and not done.. hung! */
 628		ring->hangcheck_fence = fence;
 
 629		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 630				gpu->name, ring->id);
 631		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 632				gpu->name, fence);
 633		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 634				gpu->name, ring->seqno);
 635
 636		kthread_queue_work(gpu->worker, &gpu->recover_work);
 637	}
 638
 639	/* if still more pending work, reset the hangcheck timer: */
 640	if (ring->seqno > ring->hangcheck_fence)
 641		hangcheck_timer_reset(gpu);
 642
 643	/* workaround for missing irq: */
 644	kthread_queue_work(gpu->worker, &gpu->retire_work);
 645}
 646
 647/*
 648 * Performance Counters:
 649 */
 650
 651/* called under perf_lock */
 652static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
 653{
 654	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
 655	int i, n = min(ncntrs, gpu->num_perfcntrs);
 656
 657	/* read current values: */
 658	for (i = 0; i < gpu->num_perfcntrs; i++)
 659		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
 660
 661	/* update cntrs: */
 662	for (i = 0; i < n; i++)
 663		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
 664
 665	/* save current values: */
 666	for (i = 0; i < gpu->num_perfcntrs; i++)
 667		gpu->last_cntrs[i] = current_cntrs[i];
 668
 669	return n;
 670}
 671
 672static void update_sw_cntrs(struct msm_gpu *gpu)
 673{
 674	ktime_t time;
 675	uint32_t elapsed;
 676	unsigned long flags;
 677
 678	spin_lock_irqsave(&gpu->perf_lock, flags);
 679	if (!gpu->perfcntr_active)
 680		goto out;
 681
 682	time = ktime_get();
 683	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
 684
 685	gpu->totaltime += elapsed;
 686	if (gpu->last_sample.active)
 687		gpu->activetime += elapsed;
 688
 689	gpu->last_sample.active = msm_gpu_active(gpu);
 690	gpu->last_sample.time = time;
 691
 692out:
 693	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 694}
 695
 696void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 697{
 698	unsigned long flags;
 699
 700	pm_runtime_get_sync(&gpu->pdev->dev);
 701
 702	spin_lock_irqsave(&gpu->perf_lock, flags);
 703	/* we could dynamically enable/disable perfcntr registers too.. */
 704	gpu->last_sample.active = msm_gpu_active(gpu);
 705	gpu->last_sample.time = ktime_get();
 706	gpu->activetime = gpu->totaltime = 0;
 707	gpu->perfcntr_active = true;
 708	update_hw_cntrs(gpu, 0, NULL);
 709	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 710}
 711
 712void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 713{
 714	gpu->perfcntr_active = false;
 715	pm_runtime_put_sync(&gpu->pdev->dev);
 716}
 717
 718/* returns -errno or # of cntrs sampled */
 719int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 720		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
 721{
 722	unsigned long flags;
 723	int ret;
 724
 725	spin_lock_irqsave(&gpu->perf_lock, flags);
 726
 727	if (!gpu->perfcntr_active) {
 728		ret = -EINVAL;
 729		goto out;
 730	}
 731
 732	*activetime = gpu->activetime;
 733	*totaltime = gpu->totaltime;
 734
 735	gpu->activetime = gpu->totaltime = 0;
 736
 737	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
 738
 739out:
 740	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 741
 742	return ret;
 743}
 744
 745/*
 746 * Cmdstream submission/retirement:
 747 */
 748
 749static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 750		struct msm_gem_submit *submit)
 751{
 752	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 753	volatile struct msm_gpu_submit_stats *stats;
 754	u64 elapsed, clock = 0;
 755	int i;
 756
 757	stats = &ring->memptrs->stats[index];
 758	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
 759	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
 760	do_div(elapsed, 192);
 761
 
 
 762	/* Calculate the clock frequency from the number of CP cycles */
 763	if (elapsed) {
 764		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
 765		do_div(clock, elapsed);
 766	}
 767
 
 
 
 768	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 769		stats->alwayson_start, stats->alwayson_end);
 770
 771	for (i = 0; i < submit->nr_bos; i++) {
 772		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 773
 774		msm_gem_lock(obj);
 775		msm_gem_active_put(obj);
 776		msm_gem_unpin_iova_locked(obj, submit->aspace);
 777		msm_gem_unlock(obj);
 778		drm_gem_object_put(obj);
 779	}
 780
 781	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 782	pm_runtime_put_autosuspend(&gpu->pdev->dev);
 783
 784	spin_lock(&ring->submit_lock);
 785	list_del(&submit->node);
 786	spin_unlock(&ring->submit_lock);
 
 
 
 
 
 
 
 
 
 
 
 787
 788	msm_gem_submit_put(submit);
 789}
 790
 791static void retire_submits(struct msm_gpu *gpu)
 792{
 793	int i;
 794
 795	/* Retire the commits starting with highest priority */
 796	for (i = 0; i < gpu->nr_rings; i++) {
 797		struct msm_ringbuffer *ring = gpu->rb[i];
 798
 799		while (true) {
 800			struct msm_gem_submit *submit = NULL;
 
 801
 802			spin_lock(&ring->submit_lock);
 803			submit = list_first_entry_or_null(&ring->submits,
 804					struct msm_gem_submit, node);
 805			spin_unlock(&ring->submit_lock);
 806
 807			/*
 808			 * If no submit, we are done.  If submit->fence hasn't
 809			 * been signalled, then later submits are not signalled
 810			 * either, so we are also done.
 811			 */
 812			if (submit && dma_fence_is_signaled(submit->fence)) {
 813				retire_submit(gpu, ring, submit);
 814			} else {
 815				break;
 816			}
 817		}
 818	}
 
 
 819}
 820
 821static void retire_worker(struct kthread_work *work)
 822{
 823	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 824	int i;
 825
 826	for (i = 0; i < gpu->nr_rings; i++)
 827		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
 828
 829	retire_submits(gpu);
 830}
 831
 832/* call from irq handler to schedule work to retire bo's */
 833void msm_gpu_retire(struct msm_gpu *gpu)
 834{
 
 
 
 
 
 835	kthread_queue_work(gpu->worker, &gpu->retire_work);
 836	update_sw_cntrs(gpu);
 837}
 838
 839/* add bo's to gpu's ring, and kick gpu: */
 840void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 841{
 842	struct drm_device *dev = gpu->dev;
 843	struct msm_drm_private *priv = dev->dev_private;
 844	struct msm_ringbuffer *ring = submit->ring;
 845	int i;
 846
 847	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 848
 849	pm_runtime_get_sync(&gpu->pdev->dev);
 850
 851	msm_gpu_hw_init(gpu);
 852
 853	submit->seqno = ++ring->seqno;
 854
 855	msm_rd_dump_submit(priv->rd, submit, NULL);
 856
 857	update_sw_cntrs(gpu);
 858
 859	for (i = 0; i < submit->nr_bos; i++) {
 860		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 861		struct drm_gem_object *drm_obj = &msm_obj->base;
 862		uint64_t iova;
 863
 864		/* submit takes a reference to the bo and iova until retired: */
 865		drm_gem_object_get(&msm_obj->base);
 866		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
 867
 868		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 869			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
 870		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
 871			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
 872
 873		msm_gem_active_get(drm_obj, gpu);
 874	}
 875
 876	/*
 877	 * ring->submits holds a ref to the submit, to deal with the case
 878	 * that a submit completes before msm_ioctl_gem_submit() returns.
 879	 */
 880	msm_gem_submit_get(submit);
 881
 882	spin_lock(&ring->submit_lock);
 883	list_add_tail(&submit->node, &ring->submits);
 884	spin_unlock(&ring->submit_lock);
 
 
 
 
 
 
 
 
 
 885
 886	gpu->funcs->submit(gpu, submit);
 887	priv->lastctx = submit->queue->ctx;
 888
 
 889	hangcheck_timer_reset(gpu);
 890}
 891
 892/*
 893 * Init/Cleanup:
 894 */
 895
 896static irqreturn_t irq_handler(int irq, void *data)
 897{
 898	struct msm_gpu *gpu = data;
 899	return gpu->funcs->irq(gpu);
 900}
 901
 902static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 903{
 904	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 905
 906	if (ret < 1) {
 907		gpu->nr_clocks = 0;
 908		return ret;
 909	}
 910
 911	gpu->nr_clocks = ret;
 912
 913	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 914		gpu->nr_clocks, "core");
 915
 916	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 917		gpu->nr_clocks, "rbbmtimer");
 918
 919	return 0;
 920}
 921
 922/* Return a new address space for a msm_drm_private instance */
 923struct msm_gem_address_space *
 924msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 925{
 926	struct msm_gem_address_space *aspace = NULL;
 927	if (!gpu)
 928		return NULL;
 929
 930	/*
 931	 * If the target doesn't support private address spaces then return
 932	 * the global one
 933	 */
 934	if (gpu->funcs->create_private_address_space) {
 935		aspace = gpu->funcs->create_private_address_space(gpu);
 936		if (!IS_ERR(aspace))
 937			aspace->pid = get_pid(task_pid(task));
 938	}
 939
 940	if (IS_ERR_OR_NULL(aspace))
 941		aspace = msm_gem_address_space_get(gpu->aspace);
 942
 943	return aspace;
 944}
 945
 946int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 947		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 948		const char *name, struct msm_gpu_config *config)
 949{
 
 950	int i, ret, nr_rings = config->nr_rings;
 951	void *memptrs;
 952	uint64_t memptrs_iova;
 953
 954	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 955		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
 956
 957	gpu->dev = drm;
 958	gpu->funcs = funcs;
 959	gpu->name = name;
 960
 961	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
 962	if (IS_ERR(gpu->worker)) {
 963		ret = PTR_ERR(gpu->worker);
 964		gpu->worker = NULL;
 965		goto fail;
 966	}
 967
 968	sched_set_fifo_low(gpu->worker->task);
 969
 970	INIT_LIST_HEAD(&gpu->active_list);
 
 
 971	kthread_init_work(&gpu->retire_work, retire_worker);
 972	kthread_init_work(&gpu->recover_work, recover_worker);
 973	kthread_init_work(&gpu->fault_work, fault_worker);
 974
 
 
 
 
 
 
 
 
 
 
 975	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 976
 977	spin_lock_init(&gpu->perf_lock);
 978
 979
 980	/* Map registers: */
 981	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
 982	if (IS_ERR(gpu->mmio)) {
 983		ret = PTR_ERR(gpu->mmio);
 984		goto fail;
 985	}
 986
 987	/* Get Interrupt: */
 988	gpu->irq = platform_get_irq(pdev, 0);
 989	if (gpu->irq < 0) {
 990		ret = gpu->irq;
 991		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 992		goto fail;
 993	}
 994
 995	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 996			IRQF_TRIGGER_HIGH, gpu->name, gpu);
 997	if (ret) {
 998		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 999		goto fail;
1000	}
1001
1002	ret = get_clocks(pdev, gpu);
1003	if (ret)
1004		goto fail;
1005
1006	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
1007	DBG("ebi1_clk: %p", gpu->ebi1_clk);
1008	if (IS_ERR(gpu->ebi1_clk))
1009		gpu->ebi1_clk = NULL;
1010
1011	/* Acquire regulators: */
1012	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
1013	DBG("gpu_reg: %p", gpu->gpu_reg);
1014	if (IS_ERR(gpu->gpu_reg))
1015		gpu->gpu_reg = NULL;
1016
1017	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
1018	DBG("gpu_cx: %p", gpu->gpu_cx);
1019	if (IS_ERR(gpu->gpu_cx))
1020		gpu->gpu_cx = NULL;
1021
1022	gpu->pdev = pdev;
1023	platform_set_drvdata(pdev, &gpu->adreno_smmu);
1024
1025	msm_devfreq_init(gpu);
1026
1027
1028	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
1029
1030	if (gpu->aspace == NULL)
1031		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
1032	else if (IS_ERR(gpu->aspace)) {
1033		ret = PTR_ERR(gpu->aspace);
1034		goto fail;
1035	}
1036
1037	memptrs = msm_gem_kernel_new(drm,
1038		sizeof(struct msm_rbmemptrs) * nr_rings,
1039		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
1040		&memptrs_iova);
1041
1042	if (IS_ERR(memptrs)) {
1043		ret = PTR_ERR(memptrs);
1044		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
1045		goto fail;
1046	}
1047
1048	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
1049
1050	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
1051		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
1052			ARRAY_SIZE(gpu->rb));
1053		nr_rings = ARRAY_SIZE(gpu->rb);
1054	}
1055
1056	/* Create ringbuffer(s): */
1057	for (i = 0; i < nr_rings; i++) {
1058		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
1059
1060		if (IS_ERR(gpu->rb[i])) {
1061			ret = PTR_ERR(gpu->rb[i]);
1062			DRM_DEV_ERROR(drm->dev,
1063				"could not create ringbuffer %d: %d\n", i, ret);
1064			goto fail;
1065		}
1066
1067		memptrs += sizeof(struct msm_rbmemptrs);
1068		memptrs_iova += sizeof(struct msm_rbmemptrs);
1069	}
1070
1071	gpu->nr_rings = nr_rings;
1072
 
 
1073	return 0;
1074
1075fail:
1076	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
1077		msm_ringbuffer_destroy(gpu->rb[i]);
1078		gpu->rb[i] = NULL;
1079	}
1080
1081	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
1082
1083	platform_set_drvdata(pdev, NULL);
1084	return ret;
1085}
1086
1087void msm_gpu_cleanup(struct msm_gpu *gpu)
1088{
1089	int i;
1090
1091	DBG("%s", gpu->name);
1092
1093	WARN_ON(!list_empty(&gpu->active_list));
1094
1095	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
1096		msm_ringbuffer_destroy(gpu->rb[i]);
1097		gpu->rb[i] = NULL;
1098	}
1099
1100	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
1101
1102	if (!IS_ERR_OR_NULL(gpu->aspace)) {
1103		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
1104		msm_gem_address_space_put(gpu->aspace);
1105	}
1106
1107	if (gpu->worker) {
1108		kthread_destroy_worker(gpu->worker);
1109	}
1110
1111	devfreq_cooling_unregister(gpu->cooling);
 
 
1112}