v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include "drm/drm_drv.h"
   8
   9#include "msm_gpu.h"
  10#include "msm_gem.h"
  11#include "msm_mmu.h"
  12#include "msm_fence.h"
  13#include "msm_gpu_trace.h"
  14//#include "adreno/adreno_gpu.h"
  15
  16#include <generated/utsrelease.h>
  17#include <linux/string_helpers.h>
  18#include <linux/devcoredump.h>
  19#include <linux/sched/task.h>
  20
  21/*
  22 * Power Management:
  23 */
  24
  25static int enable_pwrrail(struct msm_gpu *gpu)
  26{
  27	struct drm_device *dev = gpu->dev;
  28	int ret = 0;
  29
  30	if (gpu->gpu_reg) {
  31		ret = regulator_enable(gpu->gpu_reg);
  32		if (ret) {
  33			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
  34			return ret;
  35		}
  36	}
  37
  38	if (gpu->gpu_cx) {
  39		ret = regulator_enable(gpu->gpu_cx);
  40		if (ret) {
  41			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
  42			return ret;
  43		}
  44	}
  45
  46	return 0;
  47}
  48
  49static int disable_pwrrail(struct msm_gpu *gpu)
  50{
  51	if (gpu->gpu_cx)
  52		regulator_disable(gpu->gpu_cx);
  53	if (gpu->gpu_reg)
  54		regulator_disable(gpu->gpu_reg);
  55	return 0;
  56}
  57
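/*
 * Clock bring-up: the core clock is driven through the OPP table at the
 * target's fast_rate, the RBBM timer runs at a fixed 19.2 MHz, and the
 * remaining bulk clocks are prepared and enabled in a single call.
 */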
  58static int enable_clk(struct msm_gpu *gpu)
  59{
  60	if (gpu->core_clk && gpu->fast_rate)
  61		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);
  62
   63	/* Set the RBBM timer rate to 19.2 MHz */
  64	if (gpu->rbbmtimer_clk)
  65		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
  66
  67	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
  68}
  69
  70static int disable_clk(struct msm_gpu *gpu)
  71{
  72	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
  73
  74	/*
  75	 * Set the clock to a deliberately low rate. On older targets the clock
  76	 * speed had to be non zero to avoid problems. On newer targets this
  77	 * will be rounded down to zero anyway so it all works out.
  78	 */
  79	if (gpu->core_clk)
  80		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);
  81
  82	if (gpu->rbbmtimer_clk)
  83		clk_set_rate(gpu->rbbmtimer_clk, 0);
  84
  85	return 0;
  86}
  87
  88static int enable_axi(struct msm_gpu *gpu)
  89{
  90	return clk_prepare_enable(gpu->ebi1_clk);
  91}
  92
  93static int disable_axi(struct msm_gpu *gpu)
  94{
  95	clk_disable_unprepare(gpu->ebi1_clk);
  96	return 0;
  97}
  98
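/*
 * Runtime-PM resume: power rails must come up before the clocks, and the
 * clocks before the AXI bus clock.  Actual hardware re-init is deferred
 * to the next submit via the needs_hw_init flag.
 */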
  99int msm_gpu_pm_resume(struct msm_gpu *gpu)
 100{
 101	int ret;
 102
 103	DBG("%s", gpu->name);
 104	trace_msm_gpu_resume(0);
 105
 106	ret = enable_pwrrail(gpu);
 107	if (ret)
 108		return ret;
 109
 110	ret = enable_clk(gpu);
 111	if (ret)
 112		return ret;
 113
 114	ret = enable_axi(gpu);
 115	if (ret)
 116		return ret;
 117
 118	msm_devfreq_resume(gpu);
 119
 120	gpu->needs_hw_init = true;
 121
 122	return 0;
 123}
 124
 125int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 126{
 127	int ret;
 128
 129	DBG("%s", gpu->name);
 130	trace_msm_gpu_suspend(0);
 131
 132	msm_devfreq_suspend(gpu);
 133
 134	ret = disable_axi(gpu);
 135	if (ret)
 136		return ret;
 137
 138	ret = disable_clk(gpu);
 139	if (ret)
 140		return ret;
 141
 142	ret = disable_pwrrail(gpu);
 143	if (ret)
 144		return ret;
 145
 146	gpu->suspend_count++;
 147
 148	return 0;
 149}
 150
 151void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
 152			 struct drm_printer *p)
 153{
 154	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
 155	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
 156	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
 157}
 158
 159int msm_gpu_hw_init(struct msm_gpu *gpu)
 160{
 161	int ret;
 162
 163	WARN_ON(!mutex_is_locked(&gpu->lock));
 164
 165	if (!gpu->needs_hw_init)
 166		return 0;
 167
 168	disable_irq(gpu->irq);
 169	ret = gpu->funcs->hw_init(gpu);
 170	if (!ret)
 171		gpu->needs_hw_init = false;
 172	enable_irq(gpu->irq);
 173
 174	return ret;
 175}
 176
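/*
 * Crash-state capture uses the kernel's devcoredump facility: the
 * snapshot taken below is registered with dev_coredumpm() and read back
 * by userspace through /sys/class/devcoredump.
 */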
 177#ifdef CONFIG_DEV_COREDUMP
 178static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 179		size_t count, void *data, size_t datalen)
 180{
 181	struct msm_gpu *gpu = data;
 182	struct drm_print_iterator iter;
 183	struct drm_printer p;
 184	struct msm_gpu_state *state;
 185
 186	state = msm_gpu_crashstate_get(gpu);
 187	if (!state)
 188		return 0;
 189
 190	iter.data = buffer;
 191	iter.offset = 0;
 192	iter.start = offset;
 193	iter.remain = count;
 194
 195	p = drm_coredump_printer(&iter);
 196
 197	drm_printf(&p, "---\n");
 198	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
 199	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
 200	drm_printf(&p, "time: %lld.%09ld\n",
 201		state->time.tv_sec, state->time.tv_nsec);
 202	if (state->comm)
 203		drm_printf(&p, "comm: %s\n", state->comm);
 204	if (state->cmd)
 205		drm_printf(&p, "cmdline: %s\n", state->cmd);
 206
 207	gpu->funcs->show(gpu, state, &p);
 208
 209	msm_gpu_crashstate_put(gpu);
 210
 211	return count - iter.remain;
 212}
 213
 214static void msm_gpu_devcoredump_free(void *data)
 215{
 216	struct msm_gpu *gpu = data;
 217
 218	msm_gpu_crashstate_put(gpu);
 219}
 220
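/*
 * Snapshot a single GEM buffer object into the crash state: size, flags,
 * iova and name are always recorded, the buffer contents only when
 * 'full' is set.
 */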
 221static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 222		struct drm_gem_object *obj, u64 iova, bool full)
 223{
 224	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 225	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 226
  227	/* Don't record write-only objects */
 228	state_bo->size = obj->size;
 229	state_bo->flags = msm_obj->flags;
 230	state_bo->iova = iova;
 231
 232	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));
 233
 234	memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));
 235
 236	if (full) {
 237		void *ptr;
 238
 239		state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
 240		if (!state_bo->data)
 241			goto out;
 242
 243		msm_gem_lock(obj);
 244		ptr = msm_gem_get_vaddr_active(obj);
 245		msm_gem_unlock(obj);
 246		if (IS_ERR(ptr)) {
 247			kvfree(state_bo->data);
 248			state_bo->data = NULL;
 249			goto out;
 250		}
 251
 252		memcpy(state_bo->data, ptr, obj->size);
 253		msm_gem_put_vaddr(obj);
 254	}
 255out:
 256	state->nr_bos++;
 257}
 258
 259static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 260		struct msm_gem_submit *submit, char *comm, char *cmd)
 261{
 262	struct msm_gpu_state *state;
 263
 264	/* Check if the target supports capturing crash state */
 265	if (!gpu->funcs->gpu_state_get)
 266		return;
 267
 268	/* Only save one crash state at a time */
 269	if (gpu->crashstate)
 270		return;
 271
 272	state = gpu->funcs->gpu_state_get(gpu);
 273	if (IS_ERR_OR_NULL(state))
 274		return;
 275
 276	/* Fill in the additional crash state information */
 277	state->comm = kstrdup(comm, GFP_KERNEL);
 278	state->cmd = kstrdup(cmd, GFP_KERNEL);
 279	state->fault_info = gpu->fault_info;
 280
 281	if (submit) {
 282		int i;
 283
 284		state->bos = kcalloc(submit->nr_bos,
 285			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 286
 287		for (i = 0; state->bos && i < submit->nr_bos; i++) {
 288			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
 289						  submit->bos[i].iova,
 290						  should_dump(submit, i));
 291		}
 292	}
 293
 294	/* Set the active crash state to be dumped on failure */
 295	gpu->crashstate = state;
 296
 297	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 298		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
 299}
 300#else
 301static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 302		struct msm_gem_submit *submit, char *comm, char *cmd)
 303{
 304}
 305#endif
 306
 307/*
 308 * Hangcheck detection for locked gpu:
 309 */
 310
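/*
 * Map a fence seqno back to the submit that produced it, walking the
 * ring's submit list under submit_lock.  Returns NULL if the submit was
 * already retired.
 */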
 311static struct msm_gem_submit *
 312find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 313{
 314	struct msm_gem_submit *submit;
 315	unsigned long flags;
 316
 317	spin_lock_irqsave(&ring->submit_lock, flags);
 318	list_for_each_entry(submit, &ring->submits, node) {
 319		if (submit->seqno == fence) {
 320			spin_unlock_irqrestore(&ring->submit_lock, flags);
 321			return submit;
 322		}
 323	}
 324	spin_unlock_irqrestore(&ring->submit_lock, flags);
 325
 326	return NULL;
 327}
 328
 329static void retire_submits(struct msm_gpu *gpu);
 330
 331static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
 332{
 333	struct msm_file_private *ctx = submit->queue->ctx;
 334	struct task_struct *task;
 335
 336	WARN_ON(!mutex_is_locked(&submit->gpu->lock));
 337
 338	/* Note that kstrdup will return NULL if argument is NULL: */
 339	*comm = kstrdup(ctx->comm, GFP_KERNEL);
 340	*cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
 341
 342	task = get_pid_task(submit->pid, PIDTYPE_PID);
 343	if (!task)
 344		return;
 345
 346	if (!*comm)
 347		*comm = kstrdup(task->comm, GFP_KERNEL);
 348
 349	if (!*cmd)
 350		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 351
 352	put_task_struct(task);
 353}
 354
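/*
 * GPU recovery: find the hung submit, bump the per-queue and per-aspace
 * fault counters, capture a crash state, advance the fences past the
 * offending submit, reset the GPU, and replay the submits that were
 * still queued behind it.
 */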
 355static void recover_worker(struct kthread_work *work)
 356{
 357	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 358	struct drm_device *dev = gpu->dev;
 359	struct msm_drm_private *priv = dev->dev_private;
 360	struct msm_gem_submit *submit;
 361	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 362	char *comm = NULL, *cmd = NULL;
 363	int i;
 364
 365	mutex_lock(&gpu->lock);
 366
 367	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 368
 369	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 370
 371	/*
 372	 * If the submit retired while we were waiting for the worker to run,
 373	 * or waiting to acquire the gpu lock, then nothing more to do.
 374	 */
 375	if (!submit)
 376		goto out_unlock;
 377
 378	/* Increment the fault counts */
 379	submit->queue->faults++;
 380	if (submit->aspace)
 381		submit->aspace->faults++;
 382
 383	get_comm_cmdline(submit, &comm, &cmd);
 384
 385	if (comm && cmd) {
 386		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 387			      gpu->name, comm, cmd);
 388
 389		msm_rd_dump_submit(priv->hangrd, submit,
 390				   "offending task: %s (%s)", comm, cmd);
 391	} else {
 392		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
 393
 394		msm_rd_dump_submit(priv->hangrd, submit, NULL);
 395	}
 396
 397	/* Record the crash state */
 398	pm_runtime_get_sync(&gpu->pdev->dev);
 399	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 400
 401	kfree(cmd);
 402	kfree(comm);
 403
 404	/*
 405	 * Update all the rings with the latest and greatest fence.. this
 406	 * needs to happen after msm_rd_dump_submit() to ensure that the
 407	 * bo's referenced by the offending submit are still around.
 408	 */
 409	for (i = 0; i < gpu->nr_rings; i++) {
 410		struct msm_ringbuffer *ring = gpu->rb[i];
 411
 412		uint32_t fence = ring->memptrs->fence;
 413
 414		/*
 415		 * For the current (faulting?) ring/submit advance the fence by
 416		 * one more to clear the faulting submit
 417		 */
 418		if (ring == cur_ring)
 419			ring->memptrs->fence = ++fence;
 420
 421		msm_update_fence(ring->fctx, fence);
 422	}
 423
 424	if (msm_gpu_active(gpu)) {
 425		/* retire completed submits, plus the one that hung: */
 426		retire_submits(gpu);
 427
 428		gpu->funcs->recover(gpu);
 429
 430		/*
 431		 * Replay all remaining submits starting with highest priority
 432		 * ring
 433		 */
 434		for (i = 0; i < gpu->nr_rings; i++) {
 435			struct msm_ringbuffer *ring = gpu->rb[i];
 436			unsigned long flags;
 437
 438			spin_lock_irqsave(&ring->submit_lock, flags);
 439			list_for_each_entry(submit, &ring->submits, node)
 440				gpu->funcs->submit(gpu, submit);
 441			spin_unlock_irqrestore(&ring->submit_lock, flags);
 442		}
 443	}
 444
 445	pm_runtime_put(&gpu->pdev->dev);
 446
 447out_unlock:
 448	mutex_unlock(&gpu->lock);
 449
 450	msm_gpu_retire(gpu);
 451}
 452
 453static void fault_worker(struct kthread_work *work)
 454{
 455	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
 456	struct msm_gem_submit *submit;
 457	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 458	char *comm = NULL, *cmd = NULL;
 459
 460	mutex_lock(&gpu->lock);
 461
 462	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 463	if (submit && submit->fault_dumped)
 464		goto resume_smmu;
 465
 466	if (submit) {
 467		get_comm_cmdline(submit, &comm, &cmd);
 468
 469		/*
 470		 * When we get GPU iova faults, we can get 1000s of them,
 471		 * but we really only want to log the first one.
 472		 */
 473		submit->fault_dumped = true;
 474	}
 475
 476	/* Record the crash state */
 477	pm_runtime_get_sync(&gpu->pdev->dev);
 478	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 479	pm_runtime_put_sync(&gpu->pdev->dev);
 480
 481	kfree(cmd);
 482	kfree(comm);
 483
 484resume_smmu:
 485	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
 486	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 487
 488	mutex_unlock(&gpu->lock);
 489}
 490
 491static void hangcheck_timer_reset(struct msm_gpu *gpu)
 492{
 493	struct msm_drm_private *priv = gpu->dev->dev_private;
 494	mod_timer(&gpu->hangcheck_timer,
 495			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
 496}
 497
 498static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 499{
 500	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
 501		return false;
 502
 503	if (!gpu->funcs->progress)
 504		return false;
 505
 506	if (!gpu->funcs->progress(gpu, ring))
 507		return false;
 508
 509	ring->hangcheck_progress_retries++;
 510	return true;
 511}
 512
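/*
 * Periodic hangcheck: if the completed fence advanced since the last run
 * there is progress; if it stalled short of the last submitted fence and
 * the optional progress() callback sees no forward movement either, a
 * lockup is declared and recover_work is queued.
 */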
 513static void hangcheck_handler(struct timer_list *t)
 514{
 515	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 516	struct drm_device *dev = gpu->dev;
 517	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 518	uint32_t fence = ring->memptrs->fence;
 519
 520	if (fence != ring->hangcheck_fence) {
 521		/* some progress has been made.. ya! */
 522		ring->hangcheck_fence = fence;
 523		ring->hangcheck_progress_retries = 0;
 524	} else if (fence_before(fence, ring->fctx->last_fence) &&
 525			!made_progress(gpu, ring)) {
 526		/* no progress and not done.. hung! */
 527		ring->hangcheck_fence = fence;
 528		ring->hangcheck_progress_retries = 0;
 529		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 530				gpu->name, ring->id);
 531		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 532				gpu->name, fence);
 533		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 534				gpu->name, ring->fctx->last_fence);
 535
 536		kthread_queue_work(gpu->worker, &gpu->recover_work);
 537	}
 538
 539	/* if still more pending work, reset the hangcheck timer: */
 540	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
 541		hangcheck_timer_reset(gpu);
 542
 543	/* workaround for missing irq: */
 544	msm_gpu_retire(gpu);
 545}
 546
 547/*
 548 * Performance Counters:
 549 */
 550
 551/* called under perf_lock */
 552static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
 553{
 554	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
 555	int i, n = min(ncntrs, gpu->num_perfcntrs);
 556
 557	/* read current values: */
 558	for (i = 0; i < gpu->num_perfcntrs; i++)
 559		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
 560
 561	/* update cntrs: */
 562	for (i = 0; i < n; i++)
 563		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
 564
 565	/* save current values: */
 566	for (i = 0; i < gpu->num_perfcntrs; i++)
 567		gpu->last_cntrs[i] = current_cntrs[i];
 568
 569	return n;
 570}
 571
 572static void update_sw_cntrs(struct msm_gpu *gpu)
 573{
 574	ktime_t time;
 575	uint32_t elapsed;
 576	unsigned long flags;
 577
 578	spin_lock_irqsave(&gpu->perf_lock, flags);
 579	if (!gpu->perfcntr_active)
 580		goto out;
 581
 582	time = ktime_get();
 583	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
 584
 585	gpu->totaltime += elapsed;
 586	if (gpu->last_sample.active)
 587		gpu->activetime += elapsed;
 588
 589	gpu->last_sample.active = msm_gpu_active(gpu);
 590	gpu->last_sample.time = time;
 591
 592out:
 593	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 594}
 595
 596void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 597{
 598	unsigned long flags;
 599
 600	pm_runtime_get_sync(&gpu->pdev->dev);
 601
 602	spin_lock_irqsave(&gpu->perf_lock, flags);
 603	/* we could dynamically enable/disable perfcntr registers too.. */
 604	gpu->last_sample.active = msm_gpu_active(gpu);
 605	gpu->last_sample.time = ktime_get();
 606	gpu->activetime = gpu->totaltime = 0;
 607	gpu->perfcntr_active = true;
 608	update_hw_cntrs(gpu, 0, NULL);
 609	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 610}
 611
 612void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 613{
 614	gpu->perfcntr_active = false;
 615	pm_runtime_put_sync(&gpu->pdev->dev);
 616}
 617
 618/* returns -errno or # of cntrs sampled */
 619int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 620		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
 621{
 622	unsigned long flags;
 623	int ret;
 624
 625	spin_lock_irqsave(&gpu->perf_lock, flags);
 626
 627	if (!gpu->perfcntr_active) {
 628		ret = -EINVAL;
 629		goto out;
 630	}
 631
 632	*activetime = gpu->activetime;
 633	*totaltime = gpu->totaltime;
 634
 635	gpu->activetime = gpu->totaltime = 0;
 636
 637	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
 638
 639out:
 640	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 641
 642	return ret;
 643}
 644
 645/*
 646 * Cmdstream submission/retirement:
 647 */
 648
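/*
 * Retire one submit.  The always-on counter ticks at 19.2 MHz, so
 * elapsed time in ns is ticks * 10000 / 192; dividing the CP cycle count
 * by that yields the effective clock estimate passed to the tracepoint.
 */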
 649static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 650		struct msm_gem_submit *submit)
 651{
 652	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 653	volatile struct msm_gpu_submit_stats *stats;
 654	u64 elapsed, clock = 0, cycles;
 655	unsigned long flags;
 656
 657	stats = &ring->memptrs->stats[index];
  658	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
 659	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
 660	do_div(elapsed, 192);
 661
 662	cycles = stats->cpcycles_end - stats->cpcycles_start;
 663
 664	/* Calculate the clock frequency from the number of CP cycles */
 665	if (elapsed) {
 666		clock = cycles * 1000;
 667		do_div(clock, elapsed);
 668	}
 669
 670	submit->queue->ctx->elapsed_ns += elapsed;
 671	submit->queue->ctx->cycles     += cycles;
 672
 673	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 674		stats->alwayson_start, stats->alwayson_end);
 675
 676	msm_submit_retire(submit);
 677
 678	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 679
 680	spin_lock_irqsave(&ring->submit_lock, flags);
 681	list_del(&submit->node);
 682	spin_unlock_irqrestore(&ring->submit_lock, flags);
 683
 684	/* Update devfreq on transition from active->idle: */
 685	mutex_lock(&gpu->active_lock);
 686	gpu->active_submits--;
 687	WARN_ON(gpu->active_submits < 0);
 688	if (!gpu->active_submits) {
 689		msm_devfreq_idle(gpu);
 690		pm_runtime_put_autosuspend(&gpu->pdev->dev);
 691	}
 692
 693	mutex_unlock(&gpu->active_lock);
 694
 695	msm_gem_submit_put(submit);
 696}
 697
 698static void retire_submits(struct msm_gpu *gpu)
 699{
 700	int i;
 701
 702	/* Retire the commits starting with highest priority */
 703	for (i = 0; i < gpu->nr_rings; i++) {
 704		struct msm_ringbuffer *ring = gpu->rb[i];
 705
 706		while (true) {
 707			struct msm_gem_submit *submit = NULL;
 708			unsigned long flags;
 709
 710			spin_lock_irqsave(&ring->submit_lock, flags);
 711			submit = list_first_entry_or_null(&ring->submits,
 712					struct msm_gem_submit, node);
 713			spin_unlock_irqrestore(&ring->submit_lock, flags);
 714
 715			/*
 716			 * If no submit, we are done.  If submit->fence hasn't
 717			 * been signalled, then later submits are not signalled
 718			 * either, so we are also done.
 719			 */
 720			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
 721				retire_submit(gpu, ring, submit);
 722			} else {
 723				break;
 724			}
 725		}
 726	}
 727
 728	wake_up_all(&gpu->retire_event);
 729}
 730
 731static void retire_worker(struct kthread_work *work)
 732{
 733	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 734
 735	retire_submits(gpu);
 736}
 737
 738/* call from irq handler to schedule work to retire bo's */
 739void msm_gpu_retire(struct msm_gpu *gpu)
 740{
 741	int i;
 742
 743	for (i = 0; i < gpu->nr_rings; i++)
 744		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 745
 746	kthread_queue_work(gpu->worker, &gpu->retire_work);
 747	update_sw_cntrs(gpu);
 748}
 749
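/*
 * Called with gpu->lock held.  An idle->active transition takes an extra
 * runtime-PM reference and notifies devfreq; retire_submit() drops both
 * again once the last active submit completes.
 */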
 750/* add bo's to gpu's ring, and kick gpu: */
 751void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 752{
 753	struct msm_ringbuffer *ring = submit->ring;
 754	unsigned long flags;
 755
 756	WARN_ON(!mutex_is_locked(&gpu->lock));
 757
 758	pm_runtime_get_sync(&gpu->pdev->dev);
 759
 760	msm_gpu_hw_init(gpu);
 761
 762	submit->seqno = submit->hw_fence->seqno;
 763
 764	update_sw_cntrs(gpu);
 765
 766	/*
 767	 * ring->submits holds a ref to the submit, to deal with the case
 768	 * that a submit completes before msm_ioctl_gem_submit() returns.
 769	 */
 770	msm_gem_submit_get(submit);
 771
 772	spin_lock_irqsave(&ring->submit_lock, flags);
 773	list_add_tail(&submit->node, &ring->submits);
 774	spin_unlock_irqrestore(&ring->submit_lock, flags);
 775
 776	/* Update devfreq on transition from idle->active: */
 777	mutex_lock(&gpu->active_lock);
 778	if (!gpu->active_submits) {
 779		pm_runtime_get(&gpu->pdev->dev);
 780		msm_devfreq_active(gpu);
 781	}
 782	gpu->active_submits++;
 783	mutex_unlock(&gpu->active_lock);
 784
 785	gpu->funcs->submit(gpu, submit);
 786	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
 787
 788	pm_runtime_put(&gpu->pdev->dev);
 789	hangcheck_timer_reset(gpu);
 790}
 791
 792/*
 793 * Init/Cleanup:
 794 */
 795
 796static irqreturn_t irq_handler(int irq, void *data)
 797{
 798	struct msm_gpu *gpu = data;
 799	return gpu->funcs->irq(gpu);
 800}
 801
 802static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 803{
 804	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 805
 806	if (ret < 1) {
 807		gpu->nr_clocks = 0;
 808		return ret;
 809	}
 810
 811	gpu->nr_clocks = ret;
 812
 813	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 814		gpu->nr_clocks, "core");
 815
 816	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 817		gpu->nr_clocks, "rbbmtimer");
 818
 819	return 0;
 820}
 821
 822/* Return a new address space for a msm_drm_private instance */
 823struct msm_gem_address_space *
 824msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 825{
 826	struct msm_gem_address_space *aspace = NULL;
 827	if (!gpu)
 828		return NULL;
 829
 830	/*
 831	 * If the target doesn't support private address spaces then return
 832	 * the global one
 833	 */
 834	if (gpu->funcs->create_private_address_space) {
 835		aspace = gpu->funcs->create_private_address_space(gpu);
 836		if (!IS_ERR(aspace))
 837			aspace->pid = get_pid(task_pid(task));
 838	}
 839
 840	if (IS_ERR_OR_NULL(aspace))
 841		aspace = msm_gem_address_space_get(gpu->aspace);
 842
 843	return aspace;
 844}
 845
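/*
 * One-time device setup: worker kthread, hangcheck timer, MMIO mapping,
 * IRQ, clocks, regulators, address space and the per-ring memptrs
 * buffer.  Any failure unwinds through the fail: path below.
 */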
 846int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 847		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 848		const char *name, struct msm_gpu_config *config)
 849{
 850	struct msm_drm_private *priv = drm->dev_private;
 851	int i, ret, nr_rings = config->nr_rings;
 852	void *memptrs;
 853	uint64_t memptrs_iova;
 854
 855	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 856		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
 857
 858	gpu->dev = drm;
 859	gpu->funcs = funcs;
 860	gpu->name = name;
 861
 862	gpu->worker = kthread_create_worker(0, "gpu-worker");
 863	if (IS_ERR(gpu->worker)) {
 864		ret = PTR_ERR(gpu->worker);
 865		gpu->worker = NULL;
 866		goto fail;
 867	}
 868
 869	sched_set_fifo_low(gpu->worker->task);
 870
 871	mutex_init(&gpu->active_lock);
 872	mutex_init(&gpu->lock);
 873	init_waitqueue_head(&gpu->retire_event);
 874	kthread_init_work(&gpu->retire_work, retire_worker);
 875	kthread_init_work(&gpu->recover_work, recover_worker);
 876	kthread_init_work(&gpu->fault_work, fault_worker);
 877
 878	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
 879
 880	/*
 881	 * If progress detection is supported, halve the hangcheck timer
 882	 * duration, as it takes two iterations of the hangcheck handler
 883	 * to detect a hang.
 884	 */
 885	if (funcs->progress)
 886		priv->hangcheck_period /= 2;
 887
 888	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 889
 890	spin_lock_init(&gpu->perf_lock);
 891
 892
 893	/* Map registers: */
 894	gpu->mmio = msm_ioremap(pdev, config->ioname);
 895	if (IS_ERR(gpu->mmio)) {
 896		ret = PTR_ERR(gpu->mmio);
 897		goto fail;
 898	}
 899
 900	/* Get Interrupt: */
 901	gpu->irq = platform_get_irq(pdev, 0);
 902	if (gpu->irq < 0) {
 903		ret = gpu->irq;
 904		goto fail;
 905	}
 906
 907	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 908			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
 909	if (ret) {
 910		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 911		goto fail;
 912	}
 913
 914	ret = get_clocks(pdev, gpu);
 915	if (ret)
 916		goto fail;
 917
 918	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 919	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 920	if (IS_ERR(gpu->ebi1_clk))
 921		gpu->ebi1_clk = NULL;
 922
 923	/* Acquire regulators: */
 924	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
 925	DBG("gpu_reg: %p", gpu->gpu_reg);
 926	if (IS_ERR(gpu->gpu_reg))
 927		gpu->gpu_reg = NULL;
 928
 929	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
 930	DBG("gpu_cx: %p", gpu->gpu_cx);
 931	if (IS_ERR(gpu->gpu_cx))
 932		gpu->gpu_cx = NULL;
 933
 934	platform_set_drvdata(pdev, &gpu->adreno_smmu);
 935
 936	msm_devfreq_init(gpu);
 937
 938
 939	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
 940
 941	if (gpu->aspace == NULL)
 942		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 943	else if (IS_ERR(gpu->aspace)) {
 944		ret = PTR_ERR(gpu->aspace);
 945		goto fail;
 946	}
 947
 948	memptrs = msm_gem_kernel_new(drm,
 949		sizeof(struct msm_rbmemptrs) * nr_rings,
 950		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
 951		&memptrs_iova);
 952
 953	if (IS_ERR(memptrs)) {
 954		ret = PTR_ERR(memptrs);
 955		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 956		goto fail;
 957	}
 958
 959	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
 960
 961	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 962		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
 963			ARRAY_SIZE(gpu->rb));
 964		nr_rings = ARRAY_SIZE(gpu->rb);
 965	}
 966
 967	/* Create ringbuffer(s): */
 968	for (i = 0; i < nr_rings; i++) {
 969		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 970
 971		if (IS_ERR(gpu->rb[i])) {
 972			ret = PTR_ERR(gpu->rb[i]);
 973			DRM_DEV_ERROR(drm->dev,
 974				"could not create ringbuffer %d: %d\n", i, ret);
 975			goto fail;
 976		}
 977
 978		memptrs += sizeof(struct msm_rbmemptrs);
 979		memptrs_iova += sizeof(struct msm_rbmemptrs);
 980	}
 981
 982	gpu->nr_rings = nr_rings;
 983
 984	refcount_set(&gpu->sysprof_active, 1);
 985
 986	return 0;
 987
 988fail:
 989	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
 990		msm_ringbuffer_destroy(gpu->rb[i]);
 991		gpu->rb[i] = NULL;
 992	}
 993
 994	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
 995
 996	platform_set_drvdata(pdev, NULL);
 997	return ret;
 998}
 999
1000void msm_gpu_cleanup(struct msm_gpu *gpu)
1001{
1002	int i;
1003
1004	DBG("%s", gpu->name);
1005
1006	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
1007		msm_ringbuffer_destroy(gpu->rb[i]);
1008		gpu->rb[i] = NULL;
1009	}
1010
1011	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
1012
1013	if (!IS_ERR_OR_NULL(gpu->aspace)) {
1014		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
1015		msm_gem_address_space_put(gpu->aspace);
1016	}
1017
1018	if (gpu->worker) {
1019		kthread_destroy_worker(gpu->worker);
1020	}
1021
1022	msm_devfreq_cleanup(gpu);
1023
1024	platform_set_drvdata(gpu->pdev, NULL);
1025}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2013 Red Hat
  4 * Author: Rob Clark <robdclark@gmail.com>
  5 */
  6
  7#include "msm_gpu.h"
  8#include "msm_gem.h"
  9#include "msm_mmu.h"
 10#include "msm_fence.h"
 11#include "msm_gpu_trace.h"
 12#include "adreno/adreno_gpu.h"
 13
 14#include <generated/utsrelease.h>
 15#include <linux/string_helpers.h>
 16#include <linux/devfreq.h>
 17#include <linux/devcoredump.h>
 18#include <linux/sched/task.h>
 19
 20/*
 21 * Power Management:
 22 */
 23
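/*
 * devfreq integration: in v5.9 the governor callbacks still live in this
 * file.  target() resolves the requested rate against the OPP table and
 * get_dev_status() reports busy vs. total time for the simple_ondemand
 * governor.  The v6.13.7 listing above only calls msm_devfreq_*
 * helpers; later kernels split this logic out of msm_gpu.c.
 */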
 24static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 25		u32 flags)
 26{
 27	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 28	struct dev_pm_opp *opp;
 29
 30	opp = devfreq_recommended_opp(dev, freq, flags);
 31
 32	if (IS_ERR(opp))
 33		return PTR_ERR(opp);
 34
 35	if (gpu->funcs->gpu_set_freq)
 36		gpu->funcs->gpu_set_freq(gpu, opp);
 37	else
 38		clk_set_rate(gpu->core_clk, *freq);
 39
 40	dev_pm_opp_put(opp);
 41
 42	return 0;
 43}
 44
 45static int msm_devfreq_get_dev_status(struct device *dev,
 46		struct devfreq_dev_status *status)
 47{
 48	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 49	ktime_t time;
 50
 51	if (gpu->funcs->gpu_get_freq)
 52		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
 53	else
 54		status->current_frequency = clk_get_rate(gpu->core_clk);
 55
 56	status->busy_time = gpu->funcs->gpu_busy(gpu);
 57
 58	time = ktime_get();
 59	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
 60	gpu->devfreq.time = time;
 61
 62	return 0;
 63}
 64
 65static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 66{
 67	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 68
 69	if (gpu->funcs->gpu_get_freq)
 70		*freq = gpu->funcs->gpu_get_freq(gpu);
 71	else
 72		*freq = clk_get_rate(gpu->core_clk);
 73
 74	return 0;
 75}
 76
 77static struct devfreq_dev_profile msm_devfreq_profile = {
 78	.polling_ms = 10,
 79	.target = msm_devfreq_target,
 80	.get_dev_status = msm_devfreq_get_dev_status,
 81	.get_cur_freq = msm_devfreq_get_cur_freq,
 82};
 83
 84static void msm_devfreq_init(struct msm_gpu *gpu)
 85{
 86	/* We need target support to do devfreq */
 87	if (!gpu->funcs->gpu_busy)
 88		return;
 89
 90	msm_devfreq_profile.initial_freq = gpu->fast_rate;
 91
 92	/*
 93	 * Don't set the freq_table or max_state and let devfreq build the table
 94	 * from OPP
  95	 * After a deferred probe, these may have been left at non-zero values,
 96	 * so set them back to zero before creating the devfreq device
 97	 */
 98	msm_devfreq_profile.freq_table = NULL;
 99	msm_devfreq_profile.max_state = 0;
100
101	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
102			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
103			NULL);
104
105	if (IS_ERR(gpu->devfreq.devfreq)) {
106		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
107		gpu->devfreq.devfreq = NULL;
108	}
109
110	devfreq_suspend_device(gpu->devfreq.devfreq);
111}
112
113static int enable_pwrrail(struct msm_gpu *gpu)
114{
115	struct drm_device *dev = gpu->dev;
116	int ret = 0;
117
118	if (gpu->gpu_reg) {
119		ret = regulator_enable(gpu->gpu_reg);
120		if (ret) {
121			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
122			return ret;
123		}
124	}
125
126	if (gpu->gpu_cx) {
127		ret = regulator_enable(gpu->gpu_cx);
128		if (ret) {
129			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
130			return ret;
131		}
132	}
133
134	return 0;
135}
136
137static int disable_pwrrail(struct msm_gpu *gpu)
138{
139	if (gpu->gpu_cx)
140		regulator_disable(gpu->gpu_cx);
141	if (gpu->gpu_reg)
142		regulator_disable(gpu->gpu_reg);
143	return 0;
144}
145
146static int enable_clk(struct msm_gpu *gpu)
147{
148	if (gpu->core_clk && gpu->fast_rate)
149		clk_set_rate(gpu->core_clk, gpu->fast_rate);
150
 151	/* Set the RBBM timer rate to 19.2 MHz */
152	if (gpu->rbbmtimer_clk)
153		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
154
155	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
156}
157
158static int disable_clk(struct msm_gpu *gpu)
159{
160	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
161
162	/*
163	 * Set the clock to a deliberately low rate. On older targets the clock
164	 * speed had to be non zero to avoid problems. On newer targets this
165	 * will be rounded down to zero anyway so it all works out.
166	 */
167	if (gpu->core_clk)
168		clk_set_rate(gpu->core_clk, 27000000);
169
170	if (gpu->rbbmtimer_clk)
171		clk_set_rate(gpu->rbbmtimer_clk, 0);
172
173	return 0;
174}
175
176static int enable_axi(struct msm_gpu *gpu)
177{
178	if (gpu->ebi1_clk)
179		clk_prepare_enable(gpu->ebi1_clk);
180	return 0;
181}
182
183static int disable_axi(struct msm_gpu *gpu)
184{
185	if (gpu->ebi1_clk)
186		clk_disable_unprepare(gpu->ebi1_clk);
187	return 0;
188}
189
190void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
191{
192	gpu->devfreq.busy_cycles = 0;
193	gpu->devfreq.time = ktime_get();
194
195	devfreq_resume_device(gpu->devfreq.devfreq);
196}
197
198int msm_gpu_pm_resume(struct msm_gpu *gpu)
199{
200	int ret;
201
202	DBG("%s", gpu->name);
203
204	ret = enable_pwrrail(gpu);
205	if (ret)
206		return ret;
207
208	ret = enable_clk(gpu);
209	if (ret)
210		return ret;
211
212	ret = enable_axi(gpu);
213	if (ret)
214		return ret;
215
216	msm_gpu_resume_devfreq(gpu);
217
218	gpu->needs_hw_init = true;
219
220	return 0;
221}
222
223int msm_gpu_pm_suspend(struct msm_gpu *gpu)
224{
225	int ret;
226
227	DBG("%s", gpu->name);
228
229	devfreq_suspend_device(gpu->devfreq.devfreq);
230
231	ret = disable_axi(gpu);
232	if (ret)
233		return ret;
234
235	ret = disable_clk(gpu);
236	if (ret)
237		return ret;
238
239	ret = disable_pwrrail(gpu);
240	if (ret)
241		return ret;
242
243	return 0;
244}
245
246int msm_gpu_hw_init(struct msm_gpu *gpu)
247{
248	int ret;
249
250	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
251
252	if (!gpu->needs_hw_init)
253		return 0;
254
255	disable_irq(gpu->irq);
256	ret = gpu->funcs->hw_init(gpu);
257	if (!ret)
258		gpu->needs_hw_init = false;
259	enable_irq(gpu->irq);
260
261	return ret;
262}
263
264#ifdef CONFIG_DEV_COREDUMP
265static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
266		size_t count, void *data, size_t datalen)
267{
268	struct msm_gpu *gpu = data;
269	struct drm_print_iterator iter;
270	struct drm_printer p;
271	struct msm_gpu_state *state;
272
273	state = msm_gpu_crashstate_get(gpu);
274	if (!state)
275		return 0;
276
277	iter.data = buffer;
278	iter.offset = 0;
279	iter.start = offset;
280	iter.remain = count;
281
282	p = drm_coredump_printer(&iter);
283
284	drm_printf(&p, "---\n");
285	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
286	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
287	drm_printf(&p, "time: %lld.%09ld\n",
288		state->time.tv_sec, state->time.tv_nsec);
289	if (state->comm)
290		drm_printf(&p, "comm: %s\n", state->comm);
291	if (state->cmd)
292		drm_printf(&p, "cmdline: %s\n", state->cmd);
293
294	gpu->funcs->show(gpu, state, &p);
295
296	msm_gpu_crashstate_put(gpu);
297
298	return count - iter.remain;
299}
300
301static void msm_gpu_devcoredump_free(void *data)
302{
303	struct msm_gpu *gpu = data;
304
305	msm_gpu_crashstate_put(gpu);
306}
307
308static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
309		struct msm_gem_object *obj, u64 iova, u32 flags)
310{
311	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
312
 313	/* Don't record write-only objects */
314	state_bo->size = obj->base.size;
315	state_bo->iova = iova;
316
 317	/* Only store data for non-imported buffer objects marked for read */
318	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
319		void *ptr;
320
321		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
322		if (!state_bo->data)
323			goto out;
324
325		ptr = msm_gem_get_vaddr_active(&obj->base);
326		if (IS_ERR(ptr)) {
327			kvfree(state_bo->data);
328			state_bo->data = NULL;
329			goto out;
330		}
331
332		memcpy(state_bo->data, ptr, obj->base.size);
333		msm_gem_put_vaddr(&obj->base);
334	}
335out:
336	state->nr_bos++;
337}
338
339static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
340		struct msm_gem_submit *submit, char *comm, char *cmd)
341{
342	struct msm_gpu_state *state;
343
344	/* Check if the target supports capturing crash state */
345	if (!gpu->funcs->gpu_state_get)
346		return;
347
348	/* Only save one crash state at a time */
349	if (gpu->crashstate)
350		return;
351
352	state = gpu->funcs->gpu_state_get(gpu);
353	if (IS_ERR_OR_NULL(state))
354		return;
355
356	/* Fill in the additional crash state information */
357	state->comm = kstrdup(comm, GFP_KERNEL);
358	state->cmd = kstrdup(cmd, GFP_KERNEL);
359
360	if (submit) {
361		int i, nr = 0;
362
363		/* count # of buffers to dump: */
364		for (i = 0; i < submit->nr_bos; i++)
365			if (should_dump(submit, i))
366				nr++;
367		/* always dump cmd bo's, but don't double count them: */
368		for (i = 0; i < submit->nr_cmds; i++)
369			if (!should_dump(submit, submit->cmd[i].idx))
370				nr++;
371
372		state->bos = kcalloc(nr,
373			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
374
375		for (i = 0; i < submit->nr_bos; i++) {
376			if (should_dump(submit, i)) {
377				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
378					submit->bos[i].iova, submit->bos[i].flags);
379			}
380		}
381
382		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
383			int idx = submit->cmd[i].idx;
384
385			if (!should_dump(submit, submit->cmd[i].idx)) {
386				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
387					submit->bos[idx].iova, submit->bos[idx].flags);
388			}
389		}
390	}
391
392	/* Set the active crash state to be dumped on failure */
393	gpu->crashstate = state;
394
395	/* FIXME: Release the crashstate if this errors out? */
396	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
397		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
398}
399#else
400static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
401		struct msm_gem_submit *submit, char *comm, char *cmd)
402{
403}
404#endif
405
406/*
407 * Hangcheck detection for locked gpu:
408 */
409
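/*
 * Signal the fence of every submit on the ring up to and including the
 * given seqno; submits past it remain pending.
 */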
410static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
411		uint32_t fence)
412{
413	struct msm_gem_submit *submit;
414
415	list_for_each_entry(submit, &ring->submits, node) {
416		if (submit->seqno > fence)
417			break;
418
419		msm_update_fence(submit->ring->fctx,
420			submit->fence->seqno);
421	}
422}
423
424static struct msm_gem_submit *
425find_submit(struct msm_ringbuffer *ring, uint32_t fence)
426{
427	struct msm_gem_submit *submit;
428
429	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
430
431	list_for_each_entry(submit, &ring->submits, node)
432		if (submit->seqno == fence)
433			return submit;
434
435	return NULL;
436}
437
438static void retire_submits(struct msm_gpu *gpu);
439
440static void recover_worker(struct work_struct *work)
441{
442	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
443	struct drm_device *dev = gpu->dev;
444	struct msm_drm_private *priv = dev->dev_private;
445	struct msm_gem_submit *submit;
446	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
447	char *comm = NULL, *cmd = NULL;
448	int i;
449
450	mutex_lock(&dev->struct_mutex);
451
452	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
453
454	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
455	if (submit) {
456		struct task_struct *task;
457
458		/* Increment the fault counts */
459		gpu->global_faults++;
460		submit->queue->faults++;
461
462		task = get_pid_task(submit->pid, PIDTYPE_PID);
463		if (task) {
464			comm = kstrdup(task->comm, GFP_KERNEL);
465			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
466			put_task_struct(task);
467		}
468
469		if (comm && cmd) {
470			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
471				gpu->name, comm, cmd);
472
473			msm_rd_dump_submit(priv->hangrd, submit,
474				"offending task: %s (%s)", comm, cmd);
475		} else
476			msm_rd_dump_submit(priv->hangrd, submit, NULL);
477	}
478
479	/* Record the crash state */
480	pm_runtime_get_sync(&gpu->pdev->dev);
481	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
482	pm_runtime_put_sync(&gpu->pdev->dev);
483
484	kfree(cmd);
485	kfree(comm);
486
487	/*
488	 * Update all the rings with the latest and greatest fence.. this
489	 * needs to happen after msm_rd_dump_submit() to ensure that the
490	 * bo's referenced by the offending submit are still around.
491	 */
492	for (i = 0; i < gpu->nr_rings; i++) {
493		struct msm_ringbuffer *ring = gpu->rb[i];
494
495		uint32_t fence = ring->memptrs->fence;
496
497		/*
498		 * For the current (faulting?) ring/submit advance the fence by
499		 * one more to clear the faulting submit
500		 */
501		if (ring == cur_ring)
502			fence++;
503
504		update_fences(gpu, ring, fence);
505	}
506
507	if (msm_gpu_active(gpu)) {
508		/* retire completed submits, plus the one that hung: */
509		retire_submits(gpu);
510
511		pm_runtime_get_sync(&gpu->pdev->dev);
512		gpu->funcs->recover(gpu);
513		pm_runtime_put_sync(&gpu->pdev->dev);
514
515		/*
516		 * Replay all remaining submits starting with highest priority
517		 * ring
518		 */
519		for (i = 0; i < gpu->nr_rings; i++) {
520			struct msm_ringbuffer *ring = gpu->rb[i];
521
522			list_for_each_entry(submit, &ring->submits, node)
523				gpu->funcs->submit(gpu, submit, NULL);
524		}
525	}
526
527	mutex_unlock(&dev->struct_mutex);
528
529	msm_gpu_retire(gpu);
530}
531
532static void hangcheck_timer_reset(struct msm_gpu *gpu)
533{
534	DBG("%s", gpu->name);
535	mod_timer(&gpu->hangcheck_timer,
536			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
537}
538
539static void hangcheck_handler(struct timer_list *t)
540{
541	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
542	struct drm_device *dev = gpu->dev;
543	struct msm_drm_private *priv = dev->dev_private;
544	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
545	uint32_t fence = ring->memptrs->fence;
546
547	if (fence != ring->hangcheck_fence) {
548		/* some progress has been made.. ya! */
549		ring->hangcheck_fence = fence;
550	} else if (fence < ring->seqno) {
551		/* no progress and not done.. hung! */
552		ring->hangcheck_fence = fence;
553		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
554				gpu->name, ring->id);
555		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
556				gpu->name, fence);
557		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
558				gpu->name, ring->seqno);
559
560		queue_work(priv->wq, &gpu->recover_work);
561	}
562
563	/* if still more pending work, reset the hangcheck timer: */
564	if (ring->seqno > ring->hangcheck_fence)
565		hangcheck_timer_reset(gpu);
566
567	/* workaround for missing irq: */
568	queue_work(priv->wq, &gpu->retire_work);
569}
570
571/*
572 * Performance Counters:
573 */
574
575/* called under perf_lock */
576static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
577{
578	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
579	int i, n = min(ncntrs, gpu->num_perfcntrs);
580
581	/* read current values: */
582	for (i = 0; i < gpu->num_perfcntrs; i++)
583		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
584
585	/* update cntrs: */
586	for (i = 0; i < n; i++)
587		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
588
589	/* save current values: */
590	for (i = 0; i < gpu->num_perfcntrs; i++)
591		gpu->last_cntrs[i] = current_cntrs[i];
592
593	return n;
594}
595
596static void update_sw_cntrs(struct msm_gpu *gpu)
597{
598	ktime_t time;
599	uint32_t elapsed;
600	unsigned long flags;
601
602	spin_lock_irqsave(&gpu->perf_lock, flags);
603	if (!gpu->perfcntr_active)
604		goto out;
605
606	time = ktime_get();
607	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
608
609	gpu->totaltime += elapsed;
610	if (gpu->last_sample.active)
611		gpu->activetime += elapsed;
612
613	gpu->last_sample.active = msm_gpu_active(gpu);
614	gpu->last_sample.time = time;
615
616out:
617	spin_unlock_irqrestore(&gpu->perf_lock, flags);
618}
619
620void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
621{
622	unsigned long flags;
623
624	pm_runtime_get_sync(&gpu->pdev->dev);
625
626	spin_lock_irqsave(&gpu->perf_lock, flags);
627	/* we could dynamically enable/disable perfcntr registers too.. */
628	gpu->last_sample.active = msm_gpu_active(gpu);
629	gpu->last_sample.time = ktime_get();
630	gpu->activetime = gpu->totaltime = 0;
631	gpu->perfcntr_active = true;
632	update_hw_cntrs(gpu, 0, NULL);
633	spin_unlock_irqrestore(&gpu->perf_lock, flags);
634}
635
636void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
637{
638	gpu->perfcntr_active = false;
639	pm_runtime_put_sync(&gpu->pdev->dev);
640}
641
642/* returns -errno or # of cntrs sampled */
643int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
644		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
645{
646	unsigned long flags;
647	int ret;
648
649	spin_lock_irqsave(&gpu->perf_lock, flags);
650
651	if (!gpu->perfcntr_active) {
652		ret = -EINVAL;
653		goto out;
654	}
655
656	*activetime = gpu->activetime;
657	*totaltime = gpu->totaltime;
658
659	gpu->activetime = gpu->totaltime = 0;
660
661	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
662
663out:
664	spin_unlock_irqrestore(&gpu->perf_lock, flags);
665
666	return ret;
667}
668
669/*
670 * Cmdstream submission/retirement:
671 */
672
673static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
674		struct msm_gem_submit *submit)
675{
676	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
677	volatile struct msm_gpu_submit_stats *stats;
678	u64 elapsed, clock = 0;
679	int i;
680
681	stats = &ring->memptrs->stats[index];
 682	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
683	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
684	do_div(elapsed, 192);
685
686	/* Calculate the clock frequency from the number of CP cycles */
687	if (elapsed) {
688		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
689		do_div(clock, elapsed);
690	}
691
692	trace_msm_gpu_submit_retired(submit, elapsed, clock,
693		stats->alwayson_start, stats->alwayson_end);
694
695	for (i = 0; i < submit->nr_bos; i++) {
696		struct msm_gem_object *msm_obj = submit->bos[i].obj;
697		/* move to inactive: */
698		msm_gem_move_to_inactive(&msm_obj->base);
699		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
700		drm_gem_object_put_locked(&msm_obj->base);
701	}
702
703	pm_runtime_mark_last_busy(&gpu->pdev->dev);
704	pm_runtime_put_autosuspend(&gpu->pdev->dev);
705	msm_gem_submit_free(submit);
706}
707
708static void retire_submits(struct msm_gpu *gpu)
709{
710	struct drm_device *dev = gpu->dev;
711	struct msm_gem_submit *submit, *tmp;
712	int i;
713
714	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
715
716	/* Retire the commits starting with highest priority */
717	for (i = 0; i < gpu->nr_rings; i++) {
718		struct msm_ringbuffer *ring = gpu->rb[i];
719
720		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
721			if (dma_fence_is_signaled(submit->fence))
722				retire_submit(gpu, ring, submit);
723		}
724	}
725}
726
727static void retire_worker(struct work_struct *work)
728{
729	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
730	struct drm_device *dev = gpu->dev;
731	int i;
732
733	for (i = 0; i < gpu->nr_rings; i++)
734		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
735
736	mutex_lock(&dev->struct_mutex);
737	retire_submits(gpu);
738	mutex_unlock(&dev->struct_mutex);
739}
740
741/* call from irq handler to schedule work to retire bo's */
742void msm_gpu_retire(struct msm_gpu *gpu)
743{
744	struct msm_drm_private *priv = gpu->dev->dev_private;
745	queue_work(priv->wq, &gpu->retire_work);
746	update_sw_cntrs(gpu);
747}
748
749/* add bo's to gpu's ring, and kick gpu: */
750void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
751		struct msm_file_private *ctx)
752{
753	struct drm_device *dev = gpu->dev;
754	struct msm_drm_private *priv = dev->dev_private;
755	struct msm_ringbuffer *ring = submit->ring;
756	int i;
757
758	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
759
760	pm_runtime_get_sync(&gpu->pdev->dev);
761
762	msm_gpu_hw_init(gpu);
763
764	submit->seqno = ++ring->seqno;
765
766	list_add_tail(&submit->node, &ring->submits);
767
768	msm_rd_dump_submit(priv->rd, submit, NULL);
769
770	update_sw_cntrs(gpu);
771
772	for (i = 0; i < submit->nr_bos; i++) {
773		struct msm_gem_object *msm_obj = submit->bos[i].obj;
774		uint64_t iova;
775
776		/* can't happen yet.. but when we add 2d support we'll have
777		 * to deal w/ cross-ring synchronization:
778		 */
779		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
780
781		/* submit takes a reference to the bo and iova until retired: */
782		drm_gem_object_get(&msm_obj->base);
783		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
784
785		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
786			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
787		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
788			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
789	}
790
791	gpu->funcs->submit(gpu, submit, ctx);
792	priv->lastctx = ctx;
793
794	hangcheck_timer_reset(gpu);
795}
796
797/*
798 * Init/Cleanup:
799 */
800
801static irqreturn_t irq_handler(int irq, void *data)
802{
803	struct msm_gpu *gpu = data;
804	return gpu->funcs->irq(gpu);
805}
806
807static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
808{
809	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
810
811	if (ret < 1) {
812		gpu->nr_clocks = 0;
813		return ret;
814	}
815
816	gpu->nr_clocks = ret;
817
818	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
819		gpu->nr_clocks, "core");
820
821	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
822		gpu->nr_clocks, "rbbmtimer");
823
824	return 0;
825}
826
827int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
828		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
829		const char *name, struct msm_gpu_config *config)
830{
831	int i, ret, nr_rings = config->nr_rings;
832	void *memptrs;
833	uint64_t memptrs_iova;
834
835	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
836		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
837
838	gpu->dev = drm;
839	gpu->funcs = funcs;
840	gpu->name = name;
841
842	INIT_LIST_HEAD(&gpu->active_list);
843	INIT_WORK(&gpu->retire_work, retire_worker);
844	INIT_WORK(&gpu->recover_work, recover_worker);
845
846
847	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
848
849	spin_lock_init(&gpu->perf_lock);
850
851
852	/* Map registers: */
853	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
854	if (IS_ERR(gpu->mmio)) {
855		ret = PTR_ERR(gpu->mmio);
856		goto fail;
857	}
858
859	/* Get Interrupt: */
860	gpu->irq = platform_get_irq(pdev, 0);
861	if (gpu->irq < 0) {
862		ret = gpu->irq;
863		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
864		goto fail;
865	}
866
867	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
868			IRQF_TRIGGER_HIGH, gpu->name, gpu);
869	if (ret) {
870		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
871		goto fail;
872	}
873
874	ret = get_clocks(pdev, gpu);
875	if (ret)
876		goto fail;
877
878	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
879	DBG("ebi1_clk: %p", gpu->ebi1_clk);
880	if (IS_ERR(gpu->ebi1_clk))
881		gpu->ebi1_clk = NULL;
882
883	/* Acquire regulators: */
884	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
885	DBG("gpu_reg: %p", gpu->gpu_reg);
886	if (IS_ERR(gpu->gpu_reg))
887		gpu->gpu_reg = NULL;
888
889	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
890	DBG("gpu_cx: %p", gpu->gpu_cx);
891	if (IS_ERR(gpu->gpu_cx))
892		gpu->gpu_cx = NULL;
893
894	gpu->pdev = pdev;
895	platform_set_drvdata(pdev, gpu);
896
897	msm_devfreq_init(gpu);
898
899
900	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
901
902	if (gpu->aspace == NULL)
903		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
904	else if (IS_ERR(gpu->aspace)) {
905		ret = PTR_ERR(gpu->aspace);
906		goto fail;
907	}
908
909	memptrs = msm_gem_kernel_new(drm,
910		sizeof(struct msm_rbmemptrs) * nr_rings,
911		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
912		&memptrs_iova);
913
914	if (IS_ERR(memptrs)) {
915		ret = PTR_ERR(memptrs);
916		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
917		goto fail;
918	}
919
920	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
921
922	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
923		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
924			ARRAY_SIZE(gpu->rb));
925		nr_rings = ARRAY_SIZE(gpu->rb);
926	}
927
928	/* Create ringbuffer(s): */
929	for (i = 0; i < nr_rings; i++) {
930		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
931
932		if (IS_ERR(gpu->rb[i])) {
933			ret = PTR_ERR(gpu->rb[i]);
934			DRM_DEV_ERROR(drm->dev,
935				"could not create ringbuffer %d: %d\n", i, ret);
936			goto fail;
937		}
938
939		memptrs += sizeof(struct msm_rbmemptrs);
940		memptrs_iova += sizeof(struct msm_rbmemptrs);
941	}
942
943	gpu->nr_rings = nr_rings;
944
 
 
945	return 0;
946
947fail:
948	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
949		msm_ringbuffer_destroy(gpu->rb[i]);
950		gpu->rb[i] = NULL;
951	}
952
953	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
954
955	platform_set_drvdata(pdev, NULL);
956	return ret;
957}
958
959void msm_gpu_cleanup(struct msm_gpu *gpu)
960{
961	int i;
962
963	DBG("%s", gpu->name);
964
965	WARN_ON(!list_empty(&gpu->active_list));
966
967	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
968		msm_ringbuffer_destroy(gpu->rb[i]);
969		gpu->rb[i] = NULL;
970	}
971
972	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
973
974	if (!IS_ERR_OR_NULL(gpu->aspace)) {
975		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
976		msm_gem_address_space_put(gpu->aspace);
977	}
978}