v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include "msm_gpu.h"
   8#include "msm_gem.h"
   9#include "msm_mmu.h"
  10#include "msm_fence.h"
  11#include "msm_gpu_trace.h"
  12#include "adreno/adreno_gpu.h"
  13
  14#include <generated/utsrelease.h>
  15#include <linux/string_helpers.h>
  16#include <linux/pm_opp.h>
  17#include <linux/devfreq.h>
   18#include <linux/devcoredump.h>
   19
  20/*
  21 * Power Management:
  22 */
  23
  24static int msm_devfreq_target(struct device *dev, unsigned long *freq,
  25		u32 flags)
  26{
  27	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
  28	struct dev_pm_opp *opp;
  29
  30	opp = devfreq_recommended_opp(dev, freq, flags);
  31
  32	if (IS_ERR(opp))
  33		return PTR_ERR(opp);
  34
  35	if (gpu->funcs->gpu_set_freq)
  36		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
  37	else
  38		clk_set_rate(gpu->core_clk, *freq);
  39
  40	dev_pm_opp_put(opp);
  41
  42	return 0;
  43}
  44
  45static int msm_devfreq_get_dev_status(struct device *dev,
  46		struct devfreq_dev_status *status)
  47{
  48	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
  49	ktime_t time;
  50
  51	if (gpu->funcs->gpu_get_freq)
  52		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
  53	else
  54		status->current_frequency = clk_get_rate(gpu->core_clk);
  55
  56	status->busy_time = gpu->funcs->gpu_busy(gpu);
  57
  58	time = ktime_get();
  59	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
  60	gpu->devfreq.time = time;
  61
  62	return 0;
  63}
  64
  65static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
  66{
  67	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
  68
  69	if (gpu->funcs->gpu_get_freq)
  70		*freq = gpu->funcs->gpu_get_freq(gpu);
  71	else
  72		*freq = clk_get_rate(gpu->core_clk);
  73
  74	return 0;
  75}
  76
  77static struct devfreq_dev_profile msm_devfreq_profile = {
  78	.polling_ms = 10,
  79	.target = msm_devfreq_target,
  80	.get_dev_status = msm_devfreq_get_dev_status,
  81	.get_cur_freq = msm_devfreq_get_cur_freq,
  82};
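
The profile above is everything the devfreq core needs from the driver: the simple_ondemand governor periodically calls .get_dev_status(), derives a load figure from busy_time/total_time, and feeds the chosen frequency back through .target(). A minimal sketch of that load calculation, with hypothetical names (the real logic lives in drivers/devfreq/governor_simpleondemand.c):

	/* Illustration only -- not part of msm_gpu.c. */
	static unsigned long pick_target_freq(struct devfreq_dev_status *status,
					      unsigned long cur_freq)
	{
		/* for this driver, busy_time (from gpu_busy()) and
		 * total_time are both in microseconds */
		unsigned long load = status->total_time ?
			(100 * status->busy_time) / status->total_time : 0;

		if (load > 90)
			return ULONG_MAX;	/* devfreq clamps to the max OPP */

		return (cur_freq * load) / 100;
	}
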
  83
  84static void msm_devfreq_init(struct msm_gpu *gpu)
  85{
  86	/* We need target support to do devfreq */
  87	if (!gpu->funcs->gpu_busy)
  88		return;
  89
  90	msm_devfreq_profile.initial_freq = gpu->fast_rate;
  91
  92	/*
  93	 * Don't set the freq_table or max_state and let devfreq build the table
   94	 * from OPP
   95	 */
   96
  97	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
  98			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
  99			NULL);
 100
 101	if (IS_ERR(gpu->devfreq.devfreq)) {
 102		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
 103		gpu->devfreq.devfreq = NULL;
 104	}
 105
 106	devfreq_suspend_device(gpu->devfreq.devfreq);
 107}
 108
 109static int enable_pwrrail(struct msm_gpu *gpu)
 110{
 111	struct drm_device *dev = gpu->dev;
 112	int ret = 0;
 113
 114	if (gpu->gpu_reg) {
 115		ret = regulator_enable(gpu->gpu_reg);
 116		if (ret) {
 117			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
 118			return ret;
 119		}
 120	}
 121
 122	if (gpu->gpu_cx) {
 123		ret = regulator_enable(gpu->gpu_cx);
 124		if (ret) {
 125			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
 126			return ret;
 127		}
 128	}
 129
 130	return 0;
 131}
 132
 133static int disable_pwrrail(struct msm_gpu *gpu)
 134{
 135	if (gpu->gpu_cx)
 136		regulator_disable(gpu->gpu_cx);
 137	if (gpu->gpu_reg)
 138		regulator_disable(gpu->gpu_reg);
 139	return 0;
 140}
 141
 142static int enable_clk(struct msm_gpu *gpu)
 143{
 144	if (gpu->core_clk && gpu->fast_rate)
 145		clk_set_rate(gpu->core_clk, gpu->fast_rate);
 146
  147	/* Set the RBBM timer rate to 19.2 MHz */
 148	if (gpu->rbbmtimer_clk)
 149		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
 150
 151	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
 152}
 153
 154static int disable_clk(struct msm_gpu *gpu)
 155{
 156	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
 157
 158	/*
 159	 * Set the clock to a deliberately low rate. On older targets the clock
 160	 * speed had to be non zero to avoid problems. On newer targets this
 161	 * will be rounded down to zero anyway so it all works out.
 162	 */
 163	if (gpu->core_clk)
 164		clk_set_rate(gpu->core_clk, 27000000);
 165
 166	if (gpu->rbbmtimer_clk)
 167		clk_set_rate(gpu->rbbmtimer_clk, 0);
 168
 169	return 0;
 170}
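
The 27 MHz request above deserves a note: it is deliberately below any real operating point, so the clock framework resolves it safely on either generation.

	/*
	 * Illustration only, hypothetical rates: on an older target whose
	 * lowest supported core rate is 27 MHz, the request lands there and
	 * the clock stays alive; on a newer target whose rates round down,
	 * it resolves to 0 and the clock is effectively parked.  Both
	 * outcomes are what disable_clk() wants.
	 */
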
 171
 172static int enable_axi(struct msm_gpu *gpu)
 173{
 174	if (gpu->ebi1_clk)
 175		clk_prepare_enable(gpu->ebi1_clk);
 176	return 0;
 177}
 178
 179static int disable_axi(struct msm_gpu *gpu)
 180{
 181	if (gpu->ebi1_clk)
 182		clk_disable_unprepare(gpu->ebi1_clk);
 183	return 0;
 184}
 185
 186void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
 187{
 188	gpu->devfreq.busy_cycles = 0;
 189	gpu->devfreq.time = ktime_get();
 190
 191	devfreq_resume_device(gpu->devfreq.devfreq);
 192}
 193
 194int msm_gpu_pm_resume(struct msm_gpu *gpu)
 195{
 196	int ret;
 197
 198	DBG("%s", gpu->name);
 199
 200	ret = enable_pwrrail(gpu);
 201	if (ret)
 202		return ret;
 203
 204	ret = enable_clk(gpu);
 205	if (ret)
 206		return ret;
 207
 208	ret = enable_axi(gpu);
 209	if (ret)
 210		return ret;
 211
 212	msm_gpu_resume_devfreq(gpu);
 213
 214	gpu->needs_hw_init = true;
 215
 216	return 0;
 217}
 218
 219int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 220{
 221	int ret;
 222
 223	DBG("%s", gpu->name);
 224
 225	devfreq_suspend_device(gpu->devfreq.devfreq);
 226
 227	ret = disable_axi(gpu);
 228	if (ret)
 229		return ret;
 230
 231	ret = disable_clk(gpu);
 232	if (ret)
 233		return ret;
 234
 235	ret = disable_pwrrail(gpu);
 236	if (ret)
 237		return ret;
 238
 239	return 0;
 240}
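
Resume brings things up rail -> clock -> AXI, and suspend tears them down in exactly the reverse order, with devfreq stopped first so the governor cannot poke a half-powered GPU. A minimal sketch of runtime-PM glue driving this pair (hypothetical names; the in-tree callbacks live in adreno_device.c):

	static int example_runtime_resume(struct device *dev)
	{
		struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

		return msm_gpu_pm_resume(gpu);
	}

	static int example_runtime_suspend(struct device *dev)
	{
		struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

		return msm_gpu_pm_suspend(gpu);
	}
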
 241
 242int msm_gpu_hw_init(struct msm_gpu *gpu)
 243{
 244	int ret;
 245
 246	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
 247
 248	if (!gpu->needs_hw_init)
 249		return 0;
 250
 251	disable_irq(gpu->irq);
 252	ret = gpu->funcs->hw_init(gpu);
 253	if (!ret)
 254		gpu->needs_hw_init = false;
 255	enable_irq(gpu->irq);
 256
 257	return ret;
 258}
 259
 260#ifdef CONFIG_DEV_COREDUMP
 261static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 262		size_t count, void *data, size_t datalen)
 263{
 264	struct msm_gpu *gpu = data;
 265	struct drm_print_iterator iter;
 266	struct drm_printer p;
 267	struct msm_gpu_state *state;
 268
 269	state = msm_gpu_crashstate_get(gpu);
 270	if (!state)
 271		return 0;
 272
 273	iter.data = buffer;
 274	iter.offset = 0;
 275	iter.start = offset;
 276	iter.remain = count;
 277
 278	p = drm_coredump_printer(&iter);
 279
 280	drm_printf(&p, "---\n");
 281	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
 282	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
 283	drm_printf(&p, "time: %lld.%09ld\n",
 284		state->time.tv_sec, state->time.tv_nsec);
 285	if (state->comm)
 286		drm_printf(&p, "comm: %s\n", state->comm);
 287	if (state->cmd)
 288		drm_printf(&p, "cmdline: %s\n", state->cmd);
 289
 290	gpu->funcs->show(gpu, state, &p);
 291
 292	msm_gpu_crashstate_put(gpu);
 293
 294	return count - iter.remain;
 295}
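
The drm_coredump_printer() iterator makes this a seekable read: only printf output falling inside [offset, offset + count) is copied into buffer, and count - iter.remain is the number of bytes actually produced in that window.

	/*
	 * Worked example, hypothetical sizes: for a 10,000-byte dump, a read
	 * with count=4096 at offset=8192 copies bytes 8192..9999 and returns
	 * 1808; the next read, at offset=10000, returns 0 and userspace
	 * stops reading.
	 */
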
 296
 297static void msm_gpu_devcoredump_free(void *data)
 298{
 299	struct msm_gpu *gpu = data;
 300
 301	msm_gpu_crashstate_put(gpu);
 302}
 303
 304static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 305		struct msm_gem_object *obj, u64 iova, u32 flags)
 306{
 307	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 308
 309	/* Don't record write only objects */
 310	state_bo->size = obj->base.size;
 311	state_bo->iova = iova;
 312
 313	/* Only store data for non imported buffer objects marked for read */
 314	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
 315		void *ptr;
 316
 317		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
 318		if (!state_bo->data)
 319			goto out;
 320
 321		ptr = msm_gem_get_vaddr_active(&obj->base);
 322		if (IS_ERR(ptr)) {
 323			kvfree(state_bo->data);
 324			state_bo->data = NULL;
 325			goto out;
 326		}
 327
 328		memcpy(state_bo->data, ptr, obj->base.size);
 329		msm_gem_put_vaddr(&obj->base);
 330	}
 331out:
 332	state->nr_bos++;
 333}
 334
 335static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 336		struct msm_gem_submit *submit, char *comm, char *cmd)
 337{
 338	struct msm_gpu_state *state;
 339
 340	/* Check if the target supports capturing crash state */
 341	if (!gpu->funcs->gpu_state_get)
 342		return;
 343
 344	/* Only save one crash state at a time */
 345	if (gpu->crashstate)
 346		return;
 347
 348	state = gpu->funcs->gpu_state_get(gpu);
 349	if (IS_ERR_OR_NULL(state))
 350		return;
 351
 352	/* Fill in the additional crash state information */
 353	state->comm = kstrdup(comm, GFP_KERNEL);
 354	state->cmd = kstrdup(cmd, GFP_KERNEL);
 355
 356	if (submit) {
 357		int i;
 358
  359		state->bos = kcalloc(submit->nr_cmds,
  360			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
  361
  362		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
 363			int idx = submit->cmd[i].idx;
 364
 365			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
  366				submit->bos[idx].iova, submit->bos[idx].flags);
  367		}
 368	}
 369
 370	/* Set the active crash state to be dumped on failure */
 371	gpu->crashstate = state;
 372
 373	/* FIXME: Release the crashstate if this errors out? */
 374	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 375		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
 376}
 377#else
 378static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 379		struct msm_gem_submit *submit, char *comm, char *cmd)
 380{
 381}
 382#endif
 383
 384/*
 385 * Hangcheck detection for locked gpu:
 386 */
 387
 388static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 389		uint32_t fence)
 390{
 391	struct msm_gem_submit *submit;
 392
 393	list_for_each_entry(submit, &ring->submits, node) {
 394		if (submit->seqno > fence)
 395			break;
 396
 397		msm_update_fence(submit->ring->fctx,
 398			submit->fence->seqno);
 399	}
 400}
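
Each ring hands out strictly increasing seqno values at submit time, and memptrs->fence holds the last seqno the GPU marked complete, so retirement is an ordered walk:

	/*
	 * Worked example, hypothetical numbers: with submits 5, 6 and 7 on
	 * the list and fence == 6, the walk signals 5 and 6 and stops at 7,
	 * which is still in flight.
	 */
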
 401
 402static struct msm_gem_submit *
 403find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 404{
 405	struct msm_gem_submit *submit;
 406
 407	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
 408
 409	list_for_each_entry(submit, &ring->submits, node)
 410		if (submit->seqno == fence)
 411			return submit;
 412
 413	return NULL;
 414}
 415
 416static void retire_submits(struct msm_gpu *gpu);
 417
 418static void recover_worker(struct work_struct *work)
 419{
 420	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 421	struct drm_device *dev = gpu->dev;
 422	struct msm_drm_private *priv = dev->dev_private;
 423	struct msm_gem_submit *submit;
 424	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 425	char *comm = NULL, *cmd = NULL;
 426	int i;
 427
 428	mutex_lock(&dev->struct_mutex);
 429
 430	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 431
 432	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 433	if (submit) {
 434		struct task_struct *task;
 435
 436		/* Increment the fault counts */
 437		gpu->global_faults++;
 438		submit->queue->faults++;
 439
 440		task = get_pid_task(submit->pid, PIDTYPE_PID);
 441		if (task) {
 442			comm = kstrdup(task->comm, GFP_KERNEL);
 443			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 444			put_task_struct(task);
 445		}
 446
 447		if (comm && cmd) {
 448			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 449				gpu->name, comm, cmd);
 450
 451			msm_rd_dump_submit(priv->hangrd, submit,
 452				"offending task: %s (%s)", comm, cmd);
 453		} else
 454			msm_rd_dump_submit(priv->hangrd, submit, NULL);
 455	}
 456
 457	/* Record the crash state */
 458	pm_runtime_get_sync(&gpu->pdev->dev);
 459	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 460	pm_runtime_put_sync(&gpu->pdev->dev);
 461
 462	kfree(cmd);
 463	kfree(comm);
 464
 465	/*
 466	 * Update all the rings with the latest and greatest fence.. this
 467	 * needs to happen after msm_rd_dump_submit() to ensure that the
 468	 * bo's referenced by the offending submit are still around.
 469	 */
 470	for (i = 0; i < gpu->nr_rings; i++) {
 471		struct msm_ringbuffer *ring = gpu->rb[i];
 472
 473		uint32_t fence = ring->memptrs->fence;
 474
 475		/*
 476		 * For the current (faulting?) ring/submit advance the fence by
 477		 * one more to clear the faulting submit
 478		 */
 479		if (ring == cur_ring)
 480			fence++;
 481
 482		update_fences(gpu, ring, fence);
 483	}
 484
 485	if (msm_gpu_active(gpu)) {
 486		/* retire completed submits, plus the one that hung: */
 487		retire_submits(gpu);
 488
 489		pm_runtime_get_sync(&gpu->pdev->dev);
 490		gpu->funcs->recover(gpu);
 491		pm_runtime_put_sync(&gpu->pdev->dev);
 492
 493		/*
 494		 * Replay all remaining submits starting with highest priority
 495		 * ring
 496		 */
 497		for (i = 0; i < gpu->nr_rings; i++) {
 498			struct msm_ringbuffer *ring = gpu->rb[i];
 499
 500			list_for_each_entry(submit, &ring->submits, node)
 501				gpu->funcs->submit(gpu, submit, NULL);
 502		}
 503	}
 504
 505	mutex_unlock(&dev->struct_mutex);
 506
 507	msm_gpu_retire(gpu);
 508}
 509
 510static void hangcheck_timer_reset(struct msm_gpu *gpu)
 511{
 512	DBG("%s", gpu->name);
 513	mod_timer(&gpu->hangcheck_timer,
 514			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
 515}
 516
 517static void hangcheck_handler(struct timer_list *t)
 518{
 519	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 520	struct drm_device *dev = gpu->dev;
 521	struct msm_drm_private *priv = dev->dev_private;
 522	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 523	uint32_t fence = ring->memptrs->fence;
 524
 525	if (fence != ring->hangcheck_fence) {
 526		/* some progress has been made.. ya! */
 527		ring->hangcheck_fence = fence;
 528	} else if (fence < ring->seqno) {
 529		/* no progress and not done.. hung! */
 530		ring->hangcheck_fence = fence;
 531		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 532				gpu->name, ring->id);
 533		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 534				gpu->name, fence);
 535		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 536				gpu->name, ring->seqno);
 537
 538		queue_work(priv->wq, &gpu->recover_work);
 539	}
 540
 541	/* if still more pending work, reset the hangcheck timer: */
 542	if (ring->seqno > ring->hangcheck_fence)
 543		hangcheck_timer_reset(gpu);
 544
 545	/* workaround for missing irq: */
 546	queue_work(priv->wq, &gpu->retire_work);
 547}
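
A hang is declared only when the completed fence did not move between two timer ticks while work is still outstanding (fence < seqno); that distinguishes a wedged GPU from one that is merely idle.

	/*
	 * Example, hypothetical values: two consecutive ticks both read
	 * fence == 42 while ring->seqno == 45 -- no progress with three
	 * submits pending, so recover_work is queued.  Were seqno also 42,
	 * the ring would simply be idle and nothing would happen.
	 */
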
 548
 549/*
 550 * Performance Counters:
 551 */
 552
 553/* called under perf_lock */
 554static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
 555{
 556	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
 557	int i, n = min(ncntrs, gpu->num_perfcntrs);
 558
 559	/* read current values: */
 560	for (i = 0; i < gpu->num_perfcntrs; i++)
 561		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
 562
 563	/* update cntrs: */
 564	for (i = 0; i < n; i++)
 565		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
 566
 567	/* save current values: */
 568	for (i = 0; i < gpu->num_perfcntrs; i++)
 569		gpu->last_cntrs[i] = current_cntrs[i];
 570
 571	return n;
 572}
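
Counters are sampled as deltas against the previous read, so unsigned 32-bit wraparound is self-correcting:

	/*
	 * Example: last_cntrs[i] == 0xfffffff0 and the hardware counter has
	 * wrapped to 0x00000010; current - last == 0x20 in uint32_t
	 * arithmetic, exactly the number of events that occurred.
	 */
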
 573
 574static void update_sw_cntrs(struct msm_gpu *gpu)
 575{
 576	ktime_t time;
 577	uint32_t elapsed;
 578	unsigned long flags;
 579
 580	spin_lock_irqsave(&gpu->perf_lock, flags);
 581	if (!gpu->perfcntr_active)
 582		goto out;
 583
 584	time = ktime_get();
 585	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
 586
 587	gpu->totaltime += elapsed;
 588	if (gpu->last_sample.active)
 589		gpu->activetime += elapsed;
 590
 591	gpu->last_sample.active = msm_gpu_active(gpu);
 592	gpu->last_sample.time = time;
 593
 594out:
 595	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 596}
 597
 598void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 599{
 600	unsigned long flags;
 601
 602	pm_runtime_get_sync(&gpu->pdev->dev);
 603
 604	spin_lock_irqsave(&gpu->perf_lock, flags);
 605	/* we could dynamically enable/disable perfcntr registers too.. */
 606	gpu->last_sample.active = msm_gpu_active(gpu);
 607	gpu->last_sample.time = ktime_get();
 608	gpu->activetime = gpu->totaltime = 0;
 609	gpu->perfcntr_active = true;
 610	update_hw_cntrs(gpu, 0, NULL);
 611	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 612}
 613
 614void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 615{
 616	gpu->perfcntr_active = false;
 617	pm_runtime_put_sync(&gpu->pdev->dev);
 618}
 619
 620/* returns -errno or # of cntrs sampled */
 621int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 622		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
 623{
 624	unsigned long flags;
 625	int ret;
 626
 627	spin_lock_irqsave(&gpu->perf_lock, flags);
 628
 629	if (!gpu->perfcntr_active) {
 630		ret = -EINVAL;
 631		goto out;
 632	}
 633
 634	*activetime = gpu->activetime;
 635	*totaltime = gpu->totaltime;
 636
 637	gpu->activetime = gpu->totaltime = 0;
 638
 639	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
 640
 641out:
 642	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 643
 644	return ret;
 645}
 646
 647/*
 648 * Cmdstream submission/retirement:
 649 */
 650
 651static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 652		struct msm_gem_submit *submit)
 653{
 654	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 655	volatile struct msm_gpu_submit_stats *stats;
 656	u64 elapsed, clock = 0;
 657	int i;
 658
 659	stats = &ring->memptrs->stats[index];
  660	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
 661	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
 662	do_div(elapsed, 192);
 663
 664	/* Calculate the clock frequency from the number of CP cycles */
 665	if (elapsed) {
 666		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
 667		do_div(clock, elapsed);
 668	}
 669
 670	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 671		stats->alwayson_start, stats->alwayson_end);
 672
 673	for (i = 0; i < submit->nr_bos; i++) {
 674		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 675		/* move to inactive: */
 676		msm_gem_move_to_inactive(&msm_obj->base);
 677		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
 678		drm_gem_object_put(&msm_obj->base);
 679	}
 680
 681	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 682	pm_runtime_put_autosuspend(&gpu->pdev->dev);
 683	msm_gem_submit_free(submit);
 684}
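
The always-on counter runs at 19.2 MHz, so one tick is 10000/192 ns, and dividing CP cycles by elapsed nanoseconds (times 1000) yields the clock in MHz. Worked through with round numbers:

	/*
	 * 19,200,000 ticks -> elapsed = 19200000 * 10000 / 192
	 *                             = 1,000,000,000 ns (exactly 1 s).
	 * 500,000,000 CP cycles in that window:
	 *	clock = 500000000 * 1000 / 1000000000 = 500, i.e. 500 MHz.
	 */
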
 685
 686static void retire_submits(struct msm_gpu *gpu)
 687{
 688	struct drm_device *dev = gpu->dev;
 689	struct msm_gem_submit *submit, *tmp;
 690	int i;
 691
 692	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 693
 694	/* Retire the commits starting with highest priority */
 695	for (i = 0; i < gpu->nr_rings; i++) {
 696		struct msm_ringbuffer *ring = gpu->rb[i];
 697
 698		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
 699			if (dma_fence_is_signaled(submit->fence))
 700				retire_submit(gpu, ring, submit);
 701		}
 702	}
 703}
 704
 705static void retire_worker(struct work_struct *work)
 706{
 707	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 708	struct drm_device *dev = gpu->dev;
 709	int i;
 710
 711	for (i = 0; i < gpu->nr_rings; i++)
 712		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
 713
 714	mutex_lock(&dev->struct_mutex);
 715	retire_submits(gpu);
 716	mutex_unlock(&dev->struct_mutex);
 717}
 718
 719/* call from irq handler to schedule work to retire bo's */
 720void msm_gpu_retire(struct msm_gpu *gpu)
 721{
 722	struct msm_drm_private *priv = gpu->dev->dev_private;
 723	queue_work(priv->wq, &gpu->retire_work);
 724	update_sw_cntrs(gpu);
 725}
 726
 727/* add bo's to gpu's ring, and kick gpu: */
 728void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 729		struct msm_file_private *ctx)
 730{
 731	struct drm_device *dev = gpu->dev;
 732	struct msm_drm_private *priv = dev->dev_private;
 733	struct msm_ringbuffer *ring = submit->ring;
 734	int i;
 735
 736	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 737
 738	pm_runtime_get_sync(&gpu->pdev->dev);
 739
 740	msm_gpu_hw_init(gpu);
 741
 742	submit->seqno = ++ring->seqno;
 743
 744	list_add_tail(&submit->node, &ring->submits);
 745
 746	msm_rd_dump_submit(priv->rd, submit, NULL);
 747
 748	update_sw_cntrs(gpu);
 749
 750	for (i = 0; i < submit->nr_bos; i++) {
 751		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 752		uint64_t iova;
 753
 754		/* can't happen yet.. but when we add 2d support we'll have
 755		 * to deal w/ cross-ring synchronization:
 756		 */
 757		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 758
 759		/* submit takes a reference to the bo and iova until retired: */
 760		drm_gem_object_get(&msm_obj->base);
 761		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
 762
 763		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 764			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 765		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
 766			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
 767	}
 768
 769	gpu->funcs->submit(gpu, submit, ctx);
 770	priv->lastctx = ctx;
 771
 772	hangcheck_timer_reset(gpu);
 773}
 774
 775/*
 776 * Init/Cleanup:
 777 */
 778
 779static irqreturn_t irq_handler(int irq, void *data)
 780{
 781	struct msm_gpu *gpu = data;
 782	return gpu->funcs->irq(gpu);
 783}
 784
 785static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 786{
 787	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 788
 789	if (ret < 1) {
 790		gpu->nr_clocks = 0;
 791		return ret;
 792	}
 793
 794	gpu->nr_clocks = ret;
 795
 796	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 797		gpu->nr_clocks, "core");
 798
 799	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 800		gpu->nr_clocks, "rbbmtimer");
 801
 802	return 0;
 803}
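
devm_clk_bulk_get_all() grabs every entry of the device's clocks property and returns how many it found (or -errno); the named "core" and "rbbmtimer" clocks are then fished out of the bulk array.

	/*
	 * Hypothetical devicetree shape this probe path assumes:
	 *
	 *	clocks = <&gcc GPU_CORE_CLK>, <&gcc GPU_RBBMTIMER_CLK>;
	 *	clock-names = "core", "rbbmtimer";
	 *
	 * devm_clk_bulk_get_all() would return 2 here, and
	 * msm_clk_bulk_get_clock() matches entries against clock-names.
	 */
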
 804
 805static struct msm_gem_address_space *
 806msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
 807		uint64_t va_start, uint64_t va_end)
 808{
 809	struct msm_gem_address_space *aspace;
 810	int ret;
 811
 812	/*
 813	 * Setup IOMMU.. eventually we will (I think) do this once per context
 814	 * and have separate page tables per context.  For now, to keep things
 815	 * simple and to get something working, just use a single address space:
 816	 */
 817	if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
 818		struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
 819		if (!iommu)
 820			return NULL;
 821
 822		iommu->geometry.aperture_start = va_start;
 823		iommu->geometry.aperture_end = va_end;
 824
 825		DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
 826
 827		aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
 828		if (IS_ERR(aspace))
 829			iommu_domain_free(iommu);
 830	} else {
 831		aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
 832			va_start, va_end);
 833	}
 834
 835	if (IS_ERR(aspace)) {
 836		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
 837			PTR_ERR(aspace));
 838		return ERR_CAST(aspace);
 839	}
 840
 841	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
 842	if (ret) {
 843		msm_gem_address_space_put(aspace);
 844		return ERR_PTR(ret);
 845	}
 846
 847	return aspace;
 848}
 849
 850int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 851		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 852		const char *name, struct msm_gpu_config *config)
 853{
 854	int i, ret, nr_rings = config->nr_rings;
 855	void *memptrs;
 856	uint64_t memptrs_iova;
 857
 858	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 859		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
 860
 861	gpu->dev = drm;
 862	gpu->funcs = funcs;
 863	gpu->name = name;
 864
 865	INIT_LIST_HEAD(&gpu->active_list);
 866	INIT_WORK(&gpu->retire_work, retire_worker);
 867	INIT_WORK(&gpu->recover_work, recover_worker);
 868
 869
 870	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 871
 872	spin_lock_init(&gpu->perf_lock);
 873
 874
 875	/* Map registers: */
 876	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
 877	if (IS_ERR(gpu->mmio)) {
 878		ret = PTR_ERR(gpu->mmio);
 879		goto fail;
 880	}
 881
 882	/* Get Interrupt: */
 883	gpu->irq = platform_get_irq(pdev, 0);
 884	if (gpu->irq < 0) {
 885		ret = gpu->irq;
 886		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 887		goto fail;
 888	}
 889
 890	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 891			IRQF_TRIGGER_HIGH, gpu->name, gpu);
 892	if (ret) {
 893		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 894		goto fail;
 895	}
 896
 897	ret = get_clocks(pdev, gpu);
 898	if (ret)
 899		goto fail;
 900
 901	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 902	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 903	if (IS_ERR(gpu->ebi1_clk))
 904		gpu->ebi1_clk = NULL;
 905
 906	/* Acquire regulators: */
 907	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
 908	DBG("gpu_reg: %p", gpu->gpu_reg);
 909	if (IS_ERR(gpu->gpu_reg))
 910		gpu->gpu_reg = NULL;
 911
 912	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
 913	DBG("gpu_cx: %p", gpu->gpu_cx);
 914	if (IS_ERR(gpu->gpu_cx))
 915		gpu->gpu_cx = NULL;
 916
 917	gpu->pdev = pdev;
 918	platform_set_drvdata(pdev, gpu);
 919
 920	msm_devfreq_init(gpu);
 921
 922	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
 923		config->va_start, config->va_end);
 924
 925	if (gpu->aspace == NULL)
 926		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 927	else if (IS_ERR(gpu->aspace)) {
 928		ret = PTR_ERR(gpu->aspace);
 929		goto fail;
 930	}
 931
 932	memptrs = msm_gem_kernel_new(drm,
 933		sizeof(struct msm_rbmemptrs) * nr_rings,
 934		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
 935		&memptrs_iova);
 936
 937	if (IS_ERR(memptrs)) {
 938		ret = PTR_ERR(memptrs);
 939		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 940		goto fail;
 941	}
 942
 943	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
 944
 945	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 946		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
 947			ARRAY_SIZE(gpu->rb));
 948		nr_rings = ARRAY_SIZE(gpu->rb);
 949	}
 950
 951	/* Create ringbuffer(s): */
 952	for (i = 0; i < nr_rings; i++) {
 953		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 954
 955		if (IS_ERR(gpu->rb[i])) {
 956			ret = PTR_ERR(gpu->rb[i]);
 957			DRM_DEV_ERROR(drm->dev,
 958				"could not create ringbuffer %d: %d\n", i, ret);
 959			goto fail;
 960		}
 961
 962		memptrs += sizeof(struct msm_rbmemptrs);
 963		memptrs_iova += sizeof(struct msm_rbmemptrs);
 964	}
 965
 966	gpu->nr_rings = nr_rings;
 967
 968	return 0;
 969
 970fail:
 971	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
 972		msm_ringbuffer_destroy(gpu->rb[i]);
 973		gpu->rb[i] = NULL;
 974	}
 975
 976	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 977
 978	platform_set_drvdata(pdev, NULL);
 979	return ret;
 980}
 981
 982void msm_gpu_cleanup(struct msm_gpu *gpu)
 983{
 984	int i;
 985
 986	DBG("%s", gpu->name);
 987
 988	WARN_ON(!list_empty(&gpu->active_list));
 989
 990	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
 991		msm_ringbuffer_destroy(gpu->rb[i]);
 992		gpu->rb[i] = NULL;
 993	}
 994
 995	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 996
 997	if (!IS_ERR_OR_NULL(gpu->aspace)) {
 998		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
 999			NULL, 0);
1000		msm_gem_address_space_put(gpu->aspace);
1001	}
1002}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2013 Red Hat
  4 * Author: Rob Clark <robdclark@gmail.com>
  5 */
  6
  7#include "msm_gpu.h"
  8#include "msm_gem.h"
  9#include "msm_mmu.h"
 10#include "msm_fence.h"
 11#include "msm_gpu_trace.h"
 12#include "adreno/adreno_gpu.h"
 13
 14#include <generated/utsrelease.h>
  15#include <linux/string_helpers.h>
  16#include <linux/devfreq.h>
 17#include <linux/devcoredump.h>
 18#include <linux/sched/task.h>
 19
 20/*
 21 * Power Management:
 22 */
 23
 24static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 25		u32 flags)
 26{
 27	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 28	struct dev_pm_opp *opp;
 29
 30	opp = devfreq_recommended_opp(dev, freq, flags);
 31
 32	if (IS_ERR(opp))
 33		return PTR_ERR(opp);
 34
 35	if (gpu->funcs->gpu_set_freq)
 36		gpu->funcs->gpu_set_freq(gpu, opp);
 37	else
 38		clk_set_rate(gpu->core_clk, *freq);
 39
 40	dev_pm_opp_put(opp);
 41
 42	return 0;
 43}
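
Note the change from the v5.4 code above: gpu_set_freq() now receives the dev_pm_opp itself rather than a raw u64, so backends that vote through power-management firmware can take what they need from the OPP. A minimal sketch of such a hook (hypothetical names; on a6xx the vote goes through the GMU):

	static void example_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
	{
		unsigned long freq = dev_pm_opp_get_freq(opp);

		/* hand the level to firmware rather than programming the
		 * clock tree directly (example_fw_vote_freq is illustrative) */
		example_fw_vote_freq(gpu, freq);
	}
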
 44
 45static int msm_devfreq_get_dev_status(struct device *dev,
 46		struct devfreq_dev_status *status)
 47{
 48	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 49	ktime_t time;
 50
 51	if (gpu->funcs->gpu_get_freq)
 52		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
 53	else
 54		status->current_frequency = clk_get_rate(gpu->core_clk);
 55
 56	status->busy_time = gpu->funcs->gpu_busy(gpu);
 57
 58	time = ktime_get();
 59	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
 60	gpu->devfreq.time = time;
 61
 62	return 0;
 63}
 64
 65static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 66{
 67	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 68
 69	if (gpu->funcs->gpu_get_freq)
 70		*freq = gpu->funcs->gpu_get_freq(gpu);
 71	else
 72		*freq = clk_get_rate(gpu->core_clk);
 73
 74	return 0;
 75}
 76
 77static struct devfreq_dev_profile msm_devfreq_profile = {
 78	.polling_ms = 10,
 79	.target = msm_devfreq_target,
 80	.get_dev_status = msm_devfreq_get_dev_status,
 81	.get_cur_freq = msm_devfreq_get_cur_freq,
 82};
 83
 84static void msm_devfreq_init(struct msm_gpu *gpu)
 85{
 86	/* We need target support to do devfreq */
 87	if (!gpu->funcs->gpu_busy)
 88		return;
 89
 90	msm_devfreq_profile.initial_freq = gpu->fast_rate;
 91
 92	/*
 93	 * Don't set the freq_table or max_state and let devfreq build the table
 94	 * from OPP
  95	 * After a deferred probe, these may have been left at non-zero values,
 96	 * so set them back to zero before creating the devfreq device
 97	 */
 98	msm_devfreq_profile.freq_table = NULL;
 99	msm_devfreq_profile.max_state = 0;
100
101	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
102			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
103			NULL);
104
105	if (IS_ERR(gpu->devfreq.devfreq)) {
106		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
107		gpu->devfreq.devfreq = NULL;
108	}
109
110	devfreq_suspend_device(gpu->devfreq.devfreq);
111}
112
113static int enable_pwrrail(struct msm_gpu *gpu)
114{
115	struct drm_device *dev = gpu->dev;
116	int ret = 0;
117
118	if (gpu->gpu_reg) {
119		ret = regulator_enable(gpu->gpu_reg);
120		if (ret) {
121			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
122			return ret;
123		}
124	}
125
126	if (gpu->gpu_cx) {
127		ret = regulator_enable(gpu->gpu_cx);
128		if (ret) {
129			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
130			return ret;
131		}
132	}
133
134	return 0;
135}
136
137static int disable_pwrrail(struct msm_gpu *gpu)
138{
139	if (gpu->gpu_cx)
140		regulator_disable(gpu->gpu_cx);
141	if (gpu->gpu_reg)
142		regulator_disable(gpu->gpu_reg);
143	return 0;
144}
145
146static int enable_clk(struct msm_gpu *gpu)
147{
148	if (gpu->core_clk && gpu->fast_rate)
149		clk_set_rate(gpu->core_clk, gpu->fast_rate);
150
 151	/* Set the RBBM timer rate to 19.2 MHz */
152	if (gpu->rbbmtimer_clk)
153		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
154
155	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
156}
157
158static int disable_clk(struct msm_gpu *gpu)
159{
160	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
161
162	/*
163	 * Set the clock to a deliberately low rate. On older targets the clock
164	 * speed had to be non zero to avoid problems. On newer targets this
165	 * will be rounded down to zero anyway so it all works out.
166	 */
167	if (gpu->core_clk)
168		clk_set_rate(gpu->core_clk, 27000000);
169
170	if (gpu->rbbmtimer_clk)
171		clk_set_rate(gpu->rbbmtimer_clk, 0);
172
173	return 0;
174}
175
176static int enable_axi(struct msm_gpu *gpu)
177{
178	if (gpu->ebi1_clk)
179		clk_prepare_enable(gpu->ebi1_clk);
180	return 0;
181}
182
183static int disable_axi(struct msm_gpu *gpu)
184{
185	if (gpu->ebi1_clk)
186		clk_disable_unprepare(gpu->ebi1_clk);
187	return 0;
188}
189
190void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
191{
192	gpu->devfreq.busy_cycles = 0;
193	gpu->devfreq.time = ktime_get();
194
195	devfreq_resume_device(gpu->devfreq.devfreq);
196}
197
198int msm_gpu_pm_resume(struct msm_gpu *gpu)
199{
200	int ret;
201
202	DBG("%s", gpu->name);
203
204	ret = enable_pwrrail(gpu);
205	if (ret)
206		return ret;
207
208	ret = enable_clk(gpu);
209	if (ret)
210		return ret;
211
212	ret = enable_axi(gpu);
213	if (ret)
214		return ret;
215
216	msm_gpu_resume_devfreq(gpu);
217
218	gpu->needs_hw_init = true;
219
220	return 0;
221}
222
223int msm_gpu_pm_suspend(struct msm_gpu *gpu)
224{
225	int ret;
226
227	DBG("%s", gpu->name);
228
229	devfreq_suspend_device(gpu->devfreq.devfreq);
230
231	ret = disable_axi(gpu);
232	if (ret)
233		return ret;
234
235	ret = disable_clk(gpu);
236	if (ret)
237		return ret;
238
239	ret = disable_pwrrail(gpu);
240	if (ret)
241		return ret;
242
243	return 0;
244}
245
246int msm_gpu_hw_init(struct msm_gpu *gpu)
247{
248	int ret;
249
250	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
251
252	if (!gpu->needs_hw_init)
253		return 0;
254
255	disable_irq(gpu->irq);
256	ret = gpu->funcs->hw_init(gpu);
257	if (!ret)
258		gpu->needs_hw_init = false;
259	enable_irq(gpu->irq);
260
261	return ret;
262}
263
264#ifdef CONFIG_DEV_COREDUMP
265static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
266		size_t count, void *data, size_t datalen)
267{
268	struct msm_gpu *gpu = data;
269	struct drm_print_iterator iter;
270	struct drm_printer p;
271	struct msm_gpu_state *state;
272
273	state = msm_gpu_crashstate_get(gpu);
274	if (!state)
275		return 0;
276
277	iter.data = buffer;
278	iter.offset = 0;
279	iter.start = offset;
280	iter.remain = count;
281
282	p = drm_coredump_printer(&iter);
283
284	drm_printf(&p, "---\n");
285	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
286	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
287	drm_printf(&p, "time: %lld.%09ld\n",
288		state->time.tv_sec, state->time.tv_nsec);
289	if (state->comm)
290		drm_printf(&p, "comm: %s\n", state->comm);
291	if (state->cmd)
292		drm_printf(&p, "cmdline: %s\n", state->cmd);
293
294	gpu->funcs->show(gpu, state, &p);
295
296	msm_gpu_crashstate_put(gpu);
297
298	return count - iter.remain;
299}
300
301static void msm_gpu_devcoredump_free(void *data)
302{
303	struct msm_gpu *gpu = data;
304
305	msm_gpu_crashstate_put(gpu);
306}
307
308static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
309		struct msm_gem_object *obj, u64 iova, u32 flags)
310{
311	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
312
313	/* Don't record write only objects */
314	state_bo->size = obj->base.size;
315	state_bo->iova = iova;
316
317	/* Only store data for non imported buffer objects marked for read */
318	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
319		void *ptr;
320
321		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
322		if (!state_bo->data)
323			goto out;
324
325		ptr = msm_gem_get_vaddr_active(&obj->base);
326		if (IS_ERR(ptr)) {
327			kvfree(state_bo->data);
328			state_bo->data = NULL;
329			goto out;
330		}
331
332		memcpy(state_bo->data, ptr, obj->base.size);
333		msm_gem_put_vaddr(&obj->base);
334	}
335out:
336	state->nr_bos++;
337}
338
339static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
340		struct msm_gem_submit *submit, char *comm, char *cmd)
341{
342	struct msm_gpu_state *state;
343
344	/* Check if the target supports capturing crash state */
345	if (!gpu->funcs->gpu_state_get)
346		return;
347
348	/* Only save one crash state at a time */
349	if (gpu->crashstate)
350		return;
351
352	state = gpu->funcs->gpu_state_get(gpu);
353	if (IS_ERR_OR_NULL(state))
354		return;
355
356	/* Fill in the additional crash state information */
357	state->comm = kstrdup(comm, GFP_KERNEL);
358	state->cmd = kstrdup(cmd, GFP_KERNEL);
359
360	if (submit) {
361		int i, nr = 0;
362
363		/* count # of buffers to dump: */
364		for (i = 0; i < submit->nr_bos; i++)
365			if (should_dump(submit, i))
366				nr++;
367		/* always dump cmd bo's, but don't double count them: */
368		for (i = 0; i < submit->nr_cmds; i++)
369			if (!should_dump(submit, submit->cmd[i].idx))
370				nr++;
371
372		state->bos = kcalloc(nr,
373			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
374
375		for (i = 0; i < submit->nr_bos; i++) {
376			if (should_dump(submit, i)) {
377				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
378					submit->bos[i].iova, submit->bos[i].flags);
379			}
380		}
381
382		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
383			int idx = submit->cmd[i].idx;
384
385			if (!should_dump(submit, submit->cmd[i].idx)) {
386				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
387					submit->bos[idx].iova, submit->bos[idx].flags);
388			}
389		}
390	}
391
392	/* Set the active crash state to be dumped on failure */
393	gpu->crashstate = state;
394
395	/* FIXME: Release the crashstate if this errors out? */
396	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
397		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
398}
399#else
400static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
401		struct msm_gem_submit *submit, char *comm, char *cmd)
402{
403}
404#endif
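
The other v5.9 change in the capture path: rather than saving every cmd buffer unconditionally, the BO list is sized and filled via should_dump(), which is defined outside this excerpt. Roughly, it reduces to a flag test on the submit (a sketch; the in-tree helper may also honor debug knobs such as msm_rd's rd_full):

	static bool should_dump(struct msm_gem_submit *submit, int idx)
	{
		return submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP;
	}
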
405
406/*
407 * Hangcheck detection for locked gpu:
408 */
409
410static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
411		uint32_t fence)
412{
413	struct msm_gem_submit *submit;
414
415	list_for_each_entry(submit, &ring->submits, node) {
416		if (submit->seqno > fence)
417			break;
418
419		msm_update_fence(submit->ring->fctx,
420			submit->fence->seqno);
421	}
422}
423
424static struct msm_gem_submit *
425find_submit(struct msm_ringbuffer *ring, uint32_t fence)
426{
427	struct msm_gem_submit *submit;
428
429	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
430
431	list_for_each_entry(submit, &ring->submits, node)
432		if (submit->seqno == fence)
433			return submit;
434
435	return NULL;
436}
437
438static void retire_submits(struct msm_gpu *gpu);
439
440static void recover_worker(struct work_struct *work)
441{
442	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
443	struct drm_device *dev = gpu->dev;
444	struct msm_drm_private *priv = dev->dev_private;
445	struct msm_gem_submit *submit;
446	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
447	char *comm = NULL, *cmd = NULL;
448	int i;
449
450	mutex_lock(&dev->struct_mutex);
451
452	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
453
454	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
455	if (submit) {
456		struct task_struct *task;
457
458		/* Increment the fault counts */
459		gpu->global_faults++;
460		submit->queue->faults++;
461
462		task = get_pid_task(submit->pid, PIDTYPE_PID);
463		if (task) {
464			comm = kstrdup(task->comm, GFP_KERNEL);
465			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
466			put_task_struct(task);
467		}
468
469		if (comm && cmd) {
470			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
471				gpu->name, comm, cmd);
472
473			msm_rd_dump_submit(priv->hangrd, submit,
474				"offending task: %s (%s)", comm, cmd);
475		} else
476			msm_rd_dump_submit(priv->hangrd, submit, NULL);
477	}
478
479	/* Record the crash state */
480	pm_runtime_get_sync(&gpu->pdev->dev);
481	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
482	pm_runtime_put_sync(&gpu->pdev->dev);
483
484	kfree(cmd);
485	kfree(comm);
486
487	/*
488	 * Update all the rings with the latest and greatest fence.. this
489	 * needs to happen after msm_rd_dump_submit() to ensure that the
490	 * bo's referenced by the offending submit are still around.
491	 */
492	for (i = 0; i < gpu->nr_rings; i++) {
493		struct msm_ringbuffer *ring = gpu->rb[i];
494
495		uint32_t fence = ring->memptrs->fence;
496
497		/*
498		 * For the current (faulting?) ring/submit advance the fence by
499		 * one more to clear the faulting submit
500		 */
501		if (ring == cur_ring)
502			fence++;
503
504		update_fences(gpu, ring, fence);
505	}
506
507	if (msm_gpu_active(gpu)) {
508		/* retire completed submits, plus the one that hung: */
509		retire_submits(gpu);
510
511		pm_runtime_get_sync(&gpu->pdev->dev);
512		gpu->funcs->recover(gpu);
513		pm_runtime_put_sync(&gpu->pdev->dev);
514
515		/*
516		 * Replay all remaining submits starting with highest priority
517		 * ring
518		 */
519		for (i = 0; i < gpu->nr_rings; i++) {
520			struct msm_ringbuffer *ring = gpu->rb[i];
521
522			list_for_each_entry(submit, &ring->submits, node)
523				gpu->funcs->submit(gpu, submit, NULL);
524		}
525	}
526
527	mutex_unlock(&dev->struct_mutex);
528
529	msm_gpu_retire(gpu);
530}
531
532static void hangcheck_timer_reset(struct msm_gpu *gpu)
533{
534	DBG("%s", gpu->name);
535	mod_timer(&gpu->hangcheck_timer,
536			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
537}
538
539static void hangcheck_handler(struct timer_list *t)
540{
541	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
542	struct drm_device *dev = gpu->dev;
543	struct msm_drm_private *priv = dev->dev_private;
544	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
545	uint32_t fence = ring->memptrs->fence;
546
547	if (fence != ring->hangcheck_fence) {
548		/* some progress has been made.. ya! */
549		ring->hangcheck_fence = fence;
550	} else if (fence < ring->seqno) {
551		/* no progress and not done.. hung! */
552		ring->hangcheck_fence = fence;
553		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
554				gpu->name, ring->id);
555		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
556				gpu->name, fence);
557		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
558				gpu->name, ring->seqno);
559
560		queue_work(priv->wq, &gpu->recover_work);
561	}
562
563	/* if still more pending work, reset the hangcheck timer: */
564	if (ring->seqno > ring->hangcheck_fence)
565		hangcheck_timer_reset(gpu);
566
567	/* workaround for missing irq: */
568	queue_work(priv->wq, &gpu->retire_work);
569}
570
571/*
572 * Performance Counters:
573 */
574
575/* called under perf_lock */
576static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
577{
578	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
579	int i, n = min(ncntrs, gpu->num_perfcntrs);
580
581	/* read current values: */
582	for (i = 0; i < gpu->num_perfcntrs; i++)
583		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
584
585	/* update cntrs: */
586	for (i = 0; i < n; i++)
587		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
588
589	/* save current values: */
590	for (i = 0; i < gpu->num_perfcntrs; i++)
591		gpu->last_cntrs[i] = current_cntrs[i];
592
593	return n;
594}
595
596static void update_sw_cntrs(struct msm_gpu *gpu)
597{
598	ktime_t time;
599	uint32_t elapsed;
600	unsigned long flags;
601
602	spin_lock_irqsave(&gpu->perf_lock, flags);
603	if (!gpu->perfcntr_active)
604		goto out;
605
606	time = ktime_get();
607	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
608
609	gpu->totaltime += elapsed;
610	if (gpu->last_sample.active)
611		gpu->activetime += elapsed;
612
613	gpu->last_sample.active = msm_gpu_active(gpu);
614	gpu->last_sample.time = time;
615
616out:
617	spin_unlock_irqrestore(&gpu->perf_lock, flags);
618}
619
620void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
621{
622	unsigned long flags;
623
624	pm_runtime_get_sync(&gpu->pdev->dev);
625
626	spin_lock_irqsave(&gpu->perf_lock, flags);
627	/* we could dynamically enable/disable perfcntr registers too.. */
628	gpu->last_sample.active = msm_gpu_active(gpu);
629	gpu->last_sample.time = ktime_get();
630	gpu->activetime = gpu->totaltime = 0;
631	gpu->perfcntr_active = true;
632	update_hw_cntrs(gpu, 0, NULL);
633	spin_unlock_irqrestore(&gpu->perf_lock, flags);
634}
635
636void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
637{
638	gpu->perfcntr_active = false;
639	pm_runtime_put_sync(&gpu->pdev->dev);
640}
641
642/* returns -errno or # of cntrs sampled */
643int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
644		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
645{
646	unsigned long flags;
647	int ret;
648
649	spin_lock_irqsave(&gpu->perf_lock, flags);
650
651	if (!gpu->perfcntr_active) {
652		ret = -EINVAL;
653		goto out;
654	}
655
656	*activetime = gpu->activetime;
657	*totaltime = gpu->totaltime;
658
659	gpu->activetime = gpu->totaltime = 0;
660
661	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
662
663out:
664	spin_unlock_irqrestore(&gpu->perf_lock, flags);
665
666	return ret;
667}
668
669/*
670 * Cmdstream submission/retirement:
671 */
672
673static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
674		struct msm_gem_submit *submit)
675{
676	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
677	volatile struct msm_gpu_submit_stats *stats;
678	u64 elapsed, clock = 0;
679	int i;
680
681	stats = &ring->memptrs->stats[index];
 682	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
683	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
684	do_div(elapsed, 192);
685
686	/* Calculate the clock frequency from the number of CP cycles */
687	if (elapsed) {
688		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
689		do_div(clock, elapsed);
690	}
691
692	trace_msm_gpu_submit_retired(submit, elapsed, clock,
693		stats->alwayson_start, stats->alwayson_end);
694
695	for (i = 0; i < submit->nr_bos; i++) {
696		struct msm_gem_object *msm_obj = submit->bos[i].obj;
697		/* move to inactive: */
698		msm_gem_move_to_inactive(&msm_obj->base);
699		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
700		drm_gem_object_put_locked(&msm_obj->base);
701	}
702
703	pm_runtime_mark_last_busy(&gpu->pdev->dev);
704	pm_runtime_put_autosuspend(&gpu->pdev->dev);
705	msm_gem_submit_free(submit);
706}
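
The only change in this function since the v5.4 copy above is drm_gem_object_put() becoming drm_gem_object_put_locked(): around v5.8/v5.9 the old helper pair was renamed (drm_gem_object_put_unlocked() took over the plain name), and this call site runs under struct_mutex, hence the _locked variant.
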
707
708static void retire_submits(struct msm_gpu *gpu)
709{
710	struct drm_device *dev = gpu->dev;
711	struct msm_gem_submit *submit, *tmp;
712	int i;
713
714	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
715
716	/* Retire the commits starting with highest priority */
717	for (i = 0; i < gpu->nr_rings; i++) {
718		struct msm_ringbuffer *ring = gpu->rb[i];
719
720		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
721			if (dma_fence_is_signaled(submit->fence))
722				retire_submit(gpu, ring, submit);
723		}
724	}
725}
726
727static void retire_worker(struct work_struct *work)
728{
729	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
730	struct drm_device *dev = gpu->dev;
731	int i;
732
733	for (i = 0; i < gpu->nr_rings; i++)
734		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
735
736	mutex_lock(&dev->struct_mutex);
737	retire_submits(gpu);
738	mutex_unlock(&dev->struct_mutex);
739}
740
741/* call from irq handler to schedule work to retire bo's */
742void msm_gpu_retire(struct msm_gpu *gpu)
743{
744	struct msm_drm_private *priv = gpu->dev->dev_private;
745	queue_work(priv->wq, &gpu->retire_work);
746	update_sw_cntrs(gpu);
747}
748
749/* add bo's to gpu's ring, and kick gpu: */
750void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
751		struct msm_file_private *ctx)
752{
753	struct drm_device *dev = gpu->dev;
754	struct msm_drm_private *priv = dev->dev_private;
755	struct msm_ringbuffer *ring = submit->ring;
756	int i;
757
758	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
759
760	pm_runtime_get_sync(&gpu->pdev->dev);
761
762	msm_gpu_hw_init(gpu);
763
764	submit->seqno = ++ring->seqno;
765
766	list_add_tail(&submit->node, &ring->submits);
767
768	msm_rd_dump_submit(priv->rd, submit, NULL);
769
770	update_sw_cntrs(gpu);
771
772	for (i = 0; i < submit->nr_bos; i++) {
773		struct msm_gem_object *msm_obj = submit->bos[i].obj;
774		uint64_t iova;
775
776		/* can't happen yet.. but when we add 2d support we'll have
777		 * to deal w/ cross-ring synchronization:
778		 */
779		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
780
781		/* submit takes a reference to the bo and iova until retired: */
782		drm_gem_object_get(&msm_obj->base);
783		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
784
785		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
786			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
787		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
788			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
789	}
790
791	gpu->funcs->submit(gpu, submit, ctx);
792	priv->lastctx = ctx;
793
794	hangcheck_timer_reset(gpu);
795}
796
797/*
798 * Init/Cleanup:
799 */
800
801static irqreturn_t irq_handler(int irq, void *data)
802{
803	struct msm_gpu *gpu = data;
804	return gpu->funcs->irq(gpu);
805}
806
807static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
808{
809	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
810
811	if (ret < 1) {
812		gpu->nr_clocks = 0;
813		return ret;
814	}
815
816	gpu->nr_clocks = ret;
817
818	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
819		gpu->nr_clocks, "core");
820
821	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
822		gpu->nr_clocks, "rbbmtimer");
823
824	return 0;
825}
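
The v5.4 helper msm_gpu_create_address_space() is gone from this file by v5.9: address-space creation moved behind gpu->funcs->create_address_space so each target chooses its own MMU setup (see its use in msm_gpu_init() below). A sketch of such a hook for an IOMMU-backed target, with hypothetical names and an illustrative mapped range (adreno's IOMMU path is the in-tree analogue):

	static struct msm_gem_address_space *
	example_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
	{
		struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
		struct msm_mmu *mmu;

		if (!domain)
			return NULL;	/* msm_gpu_init() falls back to the VRAM carveout */

		mmu = msm_iommu_new(&pdev->dev, domain);

		/* expose a GPU virtual range above a small hole at 0 */
		return msm_gem_address_space_create(mmu, "gpu", SZ_16M,
				0xffffffff - SZ_16M);
	}
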
 826
827int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
828		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
829		const char *name, struct msm_gpu_config *config)
830{
831	int i, ret, nr_rings = config->nr_rings;
832	void *memptrs;
833	uint64_t memptrs_iova;
834
835	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
836		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
837
838	gpu->dev = drm;
839	gpu->funcs = funcs;
840	gpu->name = name;
841
842	INIT_LIST_HEAD(&gpu->active_list);
843	INIT_WORK(&gpu->retire_work, retire_worker);
844	INIT_WORK(&gpu->recover_work, recover_worker);
845
846
847	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
848
849	spin_lock_init(&gpu->perf_lock);
850
851
852	/* Map registers: */
853	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
854	if (IS_ERR(gpu->mmio)) {
855		ret = PTR_ERR(gpu->mmio);
856		goto fail;
857	}
858
859	/* Get Interrupt: */
860	gpu->irq = platform_get_irq(pdev, 0);
861	if (gpu->irq < 0) {
862		ret = gpu->irq;
863		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
864		goto fail;
865	}
866
867	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
868			IRQF_TRIGGER_HIGH, gpu->name, gpu);
869	if (ret) {
870		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
871		goto fail;
872	}
873
874	ret = get_clocks(pdev, gpu);
875	if (ret)
876		goto fail;
877
878	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
879	DBG("ebi1_clk: %p", gpu->ebi1_clk);
880	if (IS_ERR(gpu->ebi1_clk))
881		gpu->ebi1_clk = NULL;
882
883	/* Acquire regulators: */
884	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
885	DBG("gpu_reg: %p", gpu->gpu_reg);
886	if (IS_ERR(gpu->gpu_reg))
887		gpu->gpu_reg = NULL;
888
889	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
890	DBG("gpu_cx: %p", gpu->gpu_cx);
891	if (IS_ERR(gpu->gpu_cx))
892		gpu->gpu_cx = NULL;
893
894	gpu->pdev = pdev;
895	platform_set_drvdata(pdev, gpu);
896
897	msm_devfreq_init(gpu);
898
899
900	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
901
902	if (gpu->aspace == NULL)
903		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
904	else if (IS_ERR(gpu->aspace)) {
905		ret = PTR_ERR(gpu->aspace);
906		goto fail;
907	}
908
909	memptrs = msm_gem_kernel_new(drm,
910		sizeof(struct msm_rbmemptrs) * nr_rings,
911		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
912		&memptrs_iova);
913
914	if (IS_ERR(memptrs)) {
915		ret = PTR_ERR(memptrs);
916		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
917		goto fail;
918	}
919
920	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
921
922	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
923		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
924			ARRAY_SIZE(gpu->rb));
925		nr_rings = ARRAY_SIZE(gpu->rb);
926	}
927
928	/* Create ringbuffer(s): */
929	for (i = 0; i < nr_rings; i++) {
930		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
931
932		if (IS_ERR(gpu->rb[i])) {
933			ret = PTR_ERR(gpu->rb[i]);
934			DRM_DEV_ERROR(drm->dev,
935				"could not create ringbuffer %d: %d\n", i, ret);
936			goto fail;
937		}
938
939		memptrs += sizeof(struct msm_rbmemptrs);
940		memptrs_iova += sizeof(struct msm_rbmemptrs);
941	}
942
943	gpu->nr_rings = nr_rings;
944
945	return 0;
946
947fail:
948	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
949		msm_ringbuffer_destroy(gpu->rb[i]);
950		gpu->rb[i] = NULL;
951	}
952
953	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
954
955	platform_set_drvdata(pdev, NULL);
956	return ret;
957}
958
959void msm_gpu_cleanup(struct msm_gpu *gpu)
960{
961	int i;
962
963	DBG("%s", gpu->name);
964
965	WARN_ON(!list_empty(&gpu->active_list));
966
967	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
968		msm_ringbuffer_destroy(gpu->rb[i]);
969		gpu->rb[i] = NULL;
970	}
971
972	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
973
974	if (!IS_ERR_OR_NULL(gpu->aspace)) {
 975		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
 976		msm_gem_address_space_put(gpu->aspace);
977	}
978}