v4.6
 
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#include "msm_gpu.h"
 19#include "msm_gem.h"
 20#include "msm_mmu.h"
 21
 22
 23/*
 24 * Power Management:
 25 */
 26
 27#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 28#include <mach/board.h>
 29static void bs_init(struct msm_gpu *gpu)
 30{
 31	if (gpu->bus_scale_table) {
 32		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
 33		DBG("bus scale client: %08x", gpu->bsc);
 34	}
 35}
 36
 37static void bs_fini(struct msm_gpu *gpu)
 38{
 39	if (gpu->bsc) {
 40		msm_bus_scale_unregister_client(gpu->bsc);
 41		gpu->bsc = 0;
 42	}
 43}
 44
 45static void bs_set(struct msm_gpu *gpu, int idx)
 46{
 47	if (gpu->bsc) {
 48		DBG("set bus scaling: %d", idx);
 49		msm_bus_scale_client_update_request(gpu->bsc, idx);
 50	}
 51}
 52#else
 53static void bs_init(struct msm_gpu *gpu) {}
 54static void bs_fini(struct msm_gpu *gpu) {}
 55static void bs_set(struct msm_gpu *gpu, int idx) {}
 56#endif
 57
 58static int enable_pwrrail(struct msm_gpu *gpu)
 59{
 60	struct drm_device *dev = gpu->dev;
 61	int ret = 0;
 62
 63	if (gpu->gpu_reg) {
 64		ret = regulator_enable(gpu->gpu_reg);
 65		if (ret) {
 66			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
 67			return ret;
 68		}
 69	}
 70
 71	if (gpu->gpu_cx) {
 72		ret = regulator_enable(gpu->gpu_cx);
 73		if (ret) {
 74			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
 75			return ret;
 76		}
 77	}
 78
 79	return 0;
 80}
 81
 82static int disable_pwrrail(struct msm_gpu *gpu)
 83{
 84	if (gpu->gpu_cx)
 85		regulator_disable(gpu->gpu_cx);
 86	if (gpu->gpu_reg)
 87		regulator_disable(gpu->gpu_reg);
 88	return 0;
 89}
 90
 91static int enable_clk(struct msm_gpu *gpu)
 92{
 93	struct clk *rate_clk = NULL;
 94	int i;
 95
 96	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
 97	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
 98		if (gpu->grp_clks[i]) {
 99			clk_prepare(gpu->grp_clks[i]);
100			rate_clk = gpu->grp_clks[i];
101		}
102	}
103
104	if (rate_clk && gpu->fast_rate)
105		clk_set_rate(rate_clk, gpu->fast_rate);
106
107	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
108		if (gpu->grp_clks[i])
109			clk_enable(gpu->grp_clks[i]);
110
111	return 0;
112}
113
114static int disable_clk(struct msm_gpu *gpu)
115{
116	struct clk *rate_clk = NULL;
117	int i;
118
119	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
120	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
121		if (gpu->grp_clks[i]) {
122			clk_disable(gpu->grp_clks[i]);
123			rate_clk = gpu->grp_clks[i];
124		}
125	}
126
127	if (rate_clk && gpu->slow_rate)
128		clk_set_rate(rate_clk, gpu->slow_rate);
129
130	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
131		if (gpu->grp_clks[i])
132			clk_unprepare(gpu->grp_clks[i]);
133
134	return 0;
135}
136
137static int enable_axi(struct msm_gpu *gpu)
138{
139	if (gpu->ebi1_clk)
140		clk_prepare_enable(gpu->ebi1_clk);
141	if (gpu->bus_freq)
142		bs_set(gpu, gpu->bus_freq);
143	return 0;
144}
145
146static int disable_axi(struct msm_gpu *gpu)
147{
148	if (gpu->ebi1_clk)
149		clk_disable_unprepare(gpu->ebi1_clk);
150	if (gpu->bus_freq)
151		bs_set(gpu, 0);
152	return 0;
153}
154
155int msm_gpu_pm_resume(struct msm_gpu *gpu)
156{
157	struct drm_device *dev = gpu->dev;
158	int ret;
159
160	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
161
162	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
163
164	if (gpu->active_cnt++ > 0)
165		return 0;
166
167	if (WARN_ON(gpu->active_cnt <= 0))
168		return -EINVAL;
169
170	ret = enable_pwrrail(gpu);
171	if (ret)
172		return ret;
173
174	ret = enable_clk(gpu);
175	if (ret)
176		return ret;
177
178	ret = enable_axi(gpu);
179	if (ret)
180		return ret;
181
182	return 0;
183}
184
185int msm_gpu_pm_suspend(struct msm_gpu *gpu)
186{
187	struct drm_device *dev = gpu->dev;
188	int ret;
189
190	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
191
192	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
193
194	if (--gpu->active_cnt > 0)
195		return 0;
196
197	if (WARN_ON(gpu->active_cnt < 0))
198		return -EINVAL;
199
200	ret = disable_axi(gpu);
201	if (ret)
202		return ret;
203
204	ret = disable_clk(gpu);
205	if (ret)
206		return ret;
207
208	ret = disable_pwrrail(gpu);
209	if (ret)
210		return ret;
211
212	return 0;
213}
214
215/*
216 * Inactivity detection (for suspend):
217 */
218
219static void inactive_worker(struct work_struct *work)
220{
221	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
222	struct drm_device *dev = gpu->dev;
223
224	if (gpu->inactive)
225		return;
226
227	DBG("%s: inactive!\n", gpu->name);
228	mutex_lock(&dev->struct_mutex);
229	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
230		disable_axi(gpu);
231		disable_clk(gpu);
232		gpu->inactive = true;
233	}
234	mutex_unlock(&dev->struct_mutex);
235}
236
237static void inactive_handler(unsigned long data)
238{
239	struct msm_gpu *gpu = (struct msm_gpu *)data;
240	struct msm_drm_private *priv = gpu->dev->dev_private;
241
242	queue_work(priv->wq, &gpu->inactive_work);
243}
244
245/* cancel inactive timer and make sure we are awake: */
246static void inactive_cancel(struct msm_gpu *gpu)
247{
248	DBG("%s", gpu->name);
249	del_timer(&gpu->inactive_timer);
250	if (gpu->inactive) {
251		enable_clk(gpu);
252		enable_axi(gpu);
253		gpu->inactive = false;
254	}
255}
256
257static void inactive_start(struct msm_gpu *gpu)
258{
259	DBG("%s", gpu->name);
260	mod_timer(&gpu->inactive_timer,
261			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
262}
263
264/*
265 * Hangcheck detection for locked gpu:
266 */
267
268static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
269
270static void recover_worker(struct work_struct *work)
271{
272	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
273	struct drm_device *dev = gpu->dev;
274
275	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
276
277	mutex_lock(&dev->struct_mutex);
278	if (msm_gpu_active(gpu)) {
279		struct msm_gem_submit *submit;
280		uint32_t fence = gpu->funcs->last_fence(gpu);
281
282		/* retire completed submits, plus the one that hung: */
283		retire_submits(gpu, fence + 1);
284
285		inactive_cancel(gpu);
286		gpu->funcs->recover(gpu);
287
288		/* replay the remaining submits after the one that hung: */
289		list_for_each_entry(submit, &gpu->submit_list, node) {
290			gpu->funcs->submit(gpu, submit, NULL);
291		}
292	}
293	mutex_unlock(&dev->struct_mutex);
294
295	msm_gpu_retire(gpu);
296}
297
298static void hangcheck_timer_reset(struct msm_gpu *gpu)
299{
300	DBG("%s", gpu->name);
301	mod_timer(&gpu->hangcheck_timer,
302			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
303}
304
305static void hangcheck_handler(unsigned long data)
306{
307	struct msm_gpu *gpu = (struct msm_gpu *)data;
308	struct drm_device *dev = gpu->dev;
309	struct msm_drm_private *priv = dev->dev_private;
310	uint32_t fence = gpu->funcs->last_fence(gpu);
311
312	if (fence != gpu->hangcheck_fence) {
313		/* some progress has been made.. ya! */
314		gpu->hangcheck_fence = fence;
315	} else if (fence < gpu->submitted_fence) {
316		/* no progress and not done.. hung! */
317		gpu->hangcheck_fence = fence;
318		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
319				gpu->name);
320		dev_err(dev->dev, "%s:     completed fence: %u\n",
321				gpu->name, fence);
322		dev_err(dev->dev, "%s:     submitted fence: %u\n",
323				gpu->name, gpu->submitted_fence);
324		queue_work(priv->wq, &gpu->recover_work);
325	}
326
327	/* if still more pending work, reset the hangcheck timer: */
328	if (gpu->submitted_fence > gpu->hangcheck_fence)
329		hangcheck_timer_reset(gpu);
330
331	/* workaround for missing irq: */
332	queue_work(priv->wq, &gpu->retire_work);
333}
334
335/*
336 * Performance Counters:
337 */
338
339/* called under perf_lock */
340static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
341{
342	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
343	int i, n = min(ncntrs, gpu->num_perfcntrs);
344
345	/* read current values: */
346	for (i = 0; i < gpu->num_perfcntrs; i++)
347		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
348
349	/* update cntrs: */
350	for (i = 0; i < n; i++)
351		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
352
353	/* save current values: */
354	for (i = 0; i < gpu->num_perfcntrs; i++)
355		gpu->last_cntrs[i] = current_cntrs[i];
356
357	return n;
358}
359
360static void update_sw_cntrs(struct msm_gpu *gpu)
361{
362	ktime_t time;
363	uint32_t elapsed;
364	unsigned long flags;
365
366	spin_lock_irqsave(&gpu->perf_lock, flags);
367	if (!gpu->perfcntr_active)
368		goto out;
369
370	time = ktime_get();
371	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
372
373	gpu->totaltime += elapsed;
374	if (gpu->last_sample.active)
375		gpu->activetime += elapsed;
376
377	gpu->last_sample.active = msm_gpu_active(gpu);
378	gpu->last_sample.time = time;
379
380out:
381	spin_unlock_irqrestore(&gpu->perf_lock, flags);
382}
383
384void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
385{
386	unsigned long flags;
387
388	spin_lock_irqsave(&gpu->perf_lock, flags);
389	/* we could dynamically enable/disable perfcntr registers too.. */
390	gpu->last_sample.active = msm_gpu_active(gpu);
391	gpu->last_sample.time = ktime_get();
392	gpu->activetime = gpu->totaltime = 0;
393	gpu->perfcntr_active = true;
394	update_hw_cntrs(gpu, 0, NULL);
395	spin_unlock_irqrestore(&gpu->perf_lock, flags);
396}
397
398void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
399{
400	gpu->perfcntr_active = false;
401}
402
403/* returns -errno or # of cntrs sampled */
404int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
405		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
406{
407	unsigned long flags;
408	int ret;
409
410	spin_lock_irqsave(&gpu->perf_lock, flags);
411
412	if (!gpu->perfcntr_active) {
413		ret = -EINVAL;
414		goto out;
415	}
416
417	*activetime = gpu->activetime;
418	*totaltime = gpu->totaltime;
419
420	gpu->activetime = gpu->totaltime = 0;
421
422	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
423
424out:
425	spin_unlock_irqrestore(&gpu->perf_lock, flags);
426
427	return ret;
428}
429
430/*
431 * Cmdstream submission/retirement:
432 */
433
434static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
435{
436	struct drm_device *dev = gpu->dev;
437
438	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
439
440	while (!list_empty(&gpu->submit_list)) {
441		struct msm_gem_submit *submit;
442
443		submit = list_first_entry(&gpu->submit_list,
444				struct msm_gem_submit, node);
445
446		if (submit->fence <= fence) {
447			list_del(&submit->node);
448			kfree(submit);
449		} else {
450			break;
451		}
452	}
453}
454
455static void retire_worker(struct work_struct *work)
456{
457	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
458	struct drm_device *dev = gpu->dev;
459	uint32_t fence = gpu->funcs->last_fence(gpu);
460
461	msm_update_fence(gpu->dev, fence);
462
463	mutex_lock(&dev->struct_mutex);
464
465	retire_submits(gpu, fence);
466
467	while (!list_empty(&gpu->active_list)) {
468		struct msm_gem_object *obj;
469
470		obj = list_first_entry(&gpu->active_list,
471				struct msm_gem_object, mm_list);
472
473		if ((obj->read_fence <= fence) &&
474				(obj->write_fence <= fence)) {
475			/* move to inactive: */
476			msm_gem_move_to_inactive(&obj->base);
477			msm_gem_put_iova(&obj->base, gpu->id);
478			drm_gem_object_unreference(&obj->base);
479		} else {
480			break;
481		}
482	}
483
484	mutex_unlock(&dev->struct_mutex);
485
486	if (!msm_gpu_active(gpu))
487		inactive_start(gpu);
488}
489
490/* call from irq handler to schedule work to retire bo's */
491void msm_gpu_retire(struct msm_gpu *gpu)
492{
493	struct msm_drm_private *priv = gpu->dev->dev_private;
494	queue_work(priv->wq, &gpu->retire_work);
495	update_sw_cntrs(gpu);
496}
497
498/* add bo's to gpu's ring, and kick gpu: */
499int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
500		struct msm_file_private *ctx)
501{
502	struct drm_device *dev = gpu->dev;
503	struct msm_drm_private *priv = dev->dev_private;
504	int i, ret;
505
506	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
507
508	submit->fence = ++priv->next_fence;
509
510	gpu->submitted_fence = submit->fence;
511
512	inactive_cancel(gpu);
513
514	list_add_tail(&submit->node, &gpu->submit_list);
515
516	msm_rd_dump_submit(submit);
517
518	gpu->submitted_fence = submit->fence;
519
520	update_sw_cntrs(gpu);
521
522	for (i = 0; i < submit->nr_bos; i++) {
523		struct msm_gem_object *msm_obj = submit->bos[i].obj;
524
525		/* can't happen yet.. but when we add 2d support we'll have
526		 * to deal w/ cross-ring synchronization:
527		 */
528		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
529
530		if (!is_active(msm_obj)) {
531			uint32_t iova;
532
533			/* ring takes a reference to the bo and iova: */
534			drm_gem_object_reference(&msm_obj->base);
535			msm_gem_get_iova_locked(&msm_obj->base,
536					submit->gpu->id, &iova);
537		}
538
539		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
540			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
541
542		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
543			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
544	}
545
546	ret = gpu->funcs->submit(gpu, submit, ctx);
547	priv->lastctx = ctx;
548
549	hangcheck_timer_reset(gpu);
550
551	return ret;
552}
553
554/*
555 * Init/Cleanup:
556 */
557
558static irqreturn_t irq_handler(int irq, void *data)
559{
560	struct msm_gpu *gpu = data;
561	return gpu->funcs->irq(gpu);
562}
563
564static const char *clk_names[] = {
565		"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
566		"alt_mem_iface_clk",
567};
568
569int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
570		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
571		const char *name, const char *ioname, const char *irqname, int ringsz)
572{
573	struct iommu_domain *iommu;
574	int i, ret;
575
576	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
577		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
578
579	gpu->dev = drm;
580	gpu->funcs = funcs;
581	gpu->name = name;
582	gpu->inactive = true;
583
584	INIT_LIST_HEAD(&gpu->active_list);
585	INIT_WORK(&gpu->retire_work, retire_worker);
586	INIT_WORK(&gpu->inactive_work, inactive_worker);
587	INIT_WORK(&gpu->recover_work, recover_worker);
588
589	INIT_LIST_HEAD(&gpu->submit_list);
590
591	setup_timer(&gpu->inactive_timer, inactive_handler,
592			(unsigned long)gpu);
593	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
594			(unsigned long)gpu);
595
596	spin_lock_init(&gpu->perf_lock);
597
598	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
599
600	/* Map registers: */
601	gpu->mmio = msm_ioremap(pdev, ioname, name);
602	if (IS_ERR(gpu->mmio)) {
603		ret = PTR_ERR(gpu->mmio);
604		goto fail;
605	}
606
607	/* Get Interrupt: */
608	gpu->irq = platform_get_irq_byname(pdev, irqname);
609	if (gpu->irq < 0) {
610		ret = gpu->irq;
611		dev_err(drm->dev, "failed to get irq: %d\n", ret);
612		goto fail;
613	}
614
615	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
616			IRQF_TRIGGER_HIGH, gpu->name, gpu);
617	if (ret) {
618		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
619		goto fail;
620	}
621
622	/* Acquire clocks: */
623	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
624		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
625		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
626		if (IS_ERR(gpu->grp_clks[i]))
627			gpu->grp_clks[i] = NULL;
628	}
629
630	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
631	DBG("ebi1_clk: %p", gpu->ebi1_clk);
632	if (IS_ERR(gpu->ebi1_clk))
633		gpu->ebi1_clk = NULL;
634
635	/* Acquire regulators: */
636	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
637	DBG("gpu_reg: %p", gpu->gpu_reg);
638	if (IS_ERR(gpu->gpu_reg))
639		gpu->gpu_reg = NULL;
640
641	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
642	DBG("gpu_cx: %p", gpu->gpu_cx);
643	if (IS_ERR(gpu->gpu_cx))
644		gpu->gpu_cx = NULL;
645
646	/* Setup IOMMU.. eventually we will (I think) do this once per context
647	 * and have separate page tables per context.  For now, to keep things
648	 * simple and to get something working, just use a single address space:
649	 */
650	iommu = iommu_domain_alloc(&platform_bus_type);
651	if (iommu) {
652		dev_info(drm->dev, "%s: using IOMMU\n", name);
653		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
654		if (IS_ERR(gpu->mmu)) {
655			ret = PTR_ERR(gpu->mmu);
656			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
657			gpu->mmu = NULL;
658			iommu_domain_free(iommu);
659			goto fail;
660		}
661
662	} else {
663		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
664	}
665	gpu->id = msm_register_mmu(drm, gpu->mmu);
666
667
668	/* Create ringbuffer: */
669	mutex_lock(&drm->struct_mutex);
670	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
671	mutex_unlock(&drm->struct_mutex);
672	if (IS_ERR(gpu->rb)) {
673		ret = PTR_ERR(gpu->rb);
674		gpu->rb = NULL;
675		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
676		goto fail;
677	}
678
679	bs_init(gpu);
680
681	return 0;
682
683fail:
684	return ret;
685}
686
687void msm_gpu_cleanup(struct msm_gpu *gpu)
688{
689	DBG("%s", gpu->name);
690
691	WARN_ON(!list_empty(&gpu->active_list));
692
693	bs_fini(gpu);
694
695	if (gpu->rb) {
696		if (gpu->rb_iova)
697			msm_gem_put_iova(gpu->rb->bo, gpu->id);
698		msm_ringbuffer_destroy(gpu->rb);
699	}
700
701	if (gpu->mmu)
702		gpu->mmu->funcs->destroy(gpu->mmu);
703}
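
The v4.6 code above gates the hardware behind a simple reference count: msm_gpu_pm_resume() and msm_gpu_pm_suspend() adjust active_cnt under dev->struct_mutex, with the first resume enabling power rails, then clocks, then the AXI bus, and the last suspend tearing them down in reverse order. A minimal sketch of the calling pattern this implies (the caller do_gpu_work() is hypothetical, not part of the driver):

/* Hypothetical caller: bracket register access with the refcounted
 * resume/suspend calls, holding struct_mutex as the WARN_ON()s in
 * msm_gpu_pm_resume()/msm_gpu_pm_suspend() require.
 */
static int do_gpu_work(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);

	ret = msm_gpu_pm_resume(gpu);	/* active_cnt 0->1 powers up */
	if (ret)
		goto out;

	/* ... access hardware, e.g. via gpu_read()/gpu_write() ... */

	ret = msm_gpu_pm_suspend(gpu);	/* active_cnt 1->0 powers down */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
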
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include "drm/drm_drv.h"
   8
   9#include "msm_gpu.h"
  10#include "msm_gem.h"
  11#include "msm_mmu.h"
  12#include "msm_fence.h"
  13#include "msm_gpu_trace.h"
  14#include "adreno/adreno_gpu.h"
  15
  16#include <generated/utsrelease.h>
  17#include <linux/string_helpers.h>
  18#include <linux/devcoredump.h>
  19#include <linux/reset.h>
  20#include <linux/sched/task.h>
  21
  22/*
  23 * Power Management:
  24 */
  25
  26static int enable_pwrrail(struct msm_gpu *gpu)
  27{
  28	struct drm_device *dev = gpu->dev;
  29	int ret = 0;
  30
  31	if (gpu->gpu_reg) {
  32		ret = regulator_enable(gpu->gpu_reg);
  33		if (ret) {
  34			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
  35			return ret;
  36		}
  37	}
  38
  39	if (gpu->gpu_cx) {
  40		ret = regulator_enable(gpu->gpu_cx);
  41		if (ret) {
  42			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
  43			return ret;
  44		}
  45	}
  46
  47	return 0;
  48}
  49
  50static int disable_pwrrail(struct msm_gpu *gpu)
  51{
  52	if (gpu->gpu_cx)
  53		regulator_disable(gpu->gpu_cx);
  54	if (gpu->gpu_reg)
  55		regulator_disable(gpu->gpu_reg);
  56	return 0;
  57}
  58
  59static int enable_clk(struct msm_gpu *gpu)
  60{
  61	if (gpu->core_clk && gpu->fast_rate)
  62		clk_set_rate(gpu->core_clk, gpu->fast_rate);
  63
  64	/* Set the RBBM timer rate to 19.2Mhz */
  65	if (gpu->rbbmtimer_clk)
  66		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
  67
  68	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
  69}
  70
  71static int disable_clk(struct msm_gpu *gpu)
  72{
  73	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
  74
  75	/*
  76	 * Set the clock to a deliberately low rate. On older targets the clock
  77	 * speed had to be non zero to avoid problems. On newer targets this
  78	 * will be rounded down to zero anyway so it all works out.
  79	 */
  80	if (gpu->core_clk)
  81		clk_set_rate(gpu->core_clk, 27000000);
  82
  83	if (gpu->rbbmtimer_clk)
  84		clk_set_rate(gpu->rbbmtimer_clk, 0);
  85
  86	return 0;
  87}
  88
  89static int enable_axi(struct msm_gpu *gpu)
  90{
  91	return clk_prepare_enable(gpu->ebi1_clk);
  92}
  93
  94static int disable_axi(struct msm_gpu *gpu)
  95{
  96	clk_disable_unprepare(gpu->ebi1_clk);
  97	return 0;
  98}
  99
 100int msm_gpu_pm_resume(struct msm_gpu *gpu)
 101{
 102	int ret;
 103
 104	DBG("%s", gpu->name);
 105	trace_msm_gpu_resume(0);
 106
 107	ret = enable_pwrrail(gpu);
 108	if (ret)
 109		return ret;
 110
 111	ret = enable_clk(gpu);
 112	if (ret)
 113		return ret;
 114
 115	ret = enable_axi(gpu);
 116	if (ret)
 117		return ret;
 118
 119	msm_devfreq_resume(gpu);
 120
 121	gpu->needs_hw_init = true;
 122
 123	return 0;
 124}
 125
 126int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 127{
 128	int ret;
 129
 130	DBG("%s", gpu->name);
 131	trace_msm_gpu_suspend(0);
 132
 133	msm_devfreq_suspend(gpu);
 134
 135	ret = disable_axi(gpu);
 136	if (ret)
 137		return ret;
 138
 139	ret = disable_clk(gpu);
 140	if (ret)
 141		return ret;
 142
 143	ret = disable_pwrrail(gpu);
 144	if (ret)
 145		return ret;
 146
 147	gpu->suspend_count++;
 148
 149	return 0;
 150}
 151
 152void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
 153			 struct drm_printer *p)
 154{
 155	drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name);
 156	drm_printf(p, "drm-client-id:\t%u\n", ctx->seqno);
 157	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
 158	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
 159	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
 160}
 161
 162int msm_gpu_hw_init(struct msm_gpu *gpu)
 163{
 164	int ret;
 165
 166	WARN_ON(!mutex_is_locked(&gpu->lock));
 167
 168	if (!gpu->needs_hw_init)
 169		return 0;
 170
 171	disable_irq(gpu->irq);
 172	ret = gpu->funcs->hw_init(gpu);
 173	if (!ret)
 174		gpu->needs_hw_init = false;
 175	enable_irq(gpu->irq);
 176
 177	return ret;
 178}
 179
 180#ifdef CONFIG_DEV_COREDUMP
 181static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 182		size_t count, void *data, size_t datalen)
 183{
 184	struct msm_gpu *gpu = data;
 185	struct drm_print_iterator iter;
 186	struct drm_printer p;
 187	struct msm_gpu_state *state;
 188
 189	state = msm_gpu_crashstate_get(gpu);
 190	if (!state)
 191		return 0;
 192
 193	iter.data = buffer;
 194	iter.offset = 0;
 195	iter.start = offset;
 196	iter.remain = count;
 197
 198	p = drm_coredump_printer(&iter);
 199
 200	drm_printf(&p, "---\n");
 201	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
 202	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
 203	drm_printf(&p, "time: %lld.%09ld\n",
 204		state->time.tv_sec, state->time.tv_nsec);
 205	if (state->comm)
 206		drm_printf(&p, "comm: %s\n", state->comm);
 207	if (state->cmd)
 208		drm_printf(&p, "cmdline: %s\n", state->cmd);
 209
 210	gpu->funcs->show(gpu, state, &p);
 211
 212	msm_gpu_crashstate_put(gpu);
 213
 214	return count - iter.remain;
 215}
 216
 217static void msm_gpu_devcoredump_free(void *data)
 218{
 219	struct msm_gpu *gpu = data;
 220
 221	msm_gpu_crashstate_put(gpu);
 222}
 223
 224static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 225		struct msm_gem_object *obj, u64 iova, bool full)
 226{
 227	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 228
 229	/* Don't record write only objects */
 230	state_bo->size = obj->base.size;
 231	state_bo->iova = iova;
 232
 233	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name));
 234
 235	memcpy(state_bo->name, obj->name, sizeof(state_bo->name));
 236
 237	if (full) {
 238		void *ptr;
 239
 240		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
 241		if (!state_bo->data)
 242			goto out;
 243
 244		msm_gem_lock(&obj->base);
 245		ptr = msm_gem_get_vaddr_active(&obj->base);
 246		msm_gem_unlock(&obj->base);
 247		if (IS_ERR(ptr)) {
 248			kvfree(state_bo->data);
 249			state_bo->data = NULL;
 250			goto out;
 251		}
 252
 253		memcpy(state_bo->data, ptr, obj->base.size);
 254		msm_gem_put_vaddr(&obj->base);
 255	}
 256out:
 257	state->nr_bos++;
 258}
 259
 260static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 261		struct msm_gem_submit *submit, char *comm, char *cmd)
 262{
 263	struct msm_gpu_state *state;
 264
 265	/* Check if the target supports capturing crash state */
 266	if (!gpu->funcs->gpu_state_get)
 267		return;
 268
 269	/* Only save one crash state at a time */
 270	if (gpu->crashstate)
 271		return;
 272
 273	state = gpu->funcs->gpu_state_get(gpu);
 274	if (IS_ERR_OR_NULL(state))
 275		return;
 276
 277	/* Fill in the additional crash state information */
 278	state->comm = kstrdup(comm, GFP_KERNEL);
 279	state->cmd = kstrdup(cmd, GFP_KERNEL);
 280	state->fault_info = gpu->fault_info;
 281
 282	if (submit) {
 283		int i;
 284
 285		state->bos = kcalloc(submit->nr_bos,
 286			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 287
 288		for (i = 0; state->bos && i < submit->nr_bos; i++) {
 289			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
 290						  submit->bos[i].iova,
 291						  should_dump(submit, i));
 292		}
 293	}
 294
 295	/* Set the active crash state to be dumped on failure */
 296	gpu->crashstate = state;
 297
 298	/* FIXME: Release the crashstate if this errors out? */
 299	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 300		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
 301}
 302#else
 303static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 304		struct msm_gem_submit *submit, char *comm, char *cmd)
 305{
 306}
 307#endif
 308
 309/*
 310 * Hangcheck detection for locked gpu:
 311 */
 312
 313static struct msm_gem_submit *
 314find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 315{
 316	struct msm_gem_submit *submit;
 317	unsigned long flags;
 318
 319	spin_lock_irqsave(&ring->submit_lock, flags);
 320	list_for_each_entry(submit, &ring->submits, node) {
 321		if (submit->seqno == fence) {
 322			spin_unlock_irqrestore(&ring->submit_lock, flags);
 323			return submit;
 324		}
 325	}
 326	spin_unlock_irqrestore(&ring->submit_lock, flags);
 327
 328	return NULL;
 329}
 330
 331static void retire_submits(struct msm_gpu *gpu);
 332
 333static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
 334{
 335	struct msm_file_private *ctx = submit->queue->ctx;
 336	struct task_struct *task;
 337
 338	WARN_ON(!mutex_is_locked(&submit->gpu->lock));
 339
 340	/* Note that kstrdup will return NULL if argument is NULL: */
 341	*comm = kstrdup(ctx->comm, GFP_KERNEL);
 342	*cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
 343
 344	task = get_pid_task(submit->pid, PIDTYPE_PID);
 345	if (!task)
 346		return;
 347
 348	if (!*comm)
 349		*comm = kstrdup(task->comm, GFP_KERNEL);
 350
 351	if (!*cmd)
 352		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 353
 354	put_task_struct(task);
 355}
 356
 357static void recover_worker(struct kthread_work *work)
 358{
 359	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 360	struct drm_device *dev = gpu->dev;
 361	struct msm_drm_private *priv = dev->dev_private;
 362	struct msm_gem_submit *submit;
 363	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 364	char *comm = NULL, *cmd = NULL;
 365	int i;
 366
 367	mutex_lock(&gpu->lock);
 368
 369	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 370
 371	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 372	if (submit) {
 373		/* Increment the fault counts */
 374		submit->queue->faults++;
 375		if (submit->aspace)
 376			submit->aspace->faults++;
 377
 378		get_comm_cmdline(submit, &comm, &cmd);
 379
 380		if (comm && cmd) {
 381			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 382				gpu->name, comm, cmd);
 383
 384			msm_rd_dump_submit(priv->hangrd, submit,
 385				"offending task: %s (%s)", comm, cmd);
 386		} else {
 387			msm_rd_dump_submit(priv->hangrd, submit, NULL);
 388		}
 389	} else {
 390		/*
 391		 * We couldn't attribute this fault to any particular context,
 392		 * so increment the global fault count instead.
 393		 */
 394		gpu->global_faults++;
 395	}
 396
 397	/* Record the crash state */
 398	pm_runtime_get_sync(&gpu->pdev->dev);
 399	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 400
 401	kfree(cmd);
 402	kfree(comm);
 403
 404	/*
 405	 * Update all the rings with the latest and greatest fence.. this
 406	 * needs to happen after msm_rd_dump_submit() to ensure that the
 407	 * bo's referenced by the offending submit are still around.
 408	 */
 409	for (i = 0; i < gpu->nr_rings; i++) {
 410		struct msm_ringbuffer *ring = gpu->rb[i];
 411
 412		uint32_t fence = ring->memptrs->fence;
 413
 414		/*
 415		 * For the current (faulting?) ring/submit advance the fence by
 416		 * one more to clear the faulting submit
 417		 */
 418		if (ring == cur_ring)
 419			ring->memptrs->fence = ++fence;
 420
 421		msm_update_fence(ring->fctx, fence);
 422	}
 423
 424	if (msm_gpu_active(gpu)) {
 425		/* retire completed submits, plus the one that hung: */
 426		retire_submits(gpu);
 427
 428		gpu->funcs->recover(gpu);
 429
 430		/*
 431		 * Replay all remaining submits starting with highest priority
 432		 * ring
 433		 */
 434		for (i = 0; i < gpu->nr_rings; i++) {
 435			struct msm_ringbuffer *ring = gpu->rb[i];
 436			unsigned long flags;
 437
 438			spin_lock_irqsave(&ring->submit_lock, flags);
 439			list_for_each_entry(submit, &ring->submits, node)
 440				gpu->funcs->submit(gpu, submit);
 441			spin_unlock_irqrestore(&ring->submit_lock, flags);
 442		}
 443	}
 444
 445	pm_runtime_put(&gpu->pdev->dev);
 446
 447	mutex_unlock(&gpu->lock);
 448
 449	msm_gpu_retire(gpu);
 450}
 451
 452static void fault_worker(struct kthread_work *work)
 453{
 454	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
 455	struct msm_gem_submit *submit;
 456	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 457	char *comm = NULL, *cmd = NULL;
 458
 459	mutex_lock(&gpu->lock);
 460
 461	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 462	if (submit && submit->fault_dumped)
 463		goto resume_smmu;
 464
 465	if (submit) {
 466		get_comm_cmdline(submit, &comm, &cmd);
 467
 468		/*
 469		 * When we get GPU iova faults, we can get 1000s of them,
 470		 * but we really only want to log the first one.
 471		 */
 472		submit->fault_dumped = true;
 473	}
 474
 475	/* Record the crash state */
 476	pm_runtime_get_sync(&gpu->pdev->dev);
 477	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
 478	pm_runtime_put_sync(&gpu->pdev->dev);
 479
 480	kfree(cmd);
 481	kfree(comm);
 482
 483resume_smmu:
 484	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
 485	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 486
 487	mutex_unlock(&gpu->lock);
 488}
 489
 490static void hangcheck_timer_reset(struct msm_gpu *gpu)
 491{
 492	struct msm_drm_private *priv = gpu->dev->dev_private;
 493	mod_timer(&gpu->hangcheck_timer,
 494			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
 495}
 496
 497static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 498{
 499	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
 500		return false;
 501
 502	if (!gpu->funcs->progress)
 503		return false;
 504
 505	if (!gpu->funcs->progress(gpu, ring))
 506		return false;
 507
 508	ring->hangcheck_progress_retries++;
 509	return true;
 510}
 511
 512static void hangcheck_handler(struct timer_list *t)
 513{
 514	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 515	struct drm_device *dev = gpu->dev;
 516	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 517	uint32_t fence = ring->memptrs->fence;
 518
 519	if (fence != ring->hangcheck_fence) {
 520		/* some progress has been made.. ya! */
 521		ring->hangcheck_fence = fence;
 522		ring->hangcheck_progress_retries = 0;
 523	} else if (fence_before(fence, ring->fctx->last_fence) &&
 524			!made_progress(gpu, ring)) {
 525		/* no progress and not done.. hung! */
 526		ring->hangcheck_fence = fence;
 527		ring->hangcheck_progress_retries = 0;
 528		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 529				gpu->name, ring->id);
 530		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 531				gpu->name, fence);
 532		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 533				gpu->name, ring->fctx->last_fence);
 534
 535		kthread_queue_work(gpu->worker, &gpu->recover_work);
 536	}
 537
 538	/* if still more pending work, reset the hangcheck timer: */
 539	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
 540		hangcheck_timer_reset(gpu);
 541
 542	/* workaround for missing irq: */
 543	msm_gpu_retire(gpu);
 544}
 545
 546/*
 547 * Performance Counters:
 548 */
 549
 550/* called under perf_lock */
 551static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
 552{
 553	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
 554	int i, n = min(ncntrs, gpu->num_perfcntrs);
 555
 556	/* read current values: */
 557	for (i = 0; i < gpu->num_perfcntrs; i++)
 558		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
 559
 560	/* update cntrs: */
 561	for (i = 0; i < n; i++)
 562		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
 563
 564	/* save current values: */
 565	for (i = 0; i < gpu->num_perfcntrs; i++)
 566		gpu->last_cntrs[i] = current_cntrs[i];
 567
 568	return n;
 569}
 570
 571static void update_sw_cntrs(struct msm_gpu *gpu)
 572{
 573	ktime_t time;
 574	uint32_t elapsed;
 575	unsigned long flags;
 576
 577	spin_lock_irqsave(&gpu->perf_lock, flags);
 578	if (!gpu->perfcntr_active)
 579		goto out;
 580
 581	time = ktime_get();
 582	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
 583
 584	gpu->totaltime += elapsed;
 585	if (gpu->last_sample.active)
 586		gpu->activetime += elapsed;
 587
 588	gpu->last_sample.active = msm_gpu_active(gpu);
 589	gpu->last_sample.time = time;
 590
 591out:
 592	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 593}
 594
 595void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 596{
 597	unsigned long flags;
 598
 599	pm_runtime_get_sync(&gpu->pdev->dev);
 600
 601	spin_lock_irqsave(&gpu->perf_lock, flags);
 602	/* we could dynamically enable/disable perfcntr registers too.. */
 603	gpu->last_sample.active = msm_gpu_active(gpu);
 604	gpu->last_sample.time = ktime_get();
 605	gpu->activetime = gpu->totaltime = 0;
 606	gpu->perfcntr_active = true;
 607	update_hw_cntrs(gpu, 0, NULL);
 608	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 609}
 610
 611void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 612{
 613	gpu->perfcntr_active = false;
 614	pm_runtime_put_sync(&gpu->pdev->dev);
 615}
 616
 617/* returns -errno or # of cntrs sampled */
 618int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 619		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
 620{
 621	unsigned long flags;
 622	int ret;
 623
 624	spin_lock_irqsave(&gpu->perf_lock, flags);
 625
 626	if (!gpu->perfcntr_active) {
 627		ret = -EINVAL;
 628		goto out;
 629	}
 630
 631	*activetime = gpu->activetime;
 632	*totaltime = gpu->totaltime;
 633
 634	gpu->activetime = gpu->totaltime = 0;
 635
 636	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
 637
 638out:
 639	spin_unlock_irqrestore(&gpu->perf_lock, flags);
 640
 641	return ret;
 642}
 643
 644/*
 645 * Cmdstream submission/retirement:
 646 */
 647
 648static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 649		struct msm_gem_submit *submit)
 650{
 651	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 652	volatile struct msm_gpu_submit_stats *stats;
 653	u64 elapsed, clock = 0, cycles;
 654	unsigned long flags;
 655
 656	stats = &ring->memptrs->stats[index];
 657	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
 658	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
 659	do_div(elapsed, 192);
 660
 661	cycles = stats->cpcycles_end - stats->cpcycles_start;
 662
 663	/* Calculate the clock frequency from the number of CP cycles */
 664	if (elapsed) {
 665		clock = cycles * 1000;
 666		do_div(clock, elapsed);
 667	}
 668
 669	submit->queue->ctx->elapsed_ns += elapsed;
 670	submit->queue->ctx->cycles     += cycles;
 671
 672	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 673		stats->alwayson_start, stats->alwayson_end);
 674
 675	msm_submit_retire(submit);
 676
 677	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 678
 679	spin_lock_irqsave(&ring->submit_lock, flags);
 680	list_del(&submit->node);
 681	spin_unlock_irqrestore(&ring->submit_lock, flags);
 682
 683	/* Update devfreq on transition from active->idle: */
 684	mutex_lock(&gpu->active_lock);
 685	gpu->active_submits--;
 686	WARN_ON(gpu->active_submits < 0);
 687	if (!gpu->active_submits) {
 688		msm_devfreq_idle(gpu);
 689		pm_runtime_put_autosuspend(&gpu->pdev->dev);
 690	}
 691
 692	mutex_unlock(&gpu->active_lock);
 693
 694	msm_gem_submit_put(submit);
 695}
 696
 697static void retire_submits(struct msm_gpu *gpu)
 698{
 699	int i;
 700
 701	/* Retire the commits starting with highest priority */
 702	for (i = 0; i < gpu->nr_rings; i++) {
 703		struct msm_ringbuffer *ring = gpu->rb[i];
 704
 705		while (true) {
 706			struct msm_gem_submit *submit = NULL;
 707			unsigned long flags;
 708
 709			spin_lock_irqsave(&ring->submit_lock, flags);
 710			submit = list_first_entry_or_null(&ring->submits,
 711					struct msm_gem_submit, node);
 712			spin_unlock_irqrestore(&ring->submit_lock, flags);
 713
 714			/*
 715			 * If no submit, we are done.  If submit->fence hasn't
 716			 * been signalled, then later submits are not signalled
 717			 * either, so we are also done.
 718			 */
 719			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
 720				retire_submit(gpu, ring, submit);
 721			} else {
 722				break;
 723			}
 724		}
 725	}
 726
 727	wake_up_all(&gpu->retire_event);
 728}
 729
 730static void retire_worker(struct kthread_work *work)
 731{
 732	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 733
 734	retire_submits(gpu);
 735}
 736
 737/* call from irq handler to schedule work to retire bo's */
 738void msm_gpu_retire(struct msm_gpu *gpu)
 739{
 740	int i;
 741
 742	for (i = 0; i < gpu->nr_rings; i++)
 743		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 744
 745	kthread_queue_work(gpu->worker, &gpu->retire_work);
 746	update_sw_cntrs(gpu);
 747}
 748
 749/* add bo's to gpu's ring, and kick gpu: */
 750void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 751{
 752	struct drm_device *dev = gpu->dev;
 753	struct msm_drm_private *priv = dev->dev_private;
 754	struct msm_ringbuffer *ring = submit->ring;
 755	unsigned long flags;
 756
 757	WARN_ON(!mutex_is_locked(&gpu->lock));
 758
 759	pm_runtime_get_sync(&gpu->pdev->dev);
 760
 761	msm_gpu_hw_init(gpu);
 762
 763	submit->seqno = submit->hw_fence->seqno;
 764
 765	msm_rd_dump_submit(priv->rd, submit, NULL);
 766
 767	update_sw_cntrs(gpu);
 768
 769	/*
 770	 * ring->submits holds a ref to the submit, to deal with the case
 771	 * that a submit completes before msm_ioctl_gem_submit() returns.
 772	 */
 773	msm_gem_submit_get(submit);
 774
 775	spin_lock_irqsave(&ring->submit_lock, flags);
 776	list_add_tail(&submit->node, &ring->submits);
 777	spin_unlock_irqrestore(&ring->submit_lock, flags);
 778
 779	/* Update devfreq on transition from idle->active: */
 780	mutex_lock(&gpu->active_lock);
 781	if (!gpu->active_submits) {
 782		pm_runtime_get(&gpu->pdev->dev);
 783		msm_devfreq_active(gpu);
 784	}
 785	gpu->active_submits++;
 786	mutex_unlock(&gpu->active_lock);
 787
 788	gpu->funcs->submit(gpu, submit);
 789	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 790
 791	pm_runtime_put(&gpu->pdev->dev);
 792	hangcheck_timer_reset(gpu);
 793}
 794
 795/*
 796 * Init/Cleanup:
 797 */
 798
 799static irqreturn_t irq_handler(int irq, void *data)
 800{
 801	struct msm_gpu *gpu = data;
 802	return gpu->funcs->irq(gpu);
 803}
 804
 805static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 806{
 807	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 808
 809	if (ret < 1) {
 810		gpu->nr_clocks = 0;
 811		return ret;
 812	}
 813
 814	gpu->nr_clocks = ret;
 815
 816	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 817		gpu->nr_clocks, "core");
 818
 819	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
 820		gpu->nr_clocks, "rbbmtimer");
 821
 822	return 0;
 823}
 824
 825/* Return a new address space for a msm_drm_private instance */
 826struct msm_gem_address_space *
 827msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 828{
 829	struct msm_gem_address_space *aspace = NULL;
 830	if (!gpu)
 831		return NULL;
 832
 833	/*
 834	 * If the target doesn't support private address spaces then return
 835	 * the global one
 836	 */
 837	if (gpu->funcs->create_private_address_space) {
 838		aspace = gpu->funcs->create_private_address_space(gpu);
 839		if (!IS_ERR(aspace))
 840			aspace->pid = get_pid(task_pid(task));
 841	}
 842
 843	if (IS_ERR_OR_NULL(aspace))
 844		aspace = msm_gem_address_space_get(gpu->aspace);
 845
 846	return aspace;
 847}
 848
 849int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 850		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 851		const char *name, struct msm_gpu_config *config)
 852{
 853	struct msm_drm_private *priv = drm->dev_private;
 854	int i, ret, nr_rings = config->nr_rings;
 855	void *memptrs;
 856	uint64_t memptrs_iova;
 857
 858	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 859		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
 860
 861	gpu->dev = drm;
 862	gpu->funcs = funcs;
 863	gpu->name = name;
 864
 865	gpu->worker = kthread_create_worker(0, "gpu-worker");
 866	if (IS_ERR(gpu->worker)) {
 867		ret = PTR_ERR(gpu->worker);
 868		gpu->worker = NULL;
 869		goto fail;
 870	}
 871
 872	sched_set_fifo_low(gpu->worker->task);
 873
 874	mutex_init(&gpu->active_lock);
 875	mutex_init(&gpu->lock);
 876	init_waitqueue_head(&gpu->retire_event);
 877	kthread_init_work(&gpu->retire_work, retire_worker);
 878	kthread_init_work(&gpu->recover_work, recover_worker);
 879	kthread_init_work(&gpu->fault_work, fault_worker);
 880
 881	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
 882
 883	/*
 884	 * If progress detection is supported, halve the hangcheck timer
 885	 * duration, as it takes two iterations of the hangcheck handler
 886	 * to detect a hang.
 887	 */
 888	if (funcs->progress)
 889		priv->hangcheck_period /= 2;
 890
 891	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 892
 893	spin_lock_init(&gpu->perf_lock);
 894
 895
 896	/* Map registers: */
 897	gpu->mmio = msm_ioremap(pdev, config->ioname);
 898	if (IS_ERR(gpu->mmio)) {
 899		ret = PTR_ERR(gpu->mmio);
 900		goto fail;
 901	}
 902
 903	/* Get Interrupt: */
 904	gpu->irq = platform_get_irq(pdev, 0);
 905	if (gpu->irq < 0) {
 906		ret = gpu->irq;
 907		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 908		goto fail;
 909	}
 910
 911	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 912			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
 913	if (ret) {
 914		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 915		goto fail;
 916	}
 917
 918	ret = get_clocks(pdev, gpu);
 919	if (ret)
 920		goto fail;
 921
 922	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 923	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 924	if (IS_ERR(gpu->ebi1_clk))
 925		gpu->ebi1_clk = NULL;
 926
 927	/* Acquire regulators: */
 928	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
 929	DBG("gpu_reg: %p", gpu->gpu_reg);
 930	if (IS_ERR(gpu->gpu_reg))
 931		gpu->gpu_reg = NULL;
 932
 933	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
 934	DBG("gpu_cx: %p", gpu->gpu_cx);
 935	if (IS_ERR(gpu->gpu_cx))
 936		gpu->gpu_cx = NULL;
 937
 938	gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
 939			"cx_collapse");
 940
 941	gpu->pdev = pdev;
 942	platform_set_drvdata(pdev, &gpu->adreno_smmu);
 943
 944	msm_devfreq_init(gpu);
 945
 946
 947	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
 948
 949	if (gpu->aspace == NULL)
 950		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 951	else if (IS_ERR(gpu->aspace)) {
 952		ret = PTR_ERR(gpu->aspace);
 953		goto fail;
 954	}
 955
 956	memptrs = msm_gem_kernel_new(drm,
 957		sizeof(struct msm_rbmemptrs) * nr_rings,
 958		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
 959		&memptrs_iova);
 960
 961	if (IS_ERR(memptrs)) {
 962		ret = PTR_ERR(memptrs);
 963		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 964		goto fail;
 965	}
 966
 967	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
 968
 969	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 970		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
 971			ARRAY_SIZE(gpu->rb));
 972		nr_rings = ARRAY_SIZE(gpu->rb);
 973	}
 974
 975	/* Create ringbuffer(s): */
 976	for (i = 0; i < nr_rings; i++) {
 977		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 978
 979		if (IS_ERR(gpu->rb[i])) {
 980			ret = PTR_ERR(gpu->rb[i]);
 981			DRM_DEV_ERROR(drm->dev,
 982				"could not create ringbuffer %d: %d\n", i, ret);
 983			goto fail;
 984		}
 985
 986		memptrs += sizeof(struct msm_rbmemptrs);
 987		memptrs_iova += sizeof(struct msm_rbmemptrs);
 988	}
 989
 990	gpu->nr_rings = nr_rings;
 991
 992	refcount_set(&gpu->sysprof_active, 1);
 993
 994	return 0;
 995
 996fail:
 997	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
 998		msm_ringbuffer_destroy(gpu->rb[i]);
 999		gpu->rb[i] = NULL;
1000	}
1001
1002	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
1003
1004	platform_set_drvdata(pdev, NULL);
1005	return ret;
1006}
1007
1008void msm_gpu_cleanup(struct msm_gpu *gpu)
1009{
1010	int i;
1011
1012	DBG("%s", gpu->name);
1013
1014	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
1015		msm_ringbuffer_destroy(gpu->rb[i]);
1016		gpu->rb[i] = NULL;
1017	}
1018
1019	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
1020
1021	if (!IS_ERR_OR_NULL(gpu->aspace)) {
1022		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
1023		msm_gem_address_space_put(gpu->aspace);
1024	}
1025
1026	if (gpu->worker) {
1027		kthread_destroy_worker(gpu->worker);
1028	}
1029
1030	msm_devfreq_cleanup(gpu);
1031
1032	platform_set_drvdata(gpu->pdev, NULL);
1033}
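
Between v4.6 and v6.2 the clock handling moved from hand-rolled loops over a fixed clk_names[] table to the common clock framework's bulk helpers: get_clocks() uses devm_clk_bulk_get_all() to fetch every clock named in the device tree, and enable_clk()/disable_clk() collapse into single clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() calls. A minimal standalone sketch of that pattern, with hypothetical my_dev naming (not from this driver):

#include <linux/clk.h>
#include <linux/platform_device.h>

struct my_dev {
	struct clk_bulk_data *clks;	/* allocated by devm_clk_bulk_get_all() */
	int nr_clks;
};

static int my_dev_clk_init(struct platform_device *pdev, struct my_dev *d)
{
	/* Fetch all clocks for this device; returns the count or -errno */
	int ret = devm_clk_bulk_get_all(&pdev->dev, &d->clks);

	if (ret < 0)
		return ret;
	d->nr_clks = ret;

	/* One call prepares and enables the whole set, as in enable_clk() */
	return clk_bulk_prepare_enable(d->nr_clks, d->clks);
}

static void my_dev_clk_fini(struct my_dev *d)
{
	clk_bulk_disable_unprepare(d->nr_clks, d->clks);
}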