Linux v6.13.7: drivers/hwtracing/coresight/coresight-cpu-debug.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2017 Linaro Limited. All rights reserved.
  4 *
  5 * Author: Leo Yan <leo.yan@linaro.org>
  6 */
  7#include <linux/acpi.h>
  8#include <linux/amba/bus.h>
  9#include <linux/coresight.h>
 10#include <linux/cpu.h>
 11#include <linux/debugfs.h>
 12#include <linux/delay.h>
 13#include <linux/device.h>
 14#include <linux/err.h>
 15#include <linux/init.h>
 16#include <linux/io.h>
 17#include <linux/iopoll.h>
 18#include <linux/kernel.h>
 19#include <linux/module.h>
 20#include <linux/moduleparam.h>
 21#include <linux/panic_notifier.h>
 22#include <linux/platform_device.h>
 23#include <linux/pm_qos.h>
 24#include <linux/slab.h>
 25#include <linux/smp.h>
 26#include <linux/types.h>
 27#include <linux/uaccess.h>
 28
 29#include "coresight-priv.h"
 30
 31#define EDPCSR				0x0A0
 32#define EDCIDSR				0x0A4
 33#define EDVIDSR				0x0A8
 34#define EDPCSR_HI			0x0AC
 35#define EDOSLAR				0x300
 36#define EDPRCR				0x310
 37#define EDPRSR				0x314
 38#define EDDEVID1			0xFC4
 39#define EDDEVID				0xFC8
 40
 41#define EDPCSR_PROHIBITED		0xFFFFFFFF
 42
 43/* Bit definitions for EDPCSR */
 44#define EDPCSR_THUMB			BIT(0)
 45#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
 46#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)
 47
 48/* Bit definitions for EDPRCR */
 49#define EDPRCR_COREPURQ			BIT(3)
 50#define EDPRCR_CORENPDRQ		BIT(0)
 51
 52/* Bit definitions for EDPRSR */
 53#define EDPRSR_DLK			BIT(6)
 54#define EDPRSR_PU			BIT(0)
 55
 56/* Bit definitions for EDVIDSR */
 57#define EDVIDSR_NS			BIT(31)
 58#define EDVIDSR_E2			BIT(30)
 59#define EDVIDSR_E3			BIT(29)
 60#define EDVIDSR_HV			BIT(28)
 61#define EDVIDSR_VMID			GENMASK(7, 0)
 62
 63/*
 64 * Bit definitions for EDDEVID1:PCSROffset
 65 *
 66 * NOTE: ARMv8 and ARMv7 define this field differently, so consolidate
 67 * the bit definitions as below:
 68 *
 69 * 0b0000 - Sample offset applies based on the instruction state; we
 70 *          rely on EDDEVID to check whether EDPCSR is implemented
 71 * 0b0001 - No offset applies.
 72 * 0b0010 - No offset applies, but do not use in AArch32 mode
 73 *
 74 */
 75#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
 76#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
 77#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
 78
 79/* Bit definitions for EDDEVID */
 80#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
 81#define EDDEVID_IMPL_EDPCSR		(0x1)
 82#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
 83#define EDDEVID_IMPL_FULL		(0x3)
 84
 85#define DEBUG_WAIT_SLEEP		1000
 86#define DEBUG_WAIT_TIMEOUT		32000
 87
 88struct debug_drvdata {
 89	struct clk	*pclk;
 90	void __iomem	*base;
 91	struct device	*dev;
 92	int		cpu;
 93
 94	bool		edpcsr_present;
 95	bool		edcidsr_present;
 96	bool		edvidsr_present;
 97	bool		pc_has_offset;
 98
 99	u32		edpcsr;
100	u32		edpcsr_hi;
101	u32		edprsr;
102	u32		edvidsr;
103	u32		edcidsr;
104};
105
106static DEFINE_MUTEX(debug_lock);
107static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
108static int debug_count;
109static struct dentry *debug_debugfs_dir;
110
111static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
112module_param_named(enable, debug_enable, bool, 0600);
113MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
114
115static void debug_os_unlock(struct debug_drvdata *drvdata)
116{
117	/* Unlocks the debug registers */
118	writel_relaxed(0x0, drvdata->base + EDOSLAR);
119
120	/* Make sure the registers are unlocked before accessing */
121	wmb();
122}
123
124/*
125 * According to ARM DDI 0487A.k, the access permission must be checked
126 * before accessing the external debug registers; if either of the
127 * conditions below is met, the debug registers must not be accessed,
128 * otherwise the CPU may lock up:
129 *
130 * - the CPU power domain is powered off;
131 * - the OS Double Lock is locked.
132 *
133 * Reading EDPRSR tells us whether either condition is met.
134 */
135static bool debug_access_permitted(struct debug_drvdata *drvdata)
136{
137	/* CPU is powered off */
138	if (!(drvdata->edprsr & EDPRSR_PU))
139		return false;
140
141	/* The OS Double Lock is locked */
142	if (drvdata->edprsr & EDPRSR_DLK)
143		return false;
144
145	return true;
146}
147
148static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
149{
150	u32 edprcr;
151
152try_again:
153
154	/*
155	 * Send a request to the power management controller by asserting
156	 * the DBGPWRUPREQ signal; a sane power controller implementation
157	 * should then enable the CPU power domain if the CPU is in a low
158	 * power state.
159	 */
160	edprcr = readl_relaxed(drvdata->base + EDPRCR);
161	edprcr |= EDPRCR_COREPURQ;
162	writel_relaxed(edprcr, drvdata->base + EDPRCR);
163
164	/* Wait for CPU to be powered up (timeout~=32ms) */
165	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
166			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
167			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
168		/*
169		 * Unfortunately the CPU cannot be powered up, so return;
170		 * later accesses to the other registers will be denied.
171		 * In this case, CPU low power states should be disabled
172		 * to ensure the CPU power domain stays enabled!
173		 */
174		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
175			__func__, drvdata->cpu);
176		return;
177	}
178
179	/*
180	 * At this point the CPU is powered up, so set the no-powerdown
181	 * request bit so the core keeps its power and power down is emulated.
182	 */
183	edprcr = readl_relaxed(drvdata->base + EDPRCR);
184	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
185	writel_relaxed(edprcr, drvdata->base + EDPRCR);
186
187	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
188
189	/* The core power domain was switched off while in use, try again */
190	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
191		goto try_again;
192}
193
194static void debug_read_regs(struct debug_drvdata *drvdata)
195{
196	u32 save_edprcr;
197
198	CS_UNLOCK(drvdata->base);
199
200	/* Unlock the OS lock */
201	debug_os_unlock(drvdata);
202
203	/* Save EDPRCR register */
204	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);
205
206	/*
207	 * Ensure the CPU power domain is enabled so that the
208	 * registers are accessible.
209	 */
210	debug_force_cpu_powered_up(drvdata);
211
212	if (!debug_access_permitted(drvdata))
213		goto out;
214
215	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
216
217	/*
218	 * As described in ARM DDI 0487A.k, if the processing
219	 * element (PE) is in debug state, or sample-based
220	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
221	 * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also have
222	 * UNKNOWN values. So bail out directly in this case.
223	 */
224	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
225		goto out;
226
227	/*
228	 * A read of the EDPCSR normally has the side-effect of
229	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
230	 * at this point it's safe to read their values.
231	 */
232	if (IS_ENABLED(CONFIG_64BIT))
233		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
234
235	if (drvdata->edcidsr_present)
236		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
237
238	if (drvdata->edvidsr_present)
239		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
240
241out:
242	/* Restore EDPRCR register */
243	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);
244
245	CS_LOCK(drvdata->base);
246}
247
248#ifdef CONFIG_64BIT
249static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
250{
251	return (unsigned long)drvdata->edpcsr_hi << 32 |
252	       (unsigned long)drvdata->edpcsr;
253}
254#else
255static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
256{
257	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
258	unsigned long pc;
259
260	pc = (unsigned long)drvdata->edpcsr;
261
262	if (drvdata->pc_has_offset) {
263		arm_inst_offset = 8;
264		thumb_inst_offset = 4;
265	}
266
267	/* Handle Thumb instructions */
268	if (pc & EDPCSR_THUMB) {
269		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
270		return pc;
271	}
272
273	/*
274	 * Handle the ARM instruction offset; if the ARM instruction
275	 * is not 4-byte aligned, the offset may be implementation
276	 * defined. Keep the original value in this case and print
277	 * a notice.
278	 */
279	if (pc & BIT(1))
280		dev_emerg(drvdata->dev,
281			  "Instruction offset is implementation defined\n");
282	else
283		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
284
285	return pc;
286}
287#endif
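/*
 * Worked example of the AArch32 (CONFIG_64BIT=n) adjustment above, using
 * illustrative sample values: with pc_has_offset set, an EDPCSR read of
 * 0x80001005 has bit 0 set, so it is treated as a Thumb sample and becomes
 * (0x80001005 & ~0x1) - 4 = 0x80001000, while an ARM sample of 0x80002008
 * becomes (0x80002008 & ~0x3) - 8 = 0x80002000.
 */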
288
289static void debug_dump_regs(struct debug_drvdata *drvdata)
290{
291	struct device *dev = drvdata->dev;
292	unsigned long pc;
293
294	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
295		  drvdata->edprsr,
296		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
297		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
298
299	if (!debug_access_permitted(drvdata)) {
300		dev_emerg(dev, "No permission to access debug registers!\n");
301		return;
302	}
303
304	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
305		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
306		return;
307	}
308
309	pc = debug_adjust_pc(drvdata);
310	dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);
311
312	if (drvdata->edcidsr_present)
313		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
314
315	if (drvdata->edvidsr_present)
316		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
317			  drvdata->edvidsr,
318			  drvdata->edvidsr & EDVIDSR_NS ?
319			  "Non-secure" : "Secure",
320			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
321				(drvdata->edvidsr & EDVIDSR_E2 ?
322				 "EL2" : "EL1/0"),
323			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
324			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
325}
326
327static void debug_init_arch_data(void *info)
328{
329	struct debug_drvdata *drvdata = info;
330	u32 mode, pcsr_offset;
331	u32 eddevid, eddevid1;
332
333	CS_UNLOCK(drvdata->base);
334
335	/* Read device info */
336	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
337	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
338
339	CS_LOCK(drvdata->base);
340
341	/* Parse implementation feature */
342	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
343	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
344
345	drvdata->edpcsr_present  = false;
346	drvdata->edcidsr_present = false;
347	drvdata->edvidsr_present = false;
348	drvdata->pc_has_offset   = false;
349
350	switch (mode) {
351	case EDDEVID_IMPL_FULL:
352		drvdata->edvidsr_present = true;
353		fallthrough;
354	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
355		drvdata->edcidsr_present = true;
356		fallthrough;
357	case EDDEVID_IMPL_EDPCSR:
358		/*
359		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
360		 * an offset applies to the sampled PC value; if it reads
361		 * back as 0x2, the debug module does not sample the
362		 * instruction set state when an ARMv8 CPU is in AArch32
363		 * state.
364		 */
365		drvdata->edpcsr_present =
366			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
367			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
368
369		drvdata->pc_has_offset =
370			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
371		break;
372	default:
373		break;
374	}
375}
376
377/*
378 * Dump out information on panic.
379 */
380static int debug_notifier_call(struct notifier_block *self,
381			       unsigned long v, void *p)
382{
383	int cpu;
384	struct debug_drvdata *drvdata;
385
386	/* Bail out if we can't acquire the mutex or the functionality is off */
387	if (!mutex_trylock(&debug_lock))
388		return NOTIFY_DONE;
389
390	if (!debug_enable)
391		goto skip_dump;
392
393	pr_emerg("ARM external debug module:\n");
394
395	for_each_possible_cpu(cpu) {
396		drvdata = per_cpu(debug_drvdata, cpu);
397		if (!drvdata)
398			continue;
399
400		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
401
402		debug_read_regs(drvdata);
403		debug_dump_regs(drvdata);
404	}
405
406skip_dump:
407	mutex_unlock(&debug_lock);
408	return NOTIFY_DONE;
409}
410
411static struct notifier_block debug_notifier = {
412	.notifier_call = debug_notifier_call,
413};
414
415static int debug_enable_func(void)
416{
417	struct debug_drvdata *drvdata;
418	int cpu, ret = 0;
419	cpumask_t mask;
420
421	/*
422	 * Use a cpumask to track which debug power domains have
423	 * been powered on, and use it to handle the failure case.
424	 */
425	cpumask_clear(&mask);
426
427	for_each_possible_cpu(cpu) {
428		drvdata = per_cpu(debug_drvdata, cpu);
429		if (!drvdata)
430			continue;
431
432		ret = pm_runtime_get_sync(drvdata->dev);
433		if (ret < 0)
434			goto err;
435		else
436			cpumask_set_cpu(cpu, &mask);
437	}
438
439	return 0;
440
441err:
442	/*
443	 * If pm_runtime_get_sync() failed, roll back all the
444	 * CPUs that were enabled before the failure.
445	 */
446	for_each_cpu(cpu, &mask) {
447		drvdata = per_cpu(debug_drvdata, cpu);
448		pm_runtime_put_noidle(drvdata->dev);
449	}
450
451	return ret;
452}
453
454static int debug_disable_func(void)
455{
456	struct debug_drvdata *drvdata;
457	int cpu, ret, err = 0;
458
459	/*
460	 * Disable the debug power domains; if an error is
461	 * encountered, record it and keep iterating through
462	 * the remaining CPUs.
463	 */
464	for_each_possible_cpu(cpu) {
465		drvdata = per_cpu(debug_drvdata, cpu);
466		if (!drvdata)
467			continue;
468
469		ret = pm_runtime_put(drvdata->dev);
470		if (ret < 0)
471			err = ret;
472	}
473
474	return err;
475}
476
477static ssize_t debug_func_knob_write(struct file *f,
478		const char __user *buf, size_t count, loff_t *ppos)
479{
480	u8 val;
481	int ret;
482
483	ret = kstrtou8_from_user(buf, count, 2, &val);
484	if (ret)
485		return ret;
486
487	mutex_lock(&debug_lock);
488
489	if (val == debug_enable)
490		goto out;
491
492	if (val)
493		ret = debug_enable_func();
494	else
495		ret = debug_disable_func();
496
497	if (ret) {
498		pr_err("%s: unable to %s debug function: %d\n",
499		       __func__, val ? "enable" : "disable", ret);
500		goto err;
501	}
502
503	debug_enable = val;
504out:
505	ret = count;
506err:
507	mutex_unlock(&debug_lock);
508	return ret;
509}
510
511static ssize_t debug_func_knob_read(struct file *f,
512		char __user *ubuf, size_t count, loff_t *ppos)
513{
514	ssize_t ret;
515	char buf[3];
516
517	mutex_lock(&debug_lock);
518	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
519	mutex_unlock(&debug_lock);
520
521	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
522	return ret;
523}
524
525static const struct file_operations debug_func_knob_fops = {
526	.open	= simple_open,
527	.read	= debug_func_knob_read,
528	.write	= debug_func_knob_write,
529};
530
531static int debug_func_init(void)
532{
533	int ret;
534
535	/* Create debugfs node */
536	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
537	debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
538			    &debug_func_knob_fops);
539
540	/* Register a function to be called on panic */
541	ret = atomic_notifier_chain_register(&panic_notifier_list,
542					     &debug_notifier);
543	if (ret) {
544		pr_err("%s: unable to register notifier: %d\n",
545		       __func__, ret);
546		goto err;
547	}
548
549	return 0;
550
551err:
552	debugfs_remove_recursive(debug_debugfs_dir);
553	return ret;
554}
555
556static void debug_func_exit(void)
557{
558	atomic_notifier_chain_unregister(&panic_notifier_list,
559					 &debug_notifier);
560	debugfs_remove_recursive(debug_debugfs_dir);
561}
562
563static int __debug_probe(struct device *dev, struct resource *res)
564{
565	struct debug_drvdata *drvdata = dev_get_drvdata(dev);
566	void __iomem *base;
567	int ret;
568
569	drvdata->cpu = coresight_get_cpu(dev);
570	if (drvdata->cpu < 0)
571		return drvdata->cpu;
572
573	if (per_cpu(debug_drvdata, drvdata->cpu)) {
574		dev_err(dev, "CPU%d drvdata has already been initialized\n",
575			drvdata->cpu);
576		return -EBUSY;
577	}
578
579	drvdata->dev = dev;
580	base = devm_ioremap_resource(dev, res);
581	if (IS_ERR(base))
582		return PTR_ERR(base);
583
584	drvdata->base = base;
585
586	cpus_read_lock();
587	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
588	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
589				       drvdata, 1);
590	cpus_read_unlock();
591
592	if (ret) {
593		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
594		goto err;
595	}
596
597	if (!drvdata->edpcsr_present) {
598		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
599			drvdata->cpu);
600		ret = -ENXIO;
601		goto err;
602	}
603
604	if (!debug_count++) {
605		ret = debug_func_init();
606		if (ret)
607			goto err_func_init;
608	}
609
610	mutex_lock(&debug_lock);
611	/* Turn off debug power domain if debugging is disabled */
612	if (!debug_enable)
613		pm_runtime_put(dev);
614	mutex_unlock(&debug_lock);
615
616	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
617	return 0;
618
619err_func_init:
620	debug_count--;
621err:
622	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
623	return ret;
624}
625
626static int debug_probe(struct amba_device *adev, const struct amba_id *id)
627{
628	struct debug_drvdata *drvdata;
629
630	drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
631	if (!drvdata)
632		return -ENOMEM;
633
634	amba_set_drvdata(adev, drvdata);
635	return __debug_probe(&adev->dev, &adev->res);
636}
637
638static void __debug_remove(struct device *dev)
639{
640	struct debug_drvdata *drvdata = dev_get_drvdata(dev);
641
642	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
643
644	mutex_lock(&debug_lock);
645	/* Turn off the debug power domain before the module is removed */
646	if (debug_enable)
647		pm_runtime_put(dev);
648	mutex_unlock(&debug_lock);
649
650	if (!--debug_count)
651		debug_func_exit();
652}
653
654static void debug_remove(struct amba_device *adev)
655{
656	__debug_remove(&adev->dev);
657}
658
659static const struct amba_cs_uci_id uci_id_debug[] = {
660	{
661		/*  CPU Debug UCI data */
662		.devarch	= 0x47706a15,
663		.devarch_mask	= 0xfff0ffff,
664		.devtype	= 0x00000015,
665	}
666};
667
668static const struct amba_id debug_ids[] = {
669	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
670	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
671	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
672	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
673	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
674	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
675	{},
676};
677
678MODULE_DEVICE_TABLE(amba, debug_ids);
679
680static struct amba_driver debug_driver = {
681	.drv = {
682		.name   = "coresight-cpu-debug",
683		.suppress_bind_attrs = true,
684	},
685	.probe		= debug_probe,
686	.remove		= debug_remove,
687	.id_table	= debug_ids,
688};
689
690static int debug_platform_probe(struct platform_device *pdev)
691{
692	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
693	struct debug_drvdata *drvdata;
694	int ret = 0;
695
696	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
697	if (!drvdata)
698		return -ENOMEM;
699
700	drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
701	if (IS_ERR(drvdata->pclk))
702		return -ENODEV;
703
704	dev_set_drvdata(&pdev->dev, drvdata);
705	pm_runtime_get_noresume(&pdev->dev);
706	pm_runtime_set_active(&pdev->dev);
707	pm_runtime_enable(&pdev->dev);
708
709	ret = __debug_probe(&pdev->dev, res);
710	if (ret) {
711		pm_runtime_put_noidle(&pdev->dev);
712		pm_runtime_disable(&pdev->dev);
713		if (!IS_ERR_OR_NULL(drvdata->pclk))
714			clk_put(drvdata->pclk);
715	}
716	return ret;
717}
718
719static void debug_platform_remove(struct platform_device *pdev)
720{
721	struct debug_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
722
723	if (WARN_ON(!drvdata))
724		return;
725
726	__debug_remove(&pdev->dev);
727	pm_runtime_disable(&pdev->dev);
728	if (!IS_ERR_OR_NULL(drvdata->pclk))
729		clk_put(drvdata->pclk);
730}
731
732#ifdef CONFIG_ACPI
733static const struct acpi_device_id debug_platform_ids[] = {
734	{"ARMHC503", 0, 0, 0}, /* ARM CoreSight Debug */
735	{},
736};
737MODULE_DEVICE_TABLE(acpi, debug_platform_ids);
738#endif
739
740#ifdef CONFIG_PM
741static int debug_runtime_suspend(struct device *dev)
742{
743	struct debug_drvdata *drvdata = dev_get_drvdata(dev);
744
745	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
746		clk_disable_unprepare(drvdata->pclk);
747	return 0;
748}
749
750static int debug_runtime_resume(struct device *dev)
751{
752	struct debug_drvdata *drvdata = dev_get_drvdata(dev);
753
754	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
755		clk_prepare_enable(drvdata->pclk);
756	return 0;
757}
758#endif
759
760static const struct dev_pm_ops debug_dev_pm_ops = {
761	SET_RUNTIME_PM_OPS(debug_runtime_suspend, debug_runtime_resume, NULL)
762};
763
764static struct platform_driver debug_platform_driver = {
765	.probe	= debug_platform_probe,
766	.remove = debug_platform_remove,
767	.driver	= {
768		.name			= "coresight-debug-platform",
769		.acpi_match_table	= ACPI_PTR(debug_platform_ids),
770		.suppress_bind_attrs	= true,
771		.pm			= &debug_dev_pm_ops,
772	},
773};
774
775static int __init debug_init(void)
776{
777	return coresight_init_driver("debug", &debug_driver, &debug_platform_driver);
778}
779
780static void __exit debug_exit(void)
781{
782	coresight_remove_driver(&debug_driver, &debug_platform_driver);
783}
784module_init(debug_init);
785module_exit(debug_exit);
786
787MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
788MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
789MODULE_LICENSE("GPL");
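
Usage note: the functionality above is off by default unless
CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is set. It can be enabled with
coresight_cpu_debug.enable=1 on the kernel command line (or as a module
parameter), or at run time through the debugfs knob created by
debug_func_init() (assuming debugfs is mounted at the usual
/sys/kernel/debug):

    echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable

Once enabled, the panic notifier registered above samples and prints
EDPCSR (as a symbolized program counter), EDCIDSR and EDVIDSR for each
CPU whose debug module has been probed.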