Linux Audio

Check our new training course

Linux BSP development engineering services

Need help to port Linux and bootloaders to your hardware?
Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2017 Linaro Limited. All rights reserved.
  4 *
  5 * Author: Leo Yan <leo.yan@linaro.org>
  6 */
  7#include <linux/amba/bus.h>
  8#include <linux/coresight.h>
  9#include <linux/cpu.h>
 10#include <linux/debugfs.h>
 11#include <linux/delay.h>
 12#include <linux/device.h>
 13#include <linux/err.h>
 14#include <linux/init.h>
 15#include <linux/io.h>
 16#include <linux/iopoll.h>
 17#include <linux/kernel.h>
 18#include <linux/module.h>
 19#include <linux/moduleparam.h>
 20#include <linux/panic_notifier.h>
 21#include <linux/pm_qos.h>
 22#include <linux/slab.h>
 23#include <linux/smp.h>
 24#include <linux/types.h>
 25#include <linux/uaccess.h>
 26
 27#include "coresight-priv.h"
 28
 29#define EDPCSR				0x0A0
 30#define EDCIDSR				0x0A4
 31#define EDVIDSR				0x0A8
 32#define EDPCSR_HI			0x0AC
 33#define EDOSLAR				0x300
 34#define EDPRCR				0x310
 35#define EDPRSR				0x314
 36#define EDDEVID1			0xFC4
 37#define EDDEVID				0xFC8
 38
 39#define EDPCSR_PROHIBITED		0xFFFFFFFF
 40
 41/* bits definition for EDPCSR */
 42#define EDPCSR_THUMB			BIT(0)
 43#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
 44#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)
 45
 46/* bits definition for EDPRCR */
 47#define EDPRCR_COREPURQ			BIT(3)
 48#define EDPRCR_CORENPDRQ		BIT(0)
 49
 50/* bits definition for EDPRSR */
 51#define EDPRSR_DLK			BIT(6)
 52#define EDPRSR_PU			BIT(0)
 53
 54/* bits definition for EDVIDSR */
 55#define EDVIDSR_NS			BIT(31)
 56#define EDVIDSR_E2			BIT(30)
 57#define EDVIDSR_E3			BIT(29)
 58#define EDVIDSR_HV			BIT(28)
 59#define EDVIDSR_VMID			GENMASK(7, 0)
 60
 61/*
 62 * bits definition for EDDEVID1:PCSROffset
 63 *
 64 * NOTE: armv8 and armv7 have different definition for the register,
 65 * so consolidate the bits definition as below:
 66 *
 67 * 0b0000 - Sample offset applies based on the instruction state, we
 68 *          rely on EDDEVID to check if EDPCSR is implemented or not
 69 * 0b0001 - No offset applies.
 70 * 0b0010 - No offset applies, but do not use in AArch32 mode
 71 *
 72 */
 73#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
 74#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
 75#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
 76
 77/* bits definition for EDDEVID */
 78#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
 79#define EDDEVID_IMPL_EDPCSR		(0x1)
 80#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
 81#define EDDEVID_IMPL_FULL		(0x3)
 82
 83#define DEBUG_WAIT_SLEEP		1000
 84#define DEBUG_WAIT_TIMEOUT		32000
 85
/* Per-CPU driver state: mapped registers, feature bits and last snapshot */
struct debug_drvdata {
	void __iomem	*base;		/* mapped external debug registers */
	struct device	*dev;		/* backing AMBA device */
	int		cpu;		/* CPU this debug module belongs to */

	/* Features parsed from EDDEVID/EDDEVID1 in debug_init_arch_data() */
	bool		edpcsr_present;
	bool		edcidsr_present;
	bool		edvidsr_present;
	bool		pc_has_offset;	/* sampled PC carries a pipeline offset */

	/* Latest register snapshot taken by debug_read_regs() */
	u32		edpcsr;
	u32		edpcsr_hi;
	u32		edprsr;
	u32		edvidsr;
	u32		edcidsr;
};
102
/* Serialises the enable knob, probe/remove accounting and the panic dump */
static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
/* Number of successfully probed devices; guards shared debugfs/notifier */
static int debug_count;
static struct dentry *debug_debugfs_dir;

/* Runtime on/off switch; default from Kconfig, writable as module param */
static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
111
static void debug_os_unlock(struct debug_drvdata *drvdata)
{
	/* Clear the OS Lock (EDOSLAR) so the debug registers can be accessed */
	writel_relaxed(0x0, drvdata->base + EDOSLAR);

	/* Make sure the registers are unlocked before accessing */
	wmb();
}
120
121/*
122 * According to ARM DDI 0487A.k, before access external debug
123 * registers should firstly check the access permission; if any
124 * below condition has been met then cannot access debug
125 * registers to avoid lockup issue:
126 *
127 * - CPU power domain is powered off;
128 * - The OS Double Lock is locked;
129 *
130 * By checking EDPRSR can get to know if meet these conditions.
131 */
132static bool debug_access_permitted(struct debug_drvdata *drvdata)
133{
134	/* CPU is powered off */
135	if (!(drvdata->edprsr & EDPRSR_PU))
136		return false;
137
138	/* The OS Double Lock is locked */
139	if (drvdata->edprsr & EDPRSR_DLK)
140		return false;
141
142	return true;
143}
144
/*
 * Ask the power controller to bring (and keep) the CPU power domain up so
 * the external debug registers stay accessible.  Retries if the domain is
 * observed off again after the no-powerdown request has been written.
 */
static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
	u32 edprcr;

try_again:

	/*
	 * Send request to power management controller and assert
	 * DBGPWRUPREQ signal; if power management controller has
	 * sane implementation, it should enable CPU power domain
	 * in case CPU is in low power state.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	/* Wait for CPU to be powered up (timeout~=32ms) */
	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
		/*
		 * Unfortunately the CPU cannot be powered up, so return
		 * back and later has no permission to access other
		 * registers. For this case, should disable CPU low power
		 * states to ensure CPU power domain is enabled!
		 */
		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
			__func__, drvdata->cpu);
		return;
	}

	/*
	 * At this point the CPU is powered up, so set the no powerdown
	 * request bit so we don't lose power and emulate power down.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

	/* The core power domain got switched off on use, try again */
	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
		goto try_again;
}
190
/*
 * Snapshot the sampling registers (EDPCSR and friends) into drvdata.
 * Forces the CPU power domain up first and restores EDPRCR on the way
 * out so normal power management behaviour resumes afterwards.
 */
static void debug_read_regs(struct debug_drvdata *drvdata)
{
	u32 save_edprcr;

	CS_UNLOCK(drvdata->base);

	/* Unlock os lock */
	debug_os_unlock(drvdata);

	/* Save EDPRCR register */
	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

	/*
	 * Ensure the CPU power domain is enabled so the debug
	 * registers are accessible.
	 */
	debug_force_cpu_powered_up(drvdata);

	if (!debug_access_permitted(drvdata))
		goto out;

	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

	/*
	 * As described in ARM DDI 0487A.k, if the processing
	 * element (PE) is in debug state, or sample-based
	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
	 * UNKNOWN state. So directly bail out for this case.
	 */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
		goto out;

	/*
	 * A read of the EDPCSR normally has the side-effect of
	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
	 * at this point it's safe to read value from them.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

	if (drvdata->edcidsr_present)
		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

	if (drvdata->edvidsr_present)
		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
	/* Restore EDPRCR register */
	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

	CS_LOCK(drvdata->base);
}
244
245#ifdef CONFIG_64BIT
246static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
247{
248	return (unsigned long)drvdata->edpcsr_hi << 32 |
249	       (unsigned long)drvdata->edpcsr;
250}
251#else
252static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
253{
254	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
255	unsigned long pc;
256
257	pc = (unsigned long)drvdata->edpcsr;
258
259	if (drvdata->pc_has_offset) {
260		arm_inst_offset = 8;
261		thumb_inst_offset = 4;
262	}
263
264	/* Handle thumb instruction */
265	if (pc & EDPCSR_THUMB) {
266		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
267		return pc;
268	}
269
270	/*
271	 * Handle arm instruction offset, if the arm instruction
272	 * is not 4 byte alignment then it's possible the case
273	 * for implementation defined; keep original value for this
274	 * case and print info for notice.
275	 */
276	if (pc & BIT(1))
277		dev_emerg(drvdata->dev,
278			  "Instruction offset is implementation defined\n");
279	else
280		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
281
282	return pc;
283}
284#endif
285
/*
 * Pretty-print the register snapshot captured by debug_read_regs().
 * Uses dev_emerg() because this runs from the panic notifier path.
 */
static void debug_dump_regs(struct debug_drvdata *drvdata)
{
	struct device *dev = drvdata->dev;
	unsigned long pc;

	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
		  drvdata->edprsr,
		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

	if (!debug_access_permitted(drvdata)) {
		dev_emerg(dev, "No permission to access debug registers!\n");
		return;
	}

	/* 0xFFFFFFFF means sampling was prohibited when the snapshot ran */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
		return;
	}

	pc = debug_adjust_pc(drvdata);
	dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);

	if (drvdata->edcidsr_present)
		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

	if (drvdata->edvidsr_present)
		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
			  drvdata->edvidsr,
			  drvdata->edvidsr & EDVIDSR_NS ?
			  "Non-secure" : "Secure",
			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
				(drvdata->edvidsr & EDVIDSR_E2 ?
				 "EL2" : "EL1/0"),
			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
}
323
/*
 * Runs on the target CPU (via smp_call_function_single() from probe):
 * reads EDDEVID/EDDEVID1 and records which sampling registers (EDPCSR,
 * EDCIDSR, EDVIDSR) are implemented and whether the PC has an offset.
 */
static void debug_init_arch_data(void *info)
{
	struct debug_drvdata *drvdata = info;
	u32 mode, pcsr_offset;
	u32 eddevid, eddevid1;

	CS_UNLOCK(drvdata->base);

	/* Read device info */
	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

	CS_LOCK(drvdata->base);

	/* Parse implementation feature */
	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

	drvdata->edpcsr_present  = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;
	drvdata->pc_has_offset   = false;

	/* Each mode implies all the registers of the lesser modes */
	switch (mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
		 * define if has the offset for PC sampling value; if read
		 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
		 * module does not sample the instruction set state when
		 * armv8 CPU in AArch32 state.
		 */
		drvdata->edpcsr_present =
			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

		drvdata->pc_has_offset =
			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
		break;
	default:
		break;
	}
}
373
/*
 * Dump out information on panic.
 *
 * Runs from the atomic panic notifier chain, hence mutex_trylock():
 * sleeping or spinning on a lock already held elsewhere at panic time
 * could deadlock the panic handling.
 */
static int debug_notifier_call(struct notifier_block *self,
			       unsigned long v, void *p)
{
	int cpu;
	struct debug_drvdata *drvdata;

	/* Bail out if we can't acquire the mutex or the functionality is off */
	if (!mutex_trylock(&debug_lock))
		return NOTIFY_DONE;

	if (!debug_enable)
		goto skip_dump;

	pr_emerg("ARM external debug module:\n");

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

		debug_read_regs(drvdata);
		debug_dump_regs(drvdata);
	}

skip_dump:
	mutex_unlock(&debug_lock);
	return NOTIFY_DONE;
}
407
/* Registered on the panic notifier chain by debug_func_init() */
static struct notifier_block debug_notifier = {
	.notifier_call = debug_notifier_call,
};
411
/*
 * Take a runtime PM reference on every probed CPU debug device so their
 * power domains stay on; on failure, roll back the references already
 * taken.  Called with debug_lock held.
 */
static int debug_enable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret = 0;
	cpumask_t mask;

	/*
	 * Use cpumask to track which debug power domains have
	 * been powered on and use it to handle failure case.
	 */
	cpumask_clear(&mask);

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_get_sync(drvdata->dev);
		if (ret < 0)
			goto err;
		else
			cpumask_set_cpu(cpu, &mask);
	}

	return 0;

err:
	/*
	 * If pm_runtime_get_sync() has failed, need rollback on
	 * all the other CPUs that have been enabled before that.
	 */
	for_each_cpu(cpu, &mask) {
		drvdata = per_cpu(debug_drvdata, cpu);
		pm_runtime_put_noidle(drvdata->dev);
	}

	return ret;
}
450
451static int debug_disable_func(void)
452{
453	struct debug_drvdata *drvdata;
454	int cpu, ret, err = 0;
455
456	/*
457	 * Disable debug power domains, records the error and keep
458	 * circling through all other CPUs when an error has been
459	 * encountered.
460	 */
461	for_each_possible_cpu(cpu) {
462		drvdata = per_cpu(debug_drvdata, cpu);
463		if (!drvdata)
464			continue;
465
466		ret = pm_runtime_put(drvdata->dev);
467		if (ret < 0)
468			err = ret;
469	}
470
471	return err;
472}
473
/*
 * debugfs "enable" write handler: parses a 0/1 value (base 2, so only
 * binary digits are accepted) and powers the debug domains up or down.
 * Returns the byte count consumed on success, negative errno otherwise.
 */
static ssize_t debug_func_knob_write(struct file *f,
		const char __user *buf, size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	ret = kstrtou8_from_user(buf, count, 2, &val);
	if (ret)
		return ret;

	mutex_lock(&debug_lock);

	/* Nothing to do when the requested state matches the current one */
	if (val == debug_enable)
		goto out;

	if (val)
		ret = debug_enable_func();
	else
		ret = debug_disable_func();

	if (ret) {
		pr_err("%s: unable to %s debug function: %d\n",
		       __func__, val ? "enable" : "disable", ret);
		goto err;
	}

	debug_enable = val;
out:
	ret = count;
err:
	mutex_unlock(&debug_lock);
	return ret;
}
507
508static ssize_t debug_func_knob_read(struct file *f,
509		char __user *ubuf, size_t count, loff_t *ppos)
510{
511	ssize_t ret;
512	char buf[3];
513
514	mutex_lock(&debug_lock);
515	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
516	mutex_unlock(&debug_lock);
517
518	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
519	return ret;
520}
521
/* File operations backing the debugfs "enable" knob */
static const struct file_operations debug_func_knob_fops = {
	.open	= simple_open,
	.read	= debug_func_knob_read,
	.write	= debug_func_knob_write,
};
527
/*
 * One-time setup done when the first device probes: create the debugfs
 * control node and register the panic notifier that performs the dump.
 */
static int debug_func_init(void)
{
	int ret;

	/* Create debugfs node (debugfs calls need no error checking) */
	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
	debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
			    &debug_func_knob_fops);

	/* Register function to be called for panic */
	ret = atomic_notifier_chain_register(&panic_notifier_list,
					     &debug_notifier);
	if (ret) {
		pr_err("%s: unable to register notifier: %d\n",
		       __func__, ret);
		goto err;
	}

	return 0;

err:
	/* Notifier registration failed: tear the debugfs node back down */
	debugfs_remove_recursive(debug_debugfs_dir);
	return ret;
}
552
/* Mirror of debug_func_init(); runs when the last device is removed */
static void debug_func_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &debug_notifier);
	debugfs_remove_recursive(debug_debugfs_dir);
}
559
/*
 * AMBA probe: map the per-CPU debug registers, discover the implemented
 * sampling features on the owning CPU and publish the drvdata so the
 * panic notifier can find it.
 */
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata;
	struct resource *res = &adev->res;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	/* Exactly one device per CPU is expected */
	if (per_cpu(debug_drvdata, drvdata->cpu)) {
		dev_err(dev, "CPU%d drvdata has already been initialized\n",
			drvdata->cpu);
		return -EBUSY;
	}

	drvdata->dev = &adev->dev;
	amba_set_drvdata(adev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	/* Parse features on the owning CPU; hold the hotplug lock meanwhile */
	cpus_read_lock();
	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
				       drvdata, 1);
	cpus_read_unlock();

	if (ret) {
		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
		goto err;
	}

	if (!drvdata->edpcsr_present) {
		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
			drvdata->cpu);
		ret = -ENXIO;
		goto err;
	}

	/* First device sets up the shared debugfs node and panic notifier */
	if (!debug_count++) {
		ret = debug_func_init();
		if (ret)
			goto err_func_init;
	}

	mutex_lock(&debug_lock);
	/* Turn off debug power domain if debugging is disabled */
	if (!debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
	return 0;

err_func_init:
	debug_count--;
err:
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
	return ret;
}
631
static void debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);

	/* Stop the panic notifier from seeing this CPU's data */
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off debug power domain before rmmod the module */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	/* Tear down the shared state once the last device goes away */
	if (!--debug_count)
		debug_func_exit();
}
648
/* UCI data used to disambiguate Qualcomm Kryo CPU debug components */
static const struct amba_cs_uci_id uci_id_debug[] = {
	{
		/*  CPU Debug UCI data */
		.devarch	= 0x47706a15,
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x00000015,
	}
};

/* AMBA peripheral IDs of the supported CPU debug modules */
static const struct amba_id debug_ids[] = {
	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
	{},
};

MODULE_DEVICE_TABLE(amba, debug_ids);

static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		/* Manual unbind would corrupt debug_count bookkeeping */
		.suppress_bind_attrs = true,
	},
	.probe		= debug_probe,
	.remove		= debug_remove,
	.id_table	= debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2017 Linaro Limited. All rights reserved.
  4 *
  5 * Author: Leo Yan <leo.yan@linaro.org>
  6 */
  7#include <linux/amba/bus.h>
  8#include <linux/coresight.h>
  9#include <linux/cpu.h>
 10#include <linux/debugfs.h>
 11#include <linux/delay.h>
 12#include <linux/device.h>
 13#include <linux/err.h>
 14#include <linux/init.h>
 15#include <linux/io.h>
 16#include <linux/iopoll.h>
 17#include <linux/kernel.h>
 18#include <linux/module.h>
 19#include <linux/moduleparam.h>
 
 20#include <linux/pm_qos.h>
 21#include <linux/slab.h>
 22#include <linux/smp.h>
 23#include <linux/types.h>
 24#include <linux/uaccess.h>
 25
 26#include "coresight-priv.h"
 27
 28#define EDPCSR				0x0A0
 29#define EDCIDSR				0x0A4
 30#define EDVIDSR				0x0A8
 31#define EDPCSR_HI			0x0AC
 32#define EDOSLAR				0x300
 33#define EDPRCR				0x310
 34#define EDPRSR				0x314
 35#define EDDEVID1			0xFC4
 36#define EDDEVID				0xFC8
 37
 38#define EDPCSR_PROHIBITED		0xFFFFFFFF
 39
 40/* bits definition for EDPCSR */
 41#define EDPCSR_THUMB			BIT(0)
 42#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
 43#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)
 44
 45/* bits definition for EDPRCR */
 46#define EDPRCR_COREPURQ			BIT(3)
 47#define EDPRCR_CORENPDRQ		BIT(0)
 48
 49/* bits definition for EDPRSR */
 50#define EDPRSR_DLK			BIT(6)
 51#define EDPRSR_PU			BIT(0)
 52
 53/* bits definition for EDVIDSR */
 54#define EDVIDSR_NS			BIT(31)
 55#define EDVIDSR_E2			BIT(30)
 56#define EDVIDSR_E3			BIT(29)
 57#define EDVIDSR_HV			BIT(28)
 58#define EDVIDSR_VMID			GENMASK(7, 0)
 59
 60/*
 61 * bits definition for EDDEVID1:PCSROffset
 62 *
 63 * NOTE: armv8 and armv7 have different definition for the register,
 64 * so consolidate the bits definition as below:
 65 *
 66 * 0b0000 - Sample offset applies based on the instruction state, we
 67 *          rely on EDDEVID to check if EDPCSR is implemented or not
 68 * 0b0001 - No offset applies.
 69 * 0b0010 - No offset applies, but do not use in AArch32 mode
 70 *
 71 */
 72#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
 73#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
 74#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
 75
 76/* bits definition for EDDEVID */
 77#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
 78#define EDDEVID_IMPL_EDPCSR		(0x1)
 79#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
 80#define EDDEVID_IMPL_FULL		(0x3)
 81
 82#define DEBUG_WAIT_SLEEP		1000
 83#define DEBUG_WAIT_TIMEOUT		32000
 84
 85struct debug_drvdata {
 86	void __iomem	*base;
 87	struct device	*dev;
 88	int		cpu;
 89
 90	bool		edpcsr_present;
 91	bool		edcidsr_present;
 92	bool		edvidsr_present;
 93	bool		pc_has_offset;
 94
 95	u32		edpcsr;
 96	u32		edpcsr_hi;
 97	u32		edprsr;
 98	u32		edvidsr;
 99	u32		edcidsr;
100};
101
102static DEFINE_MUTEX(debug_lock);
103static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
104static int debug_count;
105static struct dentry *debug_debugfs_dir;
106
107static bool debug_enable;
108module_param_named(enable, debug_enable, bool, 0600);
109MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
110
111static void debug_os_unlock(struct debug_drvdata *drvdata)
112{
113	/* Unlocks the debug registers */
114	writel_relaxed(0x0, drvdata->base + EDOSLAR);
115
116	/* Make sure the registers are unlocked before accessing */
117	wmb();
118}
119
120/*
121 * According to ARM DDI 0487A.k, before access external debug
122 * registers should firstly check the access permission; if any
123 * below condition has been met then cannot access debug
124 * registers to avoid lockup issue:
125 *
126 * - CPU power domain is powered off;
127 * - The OS Double Lock is locked;
128 *
129 * By checking EDPRSR can get to know if meet these conditions.
130 */
131static bool debug_access_permitted(struct debug_drvdata *drvdata)
132{
133	/* CPU is powered off */
134	if (!(drvdata->edprsr & EDPRSR_PU))
135		return false;
136
137	/* The OS Double Lock is locked */
138	if (drvdata->edprsr & EDPRSR_DLK)
139		return false;
140
141	return true;
142}
143
144static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
145{
146	u32 edprcr;
147
148try_again:
149
150	/*
151	 * Send request to power management controller and assert
152	 * DBGPWRUPREQ signal; if power management controller has
153	 * sane implementation, it should enable CPU power domain
154	 * in case CPU is in low power state.
155	 */
156	edprcr = readl_relaxed(drvdata->base + EDPRCR);
157	edprcr |= EDPRCR_COREPURQ;
158	writel_relaxed(edprcr, drvdata->base + EDPRCR);
159
160	/* Wait for CPU to be powered up (timeout~=32ms) */
161	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
162			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
163			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
164		/*
165		 * Unfortunately the CPU cannot be powered up, so return
166		 * back and later has no permission to access other
167		 * registers. For this case, should disable CPU low power
168		 * states to ensure CPU power domain is enabled!
169		 */
170		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
171			__func__, drvdata->cpu);
172		return;
173	}
174
175	/*
176	 * At this point the CPU is powered up, so set the no powerdown
177	 * request bit so we don't lose power and emulate power down.
178	 */
179	edprcr = readl_relaxed(drvdata->base + EDPRCR);
180	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
181	writel_relaxed(edprcr, drvdata->base + EDPRCR);
182
183	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
184
185	/* The core power domain got switched off on use, try again */
186	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
187		goto try_again;
188}
189
190static void debug_read_regs(struct debug_drvdata *drvdata)
191{
192	u32 save_edprcr;
193
194	CS_UNLOCK(drvdata->base);
195
196	/* Unlock os lock */
197	debug_os_unlock(drvdata);
198
199	/* Save EDPRCR register */
200	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);
201
202	/*
203	 * Ensure CPU power domain is enabled to let registers
204	 * are accessiable.
205	 */
206	debug_force_cpu_powered_up(drvdata);
207
208	if (!debug_access_permitted(drvdata))
209		goto out;
210
211	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
212
213	/*
214	 * As described in ARM DDI 0487A.k, if the processing
215	 * element (PE) is in debug state, or sample-based
216	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
217	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
218	 * UNKNOWN state. So directly bail out for this case.
219	 */
220	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
221		goto out;
222
223	/*
224	 * A read of the EDPCSR normally has the side-effect of
225	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
226	 * at this point it's safe to read value from them.
227	 */
228	if (IS_ENABLED(CONFIG_64BIT))
229		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
230
231	if (drvdata->edcidsr_present)
232		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
233
234	if (drvdata->edvidsr_present)
235		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
236
237out:
238	/* Restore EDPRCR register */
239	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);
240
241	CS_LOCK(drvdata->base);
242}
243
244#ifdef CONFIG_64BIT
245static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
246{
247	return (unsigned long)drvdata->edpcsr_hi << 32 |
248	       (unsigned long)drvdata->edpcsr;
249}
250#else
251static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
252{
253	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
254	unsigned long pc;
255
256	pc = (unsigned long)drvdata->edpcsr;
257
258	if (drvdata->pc_has_offset) {
259		arm_inst_offset = 8;
260		thumb_inst_offset = 4;
261	}
262
263	/* Handle thumb instruction */
264	if (pc & EDPCSR_THUMB) {
265		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
266		return pc;
267	}
268
269	/*
270	 * Handle arm instruction offset, if the arm instruction
271	 * is not 4 byte alignment then it's possible the case
272	 * for implementation defined; keep original value for this
273	 * case and print info for notice.
274	 */
275	if (pc & BIT(1))
276		dev_emerg(drvdata->dev,
277			  "Instruction offset is implementation defined\n");
278	else
279		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
280
281	return pc;
282}
283#endif
284
285static void debug_dump_regs(struct debug_drvdata *drvdata)
286{
287	struct device *dev = drvdata->dev;
288	unsigned long pc;
289
290	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
291		  drvdata->edprsr,
292		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
293		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
294
295	if (!debug_access_permitted(drvdata)) {
296		dev_emerg(dev, "No permission to access debug registers!\n");
297		return;
298	}
299
300	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
301		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
302		return;
303	}
304
305	pc = debug_adjust_pc(drvdata);
306	dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);
307
308	if (drvdata->edcidsr_present)
309		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
310
311	if (drvdata->edvidsr_present)
312		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
313			  drvdata->edvidsr,
314			  drvdata->edvidsr & EDVIDSR_NS ?
315			  "Non-secure" : "Secure",
316			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
317				(drvdata->edvidsr & EDVIDSR_E2 ?
318				 "EL2" : "EL1/0"),
319			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
320			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
321}
322
323static void debug_init_arch_data(void *info)
324{
325	struct debug_drvdata *drvdata = info;
326	u32 mode, pcsr_offset;
327	u32 eddevid, eddevid1;
328
329	CS_UNLOCK(drvdata->base);
330
331	/* Read device info */
332	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
333	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
334
335	CS_LOCK(drvdata->base);
336
337	/* Parse implementation feature */
338	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
339	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
340
341	drvdata->edpcsr_present  = false;
342	drvdata->edcidsr_present = false;
343	drvdata->edvidsr_present = false;
344	drvdata->pc_has_offset   = false;
345
346	switch (mode) {
347	case EDDEVID_IMPL_FULL:
348		drvdata->edvidsr_present = true;
349		fallthrough;
350	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
351		drvdata->edcidsr_present = true;
352		fallthrough;
353	case EDDEVID_IMPL_EDPCSR:
354		/*
355		 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
356		 * define if has the offset for PC sampling value; if read
357		 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
358		 * module does not sample the instruction set state when
359		 * armv8 CPU in AArch32 state.
360		 */
361		drvdata->edpcsr_present =
362			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
363			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
364
365		drvdata->pc_has_offset =
366			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
367		break;
368	default:
369		break;
370	}
371}
372
373/*
374 * Dump out information on panic.
375 */
376static int debug_notifier_call(struct notifier_block *self,
377			       unsigned long v, void *p)
378{
379	int cpu;
380	struct debug_drvdata *drvdata;
381
382	mutex_lock(&debug_lock);
 
 
383
384	/* Bail out if the functionality is disabled */
385	if (!debug_enable)
386		goto skip_dump;
387
388	pr_emerg("ARM external debug module:\n");
389
390	for_each_possible_cpu(cpu) {
391		drvdata = per_cpu(debug_drvdata, cpu);
392		if (!drvdata)
393			continue;
394
395		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
396
397		debug_read_regs(drvdata);
398		debug_dump_regs(drvdata);
399	}
400
401skip_dump:
402	mutex_unlock(&debug_lock);
403	return 0;
404}
405
406static struct notifier_block debug_notifier = {
407	.notifier_call = debug_notifier_call,
408};
409
410static int debug_enable_func(void)
411{
412	struct debug_drvdata *drvdata;
413	int cpu, ret = 0;
414	cpumask_t mask;
415
416	/*
417	 * Use cpumask to track which debug power domains have
418	 * been powered on and use it to handle failure case.
419	 */
420	cpumask_clear(&mask);
421
422	for_each_possible_cpu(cpu) {
423		drvdata = per_cpu(debug_drvdata, cpu);
424		if (!drvdata)
425			continue;
426
427		ret = pm_runtime_get_sync(drvdata->dev);
428		if (ret < 0)
429			goto err;
430		else
431			cpumask_set_cpu(cpu, &mask);
432	}
433
434	return 0;
435
436err:
437	/*
438	 * If pm_runtime_get_sync() has failed, need rollback on
439	 * all the other CPUs that have been enabled before that.
440	 */
441	for_each_cpu(cpu, &mask) {
442		drvdata = per_cpu(debug_drvdata, cpu);
443		pm_runtime_put_noidle(drvdata->dev);
444	}
445
446	return ret;
447}
448
449static int debug_disable_func(void)
450{
451	struct debug_drvdata *drvdata;
452	int cpu, ret, err = 0;
453
454	/*
455	 * Disable debug power domains, records the error and keep
456	 * circling through all other CPUs when an error has been
457	 * encountered.
458	 */
459	for_each_possible_cpu(cpu) {
460		drvdata = per_cpu(debug_drvdata, cpu);
461		if (!drvdata)
462			continue;
463
464		ret = pm_runtime_put(drvdata->dev);
465		if (ret < 0)
466			err = ret;
467	}
468
469	return err;
470}
471
472static ssize_t debug_func_knob_write(struct file *f,
473		const char __user *buf, size_t count, loff_t *ppos)
474{
475	u8 val;
476	int ret;
477
478	ret = kstrtou8_from_user(buf, count, 2, &val);
479	if (ret)
480		return ret;
481
482	mutex_lock(&debug_lock);
483
484	if (val == debug_enable)
485		goto out;
486
487	if (val)
488		ret = debug_enable_func();
489	else
490		ret = debug_disable_func();
491
492	if (ret) {
493		pr_err("%s: unable to %s debug function: %d\n",
494		       __func__, val ? "enable" : "disable", ret);
495		goto err;
496	}
497
498	debug_enable = val;
499out:
500	ret = count;
501err:
502	mutex_unlock(&debug_lock);
503	return ret;
504}
505
506static ssize_t debug_func_knob_read(struct file *f,
507		char __user *ubuf, size_t count, loff_t *ppos)
508{
509	ssize_t ret;
510	char buf[3];
511
512	mutex_lock(&debug_lock);
513	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
514	mutex_unlock(&debug_lock);
515
516	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
517	return ret;
518}
519
/* File operations for <debugfs>/coresight_cpu_debug/enable */
static const struct file_operations debug_func_knob_fops = {
	.open	= simple_open,
	.read	= debug_func_knob_read,
	.write	= debug_func_knob_write,
};
525
526static int debug_func_init(void)
527{
528	int ret;
529
530	/* Create debugfs node */
531	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
532	debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
533			    &debug_func_knob_fops);
534
535	/* Register function to be called for panic */
536	ret = atomic_notifier_chain_register(&panic_notifier_list,
537					     &debug_notifier);
538	if (ret) {
539		pr_err("%s: unable to register notifier: %d\n",
540		       __func__, ret);
541		goto err;
542	}
543
544	return 0;
545
546err:
547	debugfs_remove_recursive(debug_debugfs_dir);
548	return ret;
549}
550
/*
 * Undo debug_func_init(): stop receiving panic callbacks, then tear
 * down the debugfs hierarchy.  Called when the last instance is removed.
 */
static void debug_func_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &debug_notifier);
	debugfs_remove_recursive(debug_debugfs_dir);
}
557
558static int debug_probe(struct amba_device *adev, const struct amba_id *id)
559{
560	void __iomem *base;
561	struct device *dev = &adev->dev;
562	struct debug_drvdata *drvdata;
563	struct resource *res = &adev->res;
564	int ret;
565
566	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
567	if (!drvdata)
568		return -ENOMEM;
569
570	drvdata->cpu = coresight_get_cpu(dev);
571	if (drvdata->cpu < 0)
572		return drvdata->cpu;
573
574	if (per_cpu(debug_drvdata, drvdata->cpu)) {
575		dev_err(dev, "CPU%d drvdata has already been initialized\n",
576			drvdata->cpu);
577		return -EBUSY;
578	}
579
580	drvdata->dev = &adev->dev;
581	amba_set_drvdata(adev, drvdata);
582
583	/* Validity for the resource is already checked by the AMBA core */
584	base = devm_ioremap_resource(dev, res);
585	if (IS_ERR(base))
586		return PTR_ERR(base);
587
588	drvdata->base = base;
589
590	get_online_cpus();
591	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
592	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
593				       drvdata, 1);
594	put_online_cpus();
595
596	if (ret) {
597		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
598		goto err;
599	}
600
601	if (!drvdata->edpcsr_present) {
602		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
603			drvdata->cpu);
604		ret = -ENXIO;
605		goto err;
606	}
607
608	if (!debug_count++) {
609		ret = debug_func_init();
610		if (ret)
611			goto err_func_init;
612	}
613
614	mutex_lock(&debug_lock);
615	/* Turn off debug power domain if debugging is disabled */
616	if (!debug_enable)
617		pm_runtime_put(dev);
618	mutex_unlock(&debug_lock);
619
620	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
621	return 0;
622
623err_func_init:
624	debug_count--;
625err:
626	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
627	return ret;
628}
629
/* Unbind one per-CPU debug device; mirror image of debug_probe(). */
static int debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);

	/* Unpublish this CPU's drvdata so dump/enable paths skip it */
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off debug power domain before rmmod the module */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	/* Last instance tears down the shared debugfs/panic machinery */
	if (!--debug_count)
		debug_func_exit();

	return 0;
}
648
/* UCI match data used to disambiguate shared Qualcomm peripheral IDs */
static const struct amba_cs_uci_id uci_id_debug[] = {
	{
		/*  CPU Debug UCI data */
		.devarch	= 0x47706a15,
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x00000015,
	}
};
657
/* AMBA peripheral IDs of supported CPU debug modules */
static const struct amba_id debug_ids[] = {
	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
	{},
};
 
 
667
/* AMBA driver glue; bind/unbind handled by debug_probe()/debug_remove() */
static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		.suppress_bind_attrs = true,
	},
	.probe		= debug_probe,
	.remove		= debug_remove,
	.id_table	= debug_ids,
};
677
/* Standard module registration and metadata */
module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");