v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright 2016,2017 IBM Corporation.
  4 */
  5
  6#define pr_fmt(fmt) "xive: " fmt
  7
  8#include <linux/types.h>
  9#include <linux/irq.h>
 10#include <linux/smp.h>
 11#include <linux/interrupt.h>
 12#include <linux/init.h>
 13#include <linux/of.h>
 14#include <linux/slab.h>
 15#include <linux/spinlock.h>
 16#include <linux/cpumask.h>
 17#include <linux/mm.h>
 18#include <linux/delay.h>
 19#include <linux/libfdt.h>
 20
 21#include <asm/machdep.h>
 22#include <asm/prom.h>
 23#include <asm/io.h>
 24#include <asm/smp.h>
 25#include <asm/irq.h>
 26#include <asm/errno.h>
 27#include <asm/xive.h>
 28#include <asm/xive-regs.h>
 29#include <asm/hvcall.h>
 30#include <asm/svm.h>
 31#include <asm/ultravisor.h>
 32
 33#include "xive-internal.h"
 34
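    /*
     * log2 of the event queue size in bytes, picked in
     * xive_spapr_init() from the "ibm,xive-eq-sizes" property.
     */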
 35static u32 xive_queue_shift;
 36
 37struct xive_irq_bitmap {
 38	unsigned long		*bitmap;
 39	unsigned int		base;
 40	unsigned int		count;
 41	spinlock_t		lock;
 42	struct list_head	list;
 43};
 44
 45static LIST_HEAD(xive_irq_bitmaps);
 46
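    /*
     * Each "ibm,xive-lisn-ranges" device tree entry is turned into one
     * xive_irq_bitmap on this list; xive_irq_bitmap_alloc() below hands
     * out hardware IRQ numbers (e.g. for IPIs) from these ranges.
     */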
 47static int xive_irq_bitmap_add(int base, int count)
 48{
 49	struct xive_irq_bitmap *xibm;
 50
 51	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
 52	if (!xibm)
 53		return -ENOMEM;
 54
 55	spin_lock_init(&xibm->lock);
 56	xibm->base = base;
 57	xibm->count = count;
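    	/*
    	 * Note: this allocates @count bytes where a bitmap of @count
    	 * bits only needs BITS_TO_LONGS(count) longs; the v6.13.7
    	 * listing below switches to bitmap_zalloc()/bitmap_free().
    	 */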
 58	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
 59	if (!xibm->bitmap) {
 60		kfree(xibm);
 61		return -ENOMEM;
 62	}
 63	list_add(&xibm->list, &xive_irq_bitmaps);
 64
 65	pr_info("Using IRQ range [%x-%x]", xibm->base,
 66		xibm->base + xibm->count - 1);
 67	return 0;
 68}
 69
 70static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
 71{
 72	int irq;
 73
 74	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
 75	if (irq != xibm->count) {
 76		set_bit(irq, xibm->bitmap);
 77		irq += xibm->base;
 78	} else {
 79		irq = -ENOMEM;
 80	}
 81
 82	return irq;
 83}
 84
 85static int xive_irq_bitmap_alloc(void)
 86{
 87	struct xive_irq_bitmap *xibm;
 88	unsigned long flags;
 89	int irq = -ENOENT;
 90
 91	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
 92		spin_lock_irqsave(&xibm->lock, flags);
 93		irq = __xive_irq_bitmap_alloc(xibm);
 94		spin_unlock_irqrestore(&xibm->lock, flags);
 95		if (irq >= 0)
 96			break;
 97	}
 98	return irq;
 99}
100
101static void xive_irq_bitmap_free(int irq)
102{
103	unsigned long flags;
104	struct xive_irq_bitmap *xibm;
105
106	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
107		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
108			spin_lock_irqsave(&xibm->lock, flags);
109			clear_bit(irq - xibm->base, xibm->bitmap);
110			spin_unlock_irqrestore(&xibm->lock, flags);
111			break;
112		}
113	}
114}
115
116
117/* Based on the similar routines in RTAS */
118static unsigned int plpar_busy_delay_time(long rc)
119{
120	unsigned int ms = 0;
121
122	if (H_IS_LONG_BUSY(rc)) {
123		ms = get_longbusy_msecs(rc);
124	} else if (rc == H_BUSY) {
125		ms = 10; /* seems appropriate for XIVE hcalls */
126	}
127
128	return ms;
129}
130
131static unsigned int plpar_busy_delay(int rc)
132{
133	unsigned int ms;
134
135	ms = plpar_busy_delay_time(rc);
136	if (ms)
137		mdelay(ms);
138
139	return ms;
140}
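    /*
     * All of the hcall wrappers below share the same busy-retry idiom:
     *
     *	do {
     *		rc = plpar_hcall_norets(H_INT_xxx, ...);
     *	} while (plpar_busy_delay(rc));
     *
     * plpar_busy_delay() returns non-zero (after an mdelay) for H_BUSY
     * and the H_LONG_BUSY_* codes, so the loop only exits on success or
     * on a real error.
     */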
141
142/*
143 * Note: this call has a partition wide scope and can take a while to
144 * complete. If it returns H_LONG_BUSY_* it should be retried
145 * periodically.
146 */
147static long plpar_int_reset(unsigned long flags)
148{
149	long rc;
150
151	do {
152		rc = plpar_hcall_norets(H_INT_RESET, flags);
153	} while (plpar_busy_delay(rc));
154
155	if (rc)
156		pr_err("H_INT_RESET failed %ld\n", rc);
157
158	return rc;
159}
160
161static long plpar_int_get_source_info(unsigned long flags,
162				      unsigned long lisn,
163				      unsigned long *src_flags,
164				      unsigned long *eoi_page,
165				      unsigned long *trig_page,
166				      unsigned long *esb_shift)
167{
168	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
169	long rc;
170
171	do {
172		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
173	} while (plpar_busy_delay(rc));
174
175	if (rc) {
176		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
177		return rc;
178	}
179
180	*src_flags = retbuf[0];
181	*eoi_page  = retbuf[1];
182	*trig_page = retbuf[2];
183	*esb_shift = retbuf[3];
184
185	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
186		retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
187
188	return 0;
189}
190
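    /*
     * PAPR numbers flag bits MSB-first (bit 0 is the most significant
     * bit of the 64-bit doubleword), hence the (1ull << (63 - n))
     * pattern in the flag definitions below.
     */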
191#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
192#define XIVE_SRC_MASK     (1ull << (63 - 63)) /* unused */
193
194static long plpar_int_set_source_config(unsigned long flags,
195					unsigned long lisn,
196					unsigned long target,
197					unsigned long prio,
198					unsigned long sw_irq)
199{
200	long rc;
201
202
203	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
204		flags, lisn, target, prio, sw_irq);
205
206
207	do {
208		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
209					target, prio, sw_irq);
210	} while (plpar_busy_delay(rc));
211
212	if (rc) {
213		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
214		       lisn, target, prio, rc);
215		return rc;
216	}
217
218	return 0;
219}
220
221static long plpar_int_get_source_config(unsigned long flags,
222					unsigned long lisn,
223					unsigned long *target,
224					unsigned long *prio,
225					unsigned long *sw_irq)
226{
227	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
228	long rc;
229
230	pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
231
232	do {
233		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
234				 target, prio, sw_irq);
235	} while (plpar_busy_delay(rc));
236
237	if (rc) {
238		pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
239		       lisn, rc);
240		return rc;
241	}
242
243	*target = retbuf[0];
244	*prio   = retbuf[1];
245	*sw_irq = retbuf[2];
246
247	pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
248		retbuf[0], retbuf[1], retbuf[2]);
249
250	return 0;
251}
252
253static long plpar_int_get_queue_info(unsigned long flags,
254				     unsigned long target,
255				     unsigned long priority,
256				     unsigned long *esn_page,
257				     unsigned long *esn_size)
258{
259	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
260	long rc;
261
262	do {
263		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
264				 priority);
265	} while (plpar_busy_delay(rc));
266
267	if (rc) {
268		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
269		       target, priority, rc);
270		return rc;
271	}
272
273	*esn_page = retbuf[0];
274	*esn_size = retbuf[1];
275
276	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
277		retbuf[0], retbuf[1]);
278
279	return 0;
280}
281
282#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))
283
284static long plpar_int_set_queue_config(unsigned long flags,
285				       unsigned long target,
286				       unsigned long priority,
287				       unsigned long qpage,
288				       unsigned long qsize)
289{
290	long rc;
291
292	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
293		flags,  target, priority, qpage, qsize);
294
295	do {
296		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
297					priority, qpage, qsize);
298	} while (plpar_busy_delay(rc));
299
300	if (rc) {
301		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
302		       target, priority, qpage, rc);
303		return  rc;
304	}
305
306	return 0;
307}
308
309static long plpar_int_sync(unsigned long flags, unsigned long lisn)
310{
311	long rc;
312
313	do {
314		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
315	} while (plpar_busy_delay(rc));
316
317	if (rc) {
318		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
319		return  rc;
320	}
321
322	return 0;
323}
324
325#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))
326
327static long plpar_int_esb(unsigned long flags,
328			  unsigned long lisn,
329			  unsigned long offset,
330			  unsigned long in_data,
331			  unsigned long *out_data)
332{
333	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
334	long rc;
335
336	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
337		flags,  lisn, offset, in_data);
338
339	do {
340		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
341				 in_data);
342	} while (plpar_busy_delay(rc));
343
344	if (rc) {
345		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
346		       lisn, offset, rc);
347		return  rc;
348	}
349
350	*out_data = retbuf[0];
351
352	return 0;
353}
354
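    /*
     * MMIO-like ESB access routed through the hypervisor. This backs
     * the esb_rw hook in xive_spapr_ops and is used when a source has
     * the H_INT_ESB flag set, i.e. when its ESB pages cannot be mapped
     * directly (see xive_spapr_populate_irq_data() below). A store
     * returns 0, a load returns the data read; -1 signals an hcall
     * failure.
     */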
355static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
356{
357	unsigned long read_data;
358	long rc;
359
360	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
361			   lisn, offset, data, &read_data);
362	if (rc)
363		return -1;
364
365	return write ? 0 : read_data;
366}
367
368#define XIVE_SRC_H_INT_ESB     (1ull << (63 - 60))
369#define XIVE_SRC_LSI           (1ull << (63 - 61))
370#define XIVE_SRC_TRIGGER       (1ull << (63 - 62))
371#define XIVE_SRC_STORE_EOI     (1ull << (63 - 63))
372
373static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
374{
375	long rc;
376	unsigned long flags;
377	unsigned long eoi_page;
378	unsigned long trig_page;
379	unsigned long esb_shift;
380
381	memset(data, 0, sizeof(*data));
382
383	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
384				       &esb_shift);
385	if (rc)
386		return  -EINVAL;
387
388	if (flags & XIVE_SRC_H_INT_ESB)
389		data->flags  |= XIVE_IRQ_FLAG_H_INT_ESB;
390	if (flags & XIVE_SRC_STORE_EOI)
391		data->flags  |= XIVE_IRQ_FLAG_STORE_EOI;
392	if (flags & XIVE_SRC_LSI)
393		data->flags  |= XIVE_IRQ_FLAG_LSI;
394	data->eoi_page  = eoi_page;
395	data->esb_shift = esb_shift;
396	data->trig_page = trig_page;
397
398	data->hw_irq = hw_irq;
399
400	/*
 401	 * No chip-id for the sPAPR backend. This has an impact on how we
402	 * pick a target. See xive_pick_irq_target().
403	 */
404	data->src_chip = XIVE_INVALID_CHIP_ID;
405
406	/*
407	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
408	 * be used for interrupt management. Skip the remapping of the
409	 * ESB pages which are not available.
410	 */
411	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
412		return 0;
413
414	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
415	if (!data->eoi_mmio) {
416		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
417		return -ENOMEM;
418	}
419
420	/* Full function page supports trigger */
421	if (flags & XIVE_SRC_TRIGGER) {
422		data->trig_mmio = data->eoi_mmio;
423		return 0;
424	}
425
426	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
427	if (!data->trig_mmio) {
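    		/*
    		 * Note: data->eoi_mmio is leaked on this error path;
    		 * the v6.13.7 listing below adds the missing
    		 * iounmap(data->eoi_mmio) here.
    		 */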
428		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
429		return -ENOMEM;
430	}
431	return 0;
432}
433
434static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
435{
436	long rc;
437
438	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
439					 prio, sw_irq);
440
441	return rc == 0 ? 0 : -ENXIO;
442}
443
444static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
445				     u32 *sw_irq)
446{
447	long rc;
448	unsigned long h_target;
449	unsigned long h_prio;
450	unsigned long h_sw_irq;
451
452	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
453					 &h_sw_irq);
454
455	*target = h_target;
456	*prio = h_prio;
457	*sw_irq = h_sw_irq;
458
459	return rc == 0 ? 0 : -ENXIO;
460}
461
 462/* This can be called multiple times to change a queue configuration */
463static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
464				   __be32 *qpage, u32 order)
465{
466	s64 rc = 0;
467	unsigned long esn_page;
468	unsigned long esn_size;
469	u64 flags, qpage_phys;
470
471	/* If there's an actual queue page, clean it */
472	if (order) {
473		if (WARN_ON(!qpage))
474			return -EINVAL;
475		qpage_phys = __pa(qpage);
476	} else {
477		qpage_phys = 0;
478	}
479
480	/* Initialize the rest of the fields */
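    	/*
    	 * Queue entries are 4-byte (__be32) event words, so a queue of
    	 * 2^order bytes holds 2^(order - 2) entries and msk wraps the
    	 * index into that array.
    	 */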
481	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
482	q->idx = 0;
483	q->toggle = 0;
484
485	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
486	if (rc) {
487		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
488		       target, prio);
489		rc = -EIO;
490		goto fail;
491	}
492
493	/* TODO: add support for the notification page */
494	q->eoi_phys = esn_page;
495
496	/* Default is to always notify */
497	flags = XIVE_EQ_ALWAYS_NOTIFY;
498
499	/* Configure and enable the queue in HW */
500	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
501	if (rc) {
502		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
503		       target, prio);
504		rc = -EIO;
505	} else {
506		q->qpage = qpage;
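    		/*
    		 * A secure (Ultravisor) guest must explicitly share the
    		 * queue page with the hypervisor so that events can be
    		 * written into it.
    		 */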
507		if (is_secure_guest())
508			uv_share_page(PHYS_PFN(qpage_phys),
509					1 << xive_alloc_order(order));
510	}
511fail:
512	return rc;
513}
514
515static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
516				  u8 prio)
517{
518	struct xive_q *q = &xc->queue[prio];
519	__be32 *qpage;
520
521	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
522	if (IS_ERR(qpage))
523		return PTR_ERR(qpage);
524
525	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
526					  q, prio, qpage, xive_queue_shift);
527}
528
529static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
530				  u8 prio)
531{
532	struct xive_q *q = &xc->queue[prio];
533	unsigned int alloc_order;
534	long rc;
535	int hw_cpu = get_hard_smp_processor_id(cpu);
536
537	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
538	if (rc)
539		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
540		       hw_cpu, prio);
541
542	alloc_order = xive_alloc_order(xive_queue_shift);
543	if (is_secure_guest())
544		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
545	free_pages((unsigned long)q->qpage, alloc_order);
546	q->qpage = NULL;
547}
548
549static bool xive_spapr_match(struct device_node *node)
550{
551	/* Ignore cascaded controllers for the moment */
552	return true;
553}
554
555#ifdef CONFIG_SMP
556static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
557{
558	int irq = xive_irq_bitmap_alloc();
559
560	if (irq < 0) {
561		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
562		return -ENXIO;
563	}
564
565	xc->hw_ipi = irq;
566	return 0;
567}
568
569static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
570{
571	if (xc->hw_ipi == XIVE_BAD_IRQ)
572		return;
573
574	xive_irq_bitmap_free(xc->hw_ipi);
575	xc->hw_ipi = XIVE_BAD_IRQ;
576}
577#endif /* CONFIG_SMP */
578
579static void xive_spapr_shutdown(void)
580{
581	plpar_int_reset(0);
582}
583
584/*
585 * Perform an "ack" cycle on the current thread. Grab the pending
586 * active priorities and update the CPPR to the most favored one.
587 */
588static void xive_spapr_update_pending(struct xive_cpu *xc)
589{
590	u8 nsr, cppr;
591	u16 ack;
592
593	/*
594	 * Perform the "Acknowledge O/S to Register" cycle.
595	 *
 596	 * Let's speed up the access to the TIMA using the raw I/O
597	 * accessor as we don't need the synchronisation routine of
598	 * the higher level ones
599	 */
600	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
601
602	/* Synchronize subsequent queue accesses */
603	mb();
604
605	/*
606	 * Grab the CPPR and the "NSR" field which indicates the source
607	 * of the interrupt (if any)
608	 */
609	cppr = ack & 0xff;
610	nsr = ack >> 8;
611
612	if (nsr & TM_QW1_NSR_EO) {
613		if (cppr == 0xff)
614			return;
615		/* Mark the priority pending */
616		xc->pending_prio |= 1 << cppr;
617
618		/*
619		 * A new interrupt should never have a CPPR less favored
620		 * than our current one.
621		 */
622		if (cppr >= xc->cppr)
623			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
624			       smp_processor_id(), cppr, xc->cppr);
625
626		/* Update our idea of what the CPPR is */
627		xc->cppr = cppr;
628	}
629}
630
631static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
632{
633	/* Only some debug on the TIMA settings */
634	pr_debug("(HW value: %08x %08x %08x)\n",
635		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
636		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
637		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
638}
639
640static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
641{
642	/* Nothing to do */;
643}
644
645static void xive_spapr_sync_source(u32 hw_irq)
646{
647	/* Specs are unclear on what this is doing */
648	plpar_int_sync(0, hw_irq);
649}
650
651static int xive_spapr_debug_show(struct seq_file *m, void *private)
652{
653	struct xive_irq_bitmap *xibm;
654	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
655
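    	/*
    	 * Note: the kmalloc() result is not checked here; the v6.13.7
    	 * listing below bails out with -ENOMEM when buf is NULL.
    	 */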
656	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
657		memset(buf, 0, PAGE_SIZE);
658		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
659		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
660	}
661	kfree(buf);
662
663	return 0;
664}
665
666static const struct xive_ops xive_spapr_ops = {
667	.populate_irq_data	= xive_spapr_populate_irq_data,
668	.configure_irq		= xive_spapr_configure_irq,
669	.get_irq_config		= xive_spapr_get_irq_config,
670	.setup_queue		= xive_spapr_setup_queue,
671	.cleanup_queue		= xive_spapr_cleanup_queue,
672	.match			= xive_spapr_match,
673	.shutdown		= xive_spapr_shutdown,
674	.update_pending		= xive_spapr_update_pending,
675	.setup_cpu		= xive_spapr_setup_cpu,
676	.teardown_cpu		= xive_spapr_teardown_cpu,
677	.sync_source		= xive_spapr_sync_source,
678	.esb_rw			= xive_spapr_esb_rw,
679#ifdef CONFIG_SMP
680	.get_ipi		= xive_spapr_get_ipi,
681	.put_ipi		= xive_spapr_put_ipi,
682	.debug_show		= xive_spapr_debug_show,
683#endif /* CONFIG_SMP */
684	.name			= "spapr",
685};
686
687/*
688 * get max priority from "/ibm,plat-res-int-priorities"
689 */
690static bool xive_get_max_prio(u8 *max_prio)
691{
692	struct device_node *rootdn;
693	const __be32 *reg;
694	u32 len;
695	int prio, found;
696
697	rootdn = of_find_node_by_path("/");
698	if (!rootdn) {
 699		pr_err("root node not found!\n");
700		return false;
701	}
702
703	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
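    	/*
    	 * Note: the rootdn reference is never dropped; the v6.13.7
    	 * listing below adds of_node_put(rootdn) right after this
    	 * lookup.
    	 */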
704	if (!reg) {
705		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
706		return false;
707	}
708
709	if (len % (2 * sizeof(u32)) != 0) {
710		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
711		return false;
712	}
713
714	/* HW supports priorities in the range [0-7] and 0xFF is a
715	 * wildcard priority used to mask. We scan the ranges reserved
716	 * by the hypervisor to find the lowest priority we can use.
717	 */
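    	/*
    	 * For example, a property of <6 2> reserves priorities 6 and 7,
    	 * so the scan below ends with found == 5, the lowest (least
    	 * favored) priority the OS may use.
    	 */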
718	found = 0xFF;
719	for (prio = 0; prio < 8; prio++) {
720		int reserved = 0;
721		int i;
722
723		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
724			int base  = be32_to_cpu(reg[2 * i]);
725			int range = be32_to_cpu(reg[2 * i + 1]);
726
727			if (prio >= base && prio < base + range)
728				reserved++;
729		}
730
731		if (!reserved)
732			found = prio;
733	}
734
735	if (found == 0xFF) {
736		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
737		return false;
738	}
739
740	*max_prio = found;
741	return true;
742}
743
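    /*
     * "ibm,architecture-vec-5" under /chosen records the option vector
     * 5 bytes negotiated with the hypervisor at client-architecture-
     * support (CAS) time; xive_spapr_disabled() below reads the
     * OV5_XIVE_SUPPORT byte out of it.
     */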
744static const u8 *get_vec5_feature(unsigned int index)
745{
746	unsigned long root, chosen;
747	int size;
748	const u8 *vec5;
749
750	root = of_get_flat_dt_root();
751	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
752	if (chosen == -FDT_ERR_NOTFOUND)
753		return NULL;
754
755	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
756	if (!vec5)
757		return NULL;
758
759	if (size <= index)
760		return NULL;
761
762	return vec5 + index;
763}
764
765static bool __init xive_spapr_disabled(void)
766{
767	const u8 *vec5_xive;
768
769	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
770	if (vec5_xive) {
771		u8 val;
772
773		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
774		switch (val) {
775		case OV5_FEAT(OV5_XIVE_EITHER):
776		case OV5_FEAT(OV5_XIVE_LEGACY):
777			break;
778		case OV5_FEAT(OV5_XIVE_EXPLOIT):
779			/* Hypervisor only supports XIVE */
780			if (xive_cmdline_disabled)
781				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
782			return false;
783		default:
784			pr_warn("%s: Unknown xive support option: 0x%x\n",
785				__func__, val);
786			break;
787		}
788	}
789
790	return xive_cmdline_disabled;
791}
792
793bool __init xive_spapr_init(void)
794{
795	struct device_node *np;
796	struct resource r;
797	void __iomem *tima;
798	struct property *prop;
799	u8 max_prio;
800	u32 val;
801	u32 len;
802	const __be32 *reg;
803	int i;
804
805	if (xive_spapr_disabled())
806		return false;
807
808	pr_devel("%s()\n", __func__);
809	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
810	if (!np) {
 811		pr_devel("not found!\n");
812		return false;
813	}
814	pr_devel("Found %s\n", np->full_name);
815
816	/* Resource 1 is the OS ring TIMA */
817	if (of_address_to_resource(np, 1, &r)) {
818		pr_err("Failed to get thread mgmnt area resource\n");
819		return false;
820	}
821	tima = ioremap(r.start, resource_size(&r));
822	if (!tima) {
823		pr_err("Failed to map thread mgmnt area\n");
824		return false;
825	}
826
827	if (!xive_get_max_prio(&max_prio))
828		return false;
829
830	/* Feed the IRQ number allocator with the ranges given in the DT */
831	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
832	if (!reg) {
833		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
834		return false;
835	}
836
837	if (len % (2 * sizeof(u32)) != 0) {
838		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
839		return false;
840	}
841
842	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
843		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
844				    be32_to_cpu(reg[1]));
845
846	/* Iterate the EQ sizes and pick one */
847	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
848		xive_queue_shift = val;
849		if (val == PAGE_SHIFT)
850			break;
851	}
852
853	/* Initialize XIVE core with our backend */
854	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
855		return false;
856
857	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
858	return true;
859}
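    /*
     * Note: the failure paths above return false without iounmap(tima)
     * or of_node_put(np), and the return value of xive_irq_bitmap_add()
     * is ignored; the v6.13.7 listing below restructures this with
     * err_put / err_unmap / err_mem_free labels.
     */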
860
861machine_arch_initcall(pseries, xive_core_debug_init);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright 2016,2017 IBM Corporation.
  4 */
  5
  6#define pr_fmt(fmt) "xive: " fmt
  7
  8#include <linux/types.h>
  9#include <linux/irq.h>
 10#include <linux/seq_file.h>
 11#include <linux/smp.h>
 12#include <linux/interrupt.h>
 13#include <linux/init.h>
 14#include <linux/of.h>
 15#include <linux/of_address.h>
 16#include <linux/of_fdt.h>
 17#include <linux/slab.h>
 18#include <linux/spinlock.h>
 19#include <linux/bitmap.h>
 20#include <linux/cpumask.h>
 21#include <linux/mm.h>
 22#include <linux/delay.h>
 23#include <linux/libfdt.h>
 24
 25#include <asm/machdep.h>
 26#include <asm/prom.h>
 27#include <asm/io.h>
 28#include <asm/smp.h>
 29#include <asm/irq.h>
 30#include <asm/errno.h>
 31#include <asm/xive.h>
 32#include <asm/xive-regs.h>
 33#include <asm/hvcall.h>
 34#include <asm/svm.h>
 35#include <asm/ultravisor.h>
 36
 37#include "xive-internal.h"
 38
 39static u32 xive_queue_shift;
 40
 41struct xive_irq_bitmap {
 42	unsigned long		*bitmap;
 43	unsigned int		base;
 44	unsigned int		count;
 45	spinlock_t		lock;
 46	struct list_head	list;
 47};
 48
 49static LIST_HEAD(xive_irq_bitmaps);
 50
 51static int __init xive_irq_bitmap_add(int base, int count)
 52{
 53	struct xive_irq_bitmap *xibm;
 54
 55	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
 56	if (!xibm)
 57		return -ENOMEM;
 58
 59	spin_lock_init(&xibm->lock);
 60	xibm->base = base;
 61	xibm->count = count;
 62	xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
 63	if (!xibm->bitmap) {
 64		kfree(xibm);
 65		return -ENOMEM;
 66	}
 67	list_add(&xibm->list, &xive_irq_bitmaps);
 68
 69	pr_info("Using IRQ range [%x-%x]", xibm->base,
 70		xibm->base + xibm->count - 1);
 71	return 0;
 72}
 73
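    /*
     * Undo xive_irq_bitmap_add(): called from the err_mem_free error
     * path of xive_spapr_init() below.
     */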
 74static void xive_irq_bitmap_remove_all(void)
 75{
 76	struct xive_irq_bitmap *xibm, *tmp;
 77
 78	list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
 79		list_del(&xibm->list);
 80		bitmap_free(xibm->bitmap);
 81		kfree(xibm);
 82	}
 83}
 84
 85static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
 86{
 87	int irq;
 88
 89	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
 90	if (irq != xibm->count) {
 91		set_bit(irq, xibm->bitmap);
 92		irq += xibm->base;
 93	} else {
 94		irq = -ENOMEM;
 95	}
 96
 97	return irq;
 98}
 99
100static int xive_irq_bitmap_alloc(void)
101{
102	struct xive_irq_bitmap *xibm;
103	unsigned long flags;
104	int irq = -ENOENT;
105
106	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
107		spin_lock_irqsave(&xibm->lock, flags);
108		irq = __xive_irq_bitmap_alloc(xibm);
109		spin_unlock_irqrestore(&xibm->lock, flags);
110		if (irq >= 0)
111			break;
112	}
113	return irq;
114}
115
116static void xive_irq_bitmap_free(int irq)
117{
118	unsigned long flags;
119	struct xive_irq_bitmap *xibm;
120
121	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
122		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
123			spin_lock_irqsave(&xibm->lock, flags);
124			clear_bit(irq - xibm->base, xibm->bitmap);
125			spin_unlock_irqrestore(&xibm->lock, flags);
126			break;
127		}
128	}
129}
130
131
132/* Based on the similar routines in RTAS */
133static unsigned int plpar_busy_delay_time(long rc)
134{
135	unsigned int ms = 0;
136
137	if (H_IS_LONG_BUSY(rc)) {
138		ms = get_longbusy_msecs(rc);
139	} else if (rc == H_BUSY) {
140		ms = 10; /* seems appropriate for XIVE hcalls */
141	}
142
143	return ms;
144}
145
146static unsigned int plpar_busy_delay(int rc)
147{
148	unsigned int ms;
149
150	ms = plpar_busy_delay_time(rc);
151	if (ms)
152		mdelay(ms);
153
154	return ms;
155}
156
157/*
158 * Note: this call has a partition wide scope and can take a while to
159 * complete. If it returns H_LONG_BUSY_* it should be retried
160 * periodically.
161 */
162static long plpar_int_reset(unsigned long flags)
163{
164	long rc;
165
166	do {
167		rc = plpar_hcall_norets(H_INT_RESET, flags);
168	} while (plpar_busy_delay(rc));
169
170	if (rc)
171		pr_err("H_INT_RESET failed %ld\n", rc);
172
173	return rc;
174}
175
176static long plpar_int_get_source_info(unsigned long flags,
177				      unsigned long lisn,
178				      unsigned long *src_flags,
179				      unsigned long *eoi_page,
180				      unsigned long *trig_page,
181				      unsigned long *esb_shift)
182{
183	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
184	long rc;
185
186	do {
187		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
188	} while (plpar_busy_delay(rc));
189
190	if (rc) {
191		pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
192		return rc;
193	}
194
195	*src_flags = retbuf[0];
196	*eoi_page  = retbuf[1];
197	*trig_page = retbuf[2];
198	*esb_shift = retbuf[3];
199
200	pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
201		 lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
202
203	return 0;
204}
205
206#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
207#define XIVE_SRC_MASK     (1ull << (63 - 63)) /* unused */
208
209static long plpar_int_set_source_config(unsigned long flags,
210					unsigned long lisn,
211					unsigned long target,
212					unsigned long prio,
213					unsigned long sw_irq)
214{
215	long rc;
216
217
218	pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
219		 flags, lisn, target, prio, sw_irq);
220
221
222	do {
223		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
224					target, prio, sw_irq);
225	} while (plpar_busy_delay(rc));
226
227	if (rc) {
228		pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
229		       lisn, target, prio, rc);
230		return rc;
231	}
232
233	return 0;
234}
235
236static long plpar_int_get_source_config(unsigned long flags,
237					unsigned long lisn,
238					unsigned long *target,
239					unsigned long *prio,
240					unsigned long *sw_irq)
241{
242	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
243	long rc;
244
245	pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);
246
247	do {
248		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
249				 target, prio, sw_irq);
250	} while (plpar_busy_delay(rc));
251
252	if (rc) {
253		pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
254		       lisn, rc);
255		return rc;
256	}
257
258	*target = retbuf[0];
259	*prio   = retbuf[1];
260	*sw_irq = retbuf[2];
261
262	pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
263		 retbuf[0], retbuf[1], retbuf[2]);
264
265	return 0;
266}
267
268static long plpar_int_get_queue_info(unsigned long flags,
269				     unsigned long target,
270				     unsigned long priority,
271				     unsigned long *esn_page,
272				     unsigned long *esn_size)
273{
274	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
275	long rc;
276
277	do {
278		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
279				 priority);
280	} while (plpar_busy_delay(rc));
281
282	if (rc) {
283		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
284		       target, priority, rc);
285		return rc;
286	}
287
288	*esn_page = retbuf[0];
289	*esn_size = retbuf[1];
290
291	pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
292		 target, priority, retbuf[0], retbuf[1]);
293
294	return 0;
295}
296
297#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))
298
299static long plpar_int_set_queue_config(unsigned long flags,
300				       unsigned long target,
301				       unsigned long priority,
302				       unsigned long qpage,
303				       unsigned long qsize)
304{
305	long rc;
306
307	pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
308		 flags,  target, priority, qpage, qsize);
309
310	do {
311		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
312					priority, qpage, qsize);
313	} while (plpar_busy_delay(rc));
314
315	if (rc) {
316		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
317		       target, priority, qpage, rc);
318		return  rc;
319	}
320
321	return 0;
322}
323
324static long plpar_int_sync(unsigned long flags, unsigned long lisn)
325{
326	long rc;
327
328	do {
329		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
330	} while (plpar_busy_delay(rc));
331
332	if (rc) {
333		pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
334		return  rc;
335	}
336
337	return 0;
338}
339
340#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))
341
342static long plpar_int_esb(unsigned long flags,
343			  unsigned long lisn,
344			  unsigned long offset,
345			  unsigned long in_data,
346			  unsigned long *out_data)
347{
348	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
349	long rc;
350
351	pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
352		 flags,  lisn, offset, in_data);
353
354	do {
355		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
356				 in_data);
357	} while (plpar_busy_delay(rc));
358
359	if (rc) {
360		pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
361		       lisn, offset, rc);
362		return  rc;
363	}
364
365	*out_data = retbuf[0];
366
367	return 0;
368}
369
370static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
371{
372	unsigned long read_data;
373	long rc;
374
375	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
376			   lisn, offset, data, &read_data);
377	if (rc)
378		return -1;
379
380	return write ? 0 : read_data;
381}
382
383#define XIVE_SRC_H_INT_ESB     (1ull << (63 - 60))
384#define XIVE_SRC_LSI           (1ull << (63 - 61))
385#define XIVE_SRC_TRIGGER       (1ull << (63 - 62))
386#define XIVE_SRC_STORE_EOI     (1ull << (63 - 63))
387
388static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
389{
390	long rc;
391	unsigned long flags;
392	unsigned long eoi_page;
393	unsigned long trig_page;
394	unsigned long esb_shift;
395
396	memset(data, 0, sizeof(*data));
397
398	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
399				       &esb_shift);
400	if (rc)
401		return  -EINVAL;
402
403	if (flags & XIVE_SRC_H_INT_ESB)
404		data->flags  |= XIVE_IRQ_FLAG_H_INT_ESB;
405	if (flags & XIVE_SRC_STORE_EOI)
406		data->flags  |= XIVE_IRQ_FLAG_STORE_EOI;
407	if (flags & XIVE_SRC_LSI)
408		data->flags  |= XIVE_IRQ_FLAG_LSI;
409	data->eoi_page  = eoi_page;
410	data->esb_shift = esb_shift;
411	data->trig_page = trig_page;
412
413	data->hw_irq = hw_irq;
414
415	/*
 416	 * No chip-id for the sPAPR backend. This has an impact on how we
417	 * pick a target. See xive_pick_irq_target().
418	 */
419	data->src_chip = XIVE_INVALID_CHIP_ID;
420
421	/*
422	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
423	 * be used for interrupt management. Skip the remapping of the
424	 * ESB pages which are not available.
425	 */
426	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
427		return 0;
428
429	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
430	if (!data->eoi_mmio) {
431		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
432		return -ENOMEM;
433	}
434
435	/* Full function page supports trigger */
436	if (flags & XIVE_SRC_TRIGGER) {
437		data->trig_mmio = data->eoi_mmio;
438		return 0;
439	}
440
441	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
442	if (!data->trig_mmio) {
443		iounmap(data->eoi_mmio);
444		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
445		return -ENOMEM;
446	}
447	return 0;
448}
449
450static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
451{
452	long rc;
453
454	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
455					 prio, sw_irq);
456
457	return rc == 0 ? 0 : -ENXIO;
458}
459
460static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
461				     u32 *sw_irq)
462{
463	long rc;
464	unsigned long h_target;
465	unsigned long h_prio;
466	unsigned long h_sw_irq;
467
468	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
469					 &h_sw_irq);
470
471	*target = h_target;
472	*prio = h_prio;
473	*sw_irq = h_sw_irq;
474
475	return rc == 0 ? 0 : -ENXIO;
476}
477
 478/* This can be called multiple times to change a queue configuration */
479static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
480				   __be32 *qpage, u32 order)
481{
482	s64 rc = 0;
483	unsigned long esn_page;
484	unsigned long esn_size;
485	u64 flags, qpage_phys;
486
487	/* If there's an actual queue page, clean it */
488	if (order) {
489		if (WARN_ON(!qpage))
490			return -EINVAL;
491		qpage_phys = __pa(qpage);
492	} else {
493		qpage_phys = 0;
494	}
495
496	/* Initialize the rest of the fields */
497	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
498	q->idx = 0;
499	q->toggle = 0;
500
501	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
502	if (rc) {
503		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
504		       target, prio);
505		rc = -EIO;
506		goto fail;
507	}
508
509	/* TODO: add support for the notification page */
510	q->eoi_phys = esn_page;
511
512	/* Default is to always notify */
513	flags = XIVE_EQ_ALWAYS_NOTIFY;
514
515	/* Configure and enable the queue in HW */
516	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
517	if (rc) {
518		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
519		       target, prio);
520		rc = -EIO;
521	} else {
522		q->qpage = qpage;
523		if (is_secure_guest())
524			uv_share_page(PHYS_PFN(qpage_phys),
525					1 << xive_alloc_order(order));
526	}
527fail:
528	return rc;
529}
530
531static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
532				  u8 prio)
533{
534	struct xive_q *q = &xc->queue[prio];
535	__be32 *qpage;
536
537	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
538	if (IS_ERR(qpage))
539		return PTR_ERR(qpage);
540
541	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
542					  q, prio, qpage, xive_queue_shift);
543}
544
545static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
546				  u8 prio)
547{
548	struct xive_q *q = &xc->queue[prio];
549	unsigned int alloc_order;
550	long rc;
551	int hw_cpu = get_hard_smp_processor_id(cpu);
552
553	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
554	if (rc)
555		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
556		       hw_cpu, prio);
557
558	alloc_order = xive_alloc_order(xive_queue_shift);
559	if (is_secure_guest())
560		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
561	free_pages((unsigned long)q->qpage, alloc_order);
562	q->qpage = NULL;
563}
564
565static bool xive_spapr_match(struct device_node *node)
566{
567	/* Ignore cascaded controllers for the moment */
568	return true;
569}
570
571#ifdef CONFIG_SMP
572static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
573{
574	int irq = xive_irq_bitmap_alloc();
575
576	if (irq < 0) {
577		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
578		return -ENXIO;
579	}
580
581	xc->hw_ipi = irq;
582	return 0;
583}
584
585static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
586{
587	if (xc->hw_ipi == XIVE_BAD_IRQ)
588		return;
589
590	xive_irq_bitmap_free(xc->hw_ipi);
591	xc->hw_ipi = XIVE_BAD_IRQ;
592}
593#endif /* CONFIG_SMP */
594
595static void xive_spapr_shutdown(void)
596{
597	plpar_int_reset(0);
598}
599
600/*
601 * Perform an "ack" cycle on the current thread. Grab the pending
602 * active priorities and update the CPPR to the most favored one.
603 */
604static void xive_spapr_update_pending(struct xive_cpu *xc)
605{
606	u8 nsr, cppr;
607	u16 ack;
608
609	/*
610	 * Perform the "Acknowledge O/S to Register" cycle.
611	 *
 612	 * Let's speed up the access to the TIMA using the raw I/O
613	 * accessor as we don't need the synchronisation routine of
614	 * the higher level ones
615	 */
616	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
617
618	/* Synchronize subsequent queue accesses */
619	mb();
620
621	/*
622	 * Grab the CPPR and the "NSR" field which indicates the source
623	 * of the interrupt (if any)
624	 */
625	cppr = ack & 0xff;
626	nsr = ack >> 8;
627
628	if (nsr & TM_QW1_NSR_EO) {
629		if (cppr == 0xff)
630			return;
631		/* Mark the priority pending */
632		xc->pending_prio |= 1 << cppr;
633
634		/*
635		 * A new interrupt should never have a CPPR less favored
636		 * than our current one.
637		 */
638		if (cppr >= xc->cppr)
639			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
640			       smp_processor_id(), cppr, xc->cppr);
641
642		/* Update our idea of what the CPPR is */
643		xc->cppr = cppr;
644	}
645}
646
647static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
648{
649	/* Only some debug on the TIMA settings */
650	pr_debug("(HW value: %08x %08x %08x)\n",
651		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
652		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
653		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
654}
655
656static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
657{
658	/* Nothing to do */;
659}
660
661static void xive_spapr_sync_source(u32 hw_irq)
662{
663	/* Specs are unclear on what this is doing */
664	plpar_int_sync(0, hw_irq);
665}
666
667static int xive_spapr_debug_show(struct seq_file *m, void *private)
668{
669	struct xive_irq_bitmap *xibm;
670	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
671
672	if (!buf)
673		return -ENOMEM;
674
675	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
676		memset(buf, 0, PAGE_SIZE);
677		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
678		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
679	}
680	kfree(buf);
681
682	return 0;
683}
684
685static const struct xive_ops xive_spapr_ops = {
686	.populate_irq_data	= xive_spapr_populate_irq_data,
687	.configure_irq		= xive_spapr_configure_irq,
688	.get_irq_config		= xive_spapr_get_irq_config,
689	.setup_queue		= xive_spapr_setup_queue,
690	.cleanup_queue		= xive_spapr_cleanup_queue,
691	.match			= xive_spapr_match,
692	.shutdown		= xive_spapr_shutdown,
693	.update_pending		= xive_spapr_update_pending,
694	.setup_cpu		= xive_spapr_setup_cpu,
695	.teardown_cpu		= xive_spapr_teardown_cpu,
696	.sync_source		= xive_spapr_sync_source,
697	.esb_rw			= xive_spapr_esb_rw,
698#ifdef CONFIG_SMP
699	.get_ipi		= xive_spapr_get_ipi,
700	.put_ipi		= xive_spapr_put_ipi,
701	.debug_show		= xive_spapr_debug_show,
702#endif /* CONFIG_SMP */
703	.name			= "spapr",
704};
705
706/*
707 * get max priority from "/ibm,plat-res-int-priorities"
708 */
709static bool __init xive_get_max_prio(u8 *max_prio)
710{
711	struct device_node *rootdn;
712	const __be32 *reg;
713	u32 len;
714	int prio, found;
715
716	rootdn = of_find_node_by_path("/");
717	if (!rootdn) {
 718		pr_err("root node not found!\n");
719		return false;
720	}
721
722	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
723	of_node_put(rootdn);
724	if (!reg) {
725		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
726		return false;
727	}
728
729	if (len % (2 * sizeof(u32)) != 0) {
730		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
731		return false;
732	}
733
734	/* HW supports priorities in the range [0-7] and 0xFF is a
735	 * wildcard priority used to mask. We scan the ranges reserved
736	 * by the hypervisor to find the lowest priority we can use.
737	 */
738	found = 0xFF;
739	for (prio = 0; prio < 8; prio++) {
740		int reserved = 0;
741		int i;
742
743		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
744			int base  = be32_to_cpu(reg[2 * i]);
745			int range = be32_to_cpu(reg[2 * i + 1]);
746
747			if (prio >= base && prio < base + range)
748				reserved++;
749		}
750
751		if (!reserved)
752			found = prio;
753	}
754
755	if (found == 0xFF) {
756		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
757		return false;
758	}
759
760	*max_prio = found;
761	return true;
762}
763
764static const u8 *__init get_vec5_feature(unsigned int index)
765{
766	unsigned long root, chosen;
767	int size;
768	const u8 *vec5;
769
770	root = of_get_flat_dt_root();
771	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
772	if (chosen == -FDT_ERR_NOTFOUND)
773		return NULL;
774
775	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
776	if (!vec5)
777		return NULL;
778
779	if (size <= index)
780		return NULL;
781
782	return vec5 + index;
783}
784
785static bool __init xive_spapr_disabled(void)
786{
787	const u8 *vec5_xive;
788
789	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
790	if (vec5_xive) {
791		u8 val;
792
793		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
794		switch (val) {
795		case OV5_FEAT(OV5_XIVE_EITHER):
796		case OV5_FEAT(OV5_XIVE_LEGACY):
797			break;
798		case OV5_FEAT(OV5_XIVE_EXPLOIT):
799			/* Hypervisor only supports XIVE */
800			if (xive_cmdline_disabled)
801				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
802			return false;
803		default:
804			pr_warn("%s: Unknown xive support option: 0x%x\n",
805				__func__, val);
806			break;
807		}
808	}
809
810	return xive_cmdline_disabled;
811}
812
813bool __init xive_spapr_init(void)
814{
815	struct device_node *np;
816	struct resource r;
817	void __iomem *tima;
818	u8 max_prio;
819	u32 val;
820	u32 len;
821	const __be32 *reg;
822	int i, err;
823
824	if (xive_spapr_disabled())
825		return false;
826
827	pr_devel("%s()\n", __func__);
828	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
829	if (!np) {
830		pr_devel("not found !\n");
831		return false;
832	}
833	pr_devel("Found %s\n", np->full_name);
834
835	/* Resource 1 is the OS ring TIMA */
836	if (of_address_to_resource(np, 1, &r)) {
837		pr_err("Failed to get thread mgmnt area resource\n");
838		goto err_put;
839	}
840	tima = ioremap(r.start, resource_size(&r));
841	if (!tima) {
842		pr_err("Failed to map thread mgmnt area\n");
843		goto err_put;
844	}
845
846	if (!xive_get_max_prio(&max_prio))
847		goto err_unmap;
848
849	/* Feed the IRQ number allocator with the ranges given in the DT */
850	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
851	if (!reg) {
852		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
853		goto err_unmap;
854	}
855
856	if (len % (2 * sizeof(u32)) != 0) {
857		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
858		goto err_unmap;
859	}
860
861	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
862		err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
863					  be32_to_cpu(reg[1]));
864		if (err < 0)
865			goto err_mem_free;
866	}
867
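    /*
     * Note: of_property_for_each_u32() lost its struct property and
     * const __be32 * iterator arguments in later kernels, hence the
     * three-argument form here versus the v5.14.15 listing above.
     */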
868	/* Iterate the EQ sizes and pick one */
869	of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) {
870		xive_queue_shift = val;
871		if (val == PAGE_SHIFT)
872			break;
873	}
874
875	/* Initialize XIVE core with our backend */
876	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
877		goto err_mem_free;
878
879	of_node_put(np);
880	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
881	return true;
882
883err_mem_free:
884	xive_irq_bitmap_remove_all();
885err_unmap:
886	iounmap(tima);
887err_put:
888	of_node_put(np);
889	return false;
890}
891
892machine_arch_initcall(pseries, xive_core_debug_init);