// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
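
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a consumer holding a hardware IRQ number would typically populate
 * and then use the mapped ESB pages along these lines:
 *
 *	struct xive_irq_data xd;
 *
 *	if (xive_native_populate_irq_data(hw_irq, &xd))
 *		return -ENXIO;
 *	// xd.eoi_mmio (and xd.trig_mmio, when a distinct trigger page
 *	// exists) are now ioremap'ed and ready for ESB accesses.
 */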

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
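	/*
	 * Editorial note: a queue of 2^order bytes holds 2^(order - 2)
	 * four-byte (__be32) entries, so the index mask below is that
	 * entry count minus one.
	 */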
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
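
/*
 * Illustrative usage sketch (editorial addition): note that
 * xive_native_alloc_irq_on_chip() returns 0 rather than a negative errno
 * on failure, so callers typically pair it with xive_native_free_irq()
 * along these lines:
 *
 *	u32 hwirq = xive_native_alloc_irq_on_chip(chip_id);
 *
 *	if (!hwirq)
 *		return -ENOSPC;	// hypothetical error choice
 *	...
 *	xive_native_free_irq(hwirq);
 */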

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
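	/*
	 * Editorial note: the 16-bit ACK value carries the NSR in its high
	 * byte and the CPPR in its low byte; the HE field sits in the top
	 * two bits of the NSR, hence the double shift above.
	 */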
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	xc->chip_id = cpu_to_chip_id(cpu);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

#ifdef CONFIG_DEBUG_FS
static int xive_native_debug_create(struct dentry *xive_dir)
{
	debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
	return 0;
}
#endif

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.prepare_cpu		= xive_native_prepare_cpu,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_FS
	.debug_create		= xive_native_debug_create,
#endif /* CONFIG_DEBUG_FS */
	.name			= "native",
};
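
/*
 * Editorial note: this ops table is the native (PowerNV/OPAL) backend; it
 * is handed to the XIVE core via xive_core_init() in xive_native_init()
 * below.
 */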

static bool __init xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void __init xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("Failed to allocate pool VP, KVM might not function\n");

	pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	xive_has_single_esc = of_property_read_bool(np, "single-escalation-support");

	xive_has_save_restore = of_property_read_bool(np, "vp-save-restore");

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		goto err_put;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		goto err_put;
	}
	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

err_put:
	of_node_put(np);
	return false;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
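
/*
 * Illustrative lifecycle sketch (editorial addition): a VP block consumer
 * (KVM, for instance) would typically combine the exports above roughly as:
 *
 *	u32 base = xive_native_alloc_vp_block(nr_vcpus);
 *
 *	if (base == XIVE_INVALID_VP)
 *		return -ENOMEM;	// hypothetical error choice
 *	rc = xive_native_enable_vp(base + vcpu_id,
 *				   xive_native_has_single_escalation());
 *	...
 *	xive_native_disable_vp(base + vcpu_id);
 *	xive_native_free_vp_block(base);
 */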

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc) {
		vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
		return -EIO;
	}
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

bool xive_native_has_save_restore(void)
{
	return xive_has_save_restore;
}
EXPORT_SYMBOL_GPL(xive_native_has_save_restore);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be64_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
		opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
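
/*
 * Illustrative check-then-use pattern (editorial addition): queue state
 * save/restore callers would typically gate on the OPAL tokens first:
 *
 *	u32 qtoggle, qindex;
 *
 *	if (!xive_native_has_queue_state_support())
 *		return -EOPNOTSUPP;	// hypothetical error choice
 *	if (xive_native_get_queue_state(vp_id, prio, &qtoggle, &qindex))
 *		return -EIO;
 *	...
 *	xive_native_set_queue_state(vp_id, prio, qtoggle, qindex);
 */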

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		vp_err(vp_id, "failed to get vp state : %lld\n", rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);