/******************************************************************************
 * arch/ia64/xen/xen_pv_ops.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/console.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/unistd.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/xencomm.h>
#include <asm/xen/privop.h>

#include "irq_xen.h"
#include "time.h"

/***************************************************************************
 * general info
 */
static struct pv_info xen_info __initdata = {
	.kernel_rpl = 2,	/* or 1: determined at run time */
	.paravirt_enabled = 1,
	.name = "Xen/ia64",
};

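/*
 * The pl field of ar.rsc (bits 3:2, per the shift/size macros below)
 * holds the privilege level the register stack engine, and hence the
 * kernel, is running at; xen_info_init() reads it to fix up kernel_rpl
 * at boot.
 */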
#define IA64_RSC_PL_SHIFT	2
#define IA64_RSC_PL_BIT_SIZE	2
#define IA64_RSC_PL_MASK	\
	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)

static void __init
xen_info_init(void)
{
	/* Xenified Linux/ia64 may run on pl = 1 or 2.
	 * Determine it at run time. */
	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
	xen_info.kernel_rpl = rpl;
}

/***************************************************************************
 * pv_init_ops
 * initialization hooks.
 */

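/*
 * Panic handling: record the kernel stack pointer of the dying context
 * (unw_init_running() hands us its unwind state) and ask the hypervisor
 * to crash the domain instead of continuing down the native panic path.
 */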
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
	current->thread.ksp = (__u64)info->sw - 16;
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unw_init_running(xen_panic_hypercall, NULL);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	xen_panic_event, NULL, 0 /* try to go last */
};

static void xen_pm_power_off(void)
{
	local_irq_disable();
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

static void __init
xen_banner(void)
{
	printk(KERN_INFO
	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
	       "flags=0x%x\n",
	       xen_info.kernel_rpl,
	       HYPERVISOR_shared_info->arch.start_info_pfn,
	       xen_start_info->nr_pages, xen_start_info->flags);
}

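/*
 * The hypervisor places the start_info page inside the domain's own
 * memory; reserve that single page so the early allocator never hands
 * it out.
 */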
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
	region->start = (unsigned long)__va(
		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
	region->end   = region->start + PAGE_SIZE;
	return 1;
}

static void __init
xen_arch_setup_early(void)
{
	struct shared_info *s;
	BUG_ON(!xen_pv_domain());

	s = HYPERVISOR_shared_info;
	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

	/* Must be done before any hypercall.  */
	xencomm_initialize();

	xen_setup_features();
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &xen_panic_block);
	pm_power_off = xen_pm_power_off;

	xen_ia64_enable_opt_feature();
}

static void __init
xen_arch_setup_console(char **cmdline_p)
{
	add_preferred_console("xenboot", 0, NULL);
	add_preferred_console("tty", 0, NULL);
	/* use hvc_xen */
	add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = NULL;
#endif
}

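/*
 * Returning 1 makes the generic setup code behave as if "nomca" had
 * been given on the command line: MCA handler setup is skipped,
 * leaving machine-check handling to the hypervisor.
 */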
static int __init
xen_arch_setup_nomca(void)
{
	return 1;
}

static void __init
xen_post_smp_prepare_boot_cpu(void)
{
	xen_setup_vcpu_info_placement();
}

#ifdef ASM_SUPPORTED
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
#endif
static void __init
xen_patch_branch(unsigned long tag, unsigned long type);

static const struct pv_init_ops xen_init_ops __initconst = {
	.banner = xen_banner,

	.reserve_memory = xen_reserve_memory,

	.arch_setup_early = xen_arch_setup_early,
	.arch_setup_console = xen_arch_setup_console,
	.arch_setup_nomca = xen_arch_setup_nomca,

	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
#ifdef ASM_SUPPORTED
	.patch_bundle = xen_patch_bundle,
#endif
	.patch_branch = xen_patch_branch,
};

/***************************************************************************
 * pv_fsys_data
 * addresses for fsys
 */

extern unsigned long xen_fsyscall_table[NR_syscalls];
extern char xen_fsys_bubble_down[];
struct pv_fsys_data xen_fsys_data __initdata = {
	.fsyscall_table = (unsigned long *)xen_fsyscall_table,
	.fsys_bubble_down = (void *)xen_fsys_bubble_down,
};

/***************************************************************************
 * pv_patchdata
 * patchdata addresses
 */

#define DECLARE(name)							\
	extern unsigned long __xen_start_gate_##name##_patchlist[];	\
	extern unsigned long __xen_end_gate_##name##_patchlist[]

DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);

extern unsigned long __xen_start_gate_section[];

#define ASSIGN(name)							\
	.start_##name##_patchlist =					\
		(unsigned long)__xen_start_gate_##name##_patchlist,	\
	.end_##name##_patchlist =					\
		(unsigned long)__xen_end_gate_##name##_patchlist

static struct pv_patchdata xen_patchdata __initdata = {
	ASSIGN(fsyscall),
	ASSIGN(brl_fsys_bubble_down),
	ASSIGN(vtop),
	ASSIGN(mckinley_e9),

	.gate_section = (void*)__xen_start_gate_section,
};

/***************************************************************************
 * pv_cpu_ops
 * intrinsics hooks.
 */

#ifndef ASM_SUPPORTED
static void
xen_set_itm_with_offset(unsigned long val)
{
	/* ia64_cpu_local_tick() calls this with interrupt enabled. */
	/* WARN_ON(!irqs_disabled()); */
	xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
}

static unsigned long
xen_get_itm_with_offset(void)
{
	/* unused at this moment */
	printk(KERN_DEBUG "%s is called.\n", __func__);

	WARN_ON(!irqs_disabled());
	return ia64_native_getreg(_IA64_REG_CR_ITM) +
		XEN_MAPPEDREGS->itc_offset;
}

/* ia64_set_itc() is only called by
 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
 * So XEN_MAPPEDREGS->itc_offset can be considered almost constant.
 */
static void
xen_set_itc(unsigned long val)
{
	unsigned long mitc;

	WARN_ON(!irqs_disabled());
	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
	XEN_MAPPEDREGS->itc_offset = val - mitc;
	XEN_MAPPEDREGS->itc_last = val;
}

static unsigned long
xen_get_itc(void)
{
	unsigned long res;
	unsigned long itc_offset;
	unsigned long itc_last;
	unsigned long ret_itc_last;

	itc_offset = XEN_MAPPEDREGS->itc_offset;
	do {
		itc_last = XEN_MAPPEDREGS->itc_last;
		res = ia64_native_getreg(_IA64_REG_AR_ITC);
		res += itc_offset;
		if (itc_last >= res)
			res = itc_last + 1;
		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
				       itc_last, res);
	} while (unlikely(ret_itc_last != itc_last));
	return res;

#if 0
	/* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
	   Should it be paravirtualized instead? */
	WARN_ON(!irqs_disabled());
	itc_offset = XEN_MAPPEDREGS->itc_offset;
	itc_last = XEN_MAPPEDREGS->itc_last;
	res = ia64_native_getreg(_IA64_REG_AR_ITC);
	res += itc_offset;
	if (itc_last >= res)
		res = itc_last + 1;
	XEN_MAPPEDREGS->itc_last = res;
	return res;
#endif
}

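/*
 * Register access demultiplexers: registers that Xen virtualizes are
 * routed to the hyperprivop-backed helpers above; everything else
 * falls through to the native accessors.
 */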
static void xen_setreg(int regnum, unsigned long val)
{
	switch (regnum) {
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
		break;
	case _IA64_REG_AR_ITC:
		xen_set_itc(val);
		break;
	case _IA64_REG_CR_TPR:
		xen_set_tpr(val);
		break;
	case _IA64_REG_CR_ITM:
		xen_set_itm_with_offset(val);
		break;
	case _IA64_REG_CR_EOI:
		xen_eoi(val);
		break;
	default:
		ia64_native_setreg_func(regnum, val);
		break;
	}
}

static unsigned long xen_getreg(int regnum)
{
	unsigned long res;

	switch (regnum) {
	case _IA64_REG_PSR:
		res = xen_get_psr();
		break;
	case _IA64_REG_AR_ITC:
		res = xen_get_itc();
		break;
	case _IA64_REG_CR_ITM:
		res = xen_get_itm_with_offset();
		break;
	case _IA64_REG_CR_IVR:
		res = xen_get_ivr();
		break;
	case _IA64_REG_CR_TPR:
		res = xen_get_tpr();
		break;
	default:
		res = ia64_native_getreg_func(regnum);
		break;
	}
	return res;
}

/* turning on interrupts is a bit more complicated: write to the
 * memory-mapped virtual psr.i bit first (to avoid a race condition),
 * then, if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
	int old = xen_get_virtual_psr_i();
	xen_set_virtual_psr_i(1);
	barrier();
	if (!old && xen_get_virtual_pend())
		xen_hyper_ssm_i();
}

/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a one-byte flag) */
static void
xen_rsm_i(void)
{
	xen_set_virtual_psr_i(0);
	barrier();
}

static unsigned long
xen_get_psr_i(void)
{
	return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
}

static void
xen_intrin_local_irq_restore(unsigned long mask)
{
	if (mask & IA64_PSR_I)
		xen_ssm_i();
	else
		xen_rsm_i();
}
#else
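/*
 * With assembler support, each intrinsic becomes a small asm stub.
 * The bundles between the _direct_start/_direct_end labels are what
 * the binary patcher copies inline over call sites; the trailing
 * "br.cond.sptk.many b6" returns to the caller when the stub is
 * instead invoked out of line, since the paravirt calling convention
 * keeps the return address in b6.
 */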
#define __DEFINE_FUNC(name, code)					\
	extern const char xen_ ## name ## _direct_start[];		\
	extern const char xen_ ## name ## _direct_end[];		\
	asm (".align 32\n"						\
	     ".proc xen_" #name "\n"					\
	     "xen_" #name ":\n"						\
	     "xen_" #name "_direct_start:\n"				\
	     code							\
	     "xen_" #name "_direct_end:\n"				\
	     "br.cond.sptk.many b6\n"					\
	     ".endp xen_" #name "\n")

#define DEFINE_VOID_FUNC0(name, code)		\
	extern void				\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC1(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg);	\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC1_VOID(name, code)	\
	extern void				\
	xen_ ## name (void *arg);		\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC2(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg0,	\
		      unsigned long arg1);	\
	__DEFINE_FUNC(name, code)

#define DEFINE_FUNC0(name, code)		\
	extern unsigned long			\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

#define DEFINE_FUNC1(name, type, code)		\
	extern unsigned long			\
	xen_ ## name (type arg);		\
	__DEFINE_FUNC(name, code)

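/*
 * Location, within the shared XSI page, of the pointer to the one-byte
 * virtual interrupt mask (non-zero means virtual psr.i is off).  The
 * byte immediately below the mask holds the "event pending" flag; the
 * stubs below rely on that layout.
 */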
#define XEN_PSR_I_ADDR_ADDR     (XSI_BASE + XSI_PSR_I_ADDR_OFS)

/*
 * static void xen_set_itm_with_offset(unsigned long val)
 *        xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itm_with_offset,
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  ";;\n"
		  "ld8 r3 = [r2]\n"
		  ";;\n"
		  "sub r8 = r8, r3\n"
		  "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");

/*
 * static unsigned long xen_get_itm_with_offset(void)
 *    return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
 */
/* 2 bundles */
DEFINE_FUNC0(get_itm_with_offset,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r3 = [r2]\n"
	     "mov r8 = cr.itm\n"
	     ";;\n"
	     "add r8 = r8, r3\n");

/*
 * static void xen_set_itc(unsigned long val)
 *	unsigned long mitc;
 *
 *	WARN_ON(!irqs_disabled());
 *	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
 *	XEN_MAPPEDREGS->itc_offset = val - mitc;
 *	XEN_MAPPEDREGS->itc_last = val;
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itc,
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_LAST_OFS) "\n"
		  "mov r3 = ar.itc\n"
		  ";;\n"
		  "sub r3 = r8, r3\n"
		  "st8 [r2] = r8, "
		  __stringify(XSI_ITC_OFFSET_OFS) " - "
		  __stringify(XSI_ITC_LAST_OFS) "\n"
		  ";;\n"
		  "st8 [r2] = r3\n");

/*
 * static unsigned long xen_get_itc(void)
 *	unsigned long res;
 *	unsigned long itc_offset;
 *	unsigned long itc_last;
 *	unsigned long ret_itc_last;
 *
 *	itc_offset = XEN_MAPPEDREGS->itc_offset;
 *	do {
 *		itc_last = XEN_MAPPEDREGS->itc_last;
 *		res = ia64_native_getreg(_IA64_REG_AR_ITC);
 *		res += itc_offset;
 *		if (itc_last >= res)
 *			res = itc_last + 1;
 *		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
 *				       itc_last, res);
 *	} while (unlikely(ret_itc_last != itc_last));
 *	return res;
 */
/* 5 bundles */
DEFINE_FUNC0(get_itc,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
					/* r9 = itc_offset */
					/* r2 = XSI_ITC_LAST */
	     "888:\n"
	     "mov r8 = ar.itc\n"	/* res = ar.itc */
	     ";;\n"
	     "ld8 r3 = [r2]\n"		/* r3 = itc_last */
	     "add r8 = r8, r9\n"	/* res = ar.itc + itc_offset */
	     ";;\n"
	     "cmp.gtu p6, p0 = r3, r8\n"
	     ";;\n"
	     "(p6) add r8 = 1, r3\n"	/* if (itc_last > res) res = itc_last + 1 */
	     ";;\n"
	     "mov ar.ccv = r3\n"	/* compare against itc_last */
	     ";;\n"
	     "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
	     ";;\n"
	     "cmp.ne p6, p0 = r10, r3\n"
	     "(p6) hint @pause\n"
	     "(p6) br.cond.spnt 888b\n");

DEFINE_VOID_FUNC1_VOID(fc,
		       "break " __stringify(HYPERPRIVOP_FC) "\n");

/*
 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
 * masked_addr = *psr_i_addr_addr
 * pending_intr_addr = masked_addr - 1
 * if (val & IA64_PSR_I) {
 *   masked = *masked_addr
 *   *masked_addr = 0:xen_set_virtual_psr_i(1)
 *   compiler barrier
 *   if (masked) {
 *      uint8_t pending = *pending_intr_addr;
 *      if (pending)
 *              XEN_HYPER_SSM_I
 *   }
 * } else {
 *   *masked_addr = 1:xen_set_virtual_psr_i(0)
 * }
 */
/* 6 bundles */
DEFINE_VOID_FUNC1(intrin_local_irq_restore,
		  /* r8 = input value: 0 or IA64_PSR_I
		   * p6 =  (flags & IA64_PSR_I)
		   *    = if clause
		   * p7 = !(flags & IA64_PSR_I)
		   *    = else clause
		   */
		  "cmp.ne p6, p7 = r8, r0\n"
		  "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  /* r9 = XEN_PSR_I_ADDR */
		  "ld8 r9 = [r9]\n"
		  ";;\n"

		  /* r10 = masked previous value */
		  "(p6)	ld1.acq r10 = [r9]\n"
		  ";;\n"

		  /* p8 = interrupt masked previously? */
		  "(p6)	cmp.ne.unc p8, p0 = r10, r0\n"

		  /* p7 = else clause */
		  "(p7)	mov r11 = 1\n"
		  ";;\n"
		  /* masked = 1 */
		  "(p7)	st1.rel [r9] = r11\n"

		  /* p6 = if clause */
		  /* masked = 0
		   * r9 = masked_addr - 1
		   *    = pending_intr_addr
		   */
		  "(p8)	st1.rel [r9] = r0, -1\n"
		  ";;\n"
		  /* r11 = pending_intr */
		  "(p8)	ld1.acq r11 = [r9]\n"
		  ";;\n"
		  /* p9 = interrupt pending? */
		  "(p8)	cmp.ne.unc p9, p10 = r11, r0\n"
		  ";;\n"
		  "(p10) mf\n"
		  /* issue hypercall to trigger interrupt */
		  "(p9)	break " __stringify(HYPERPRIVOP_SSM_I) "\n");

DEFINE_VOID_FUNC2(ptcga,
		  "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
DEFINE_VOID_FUNC2(set_rr,
		  "break " __stringify(HYPERPRIVOP_SET_RR) "\n");

/*
 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
 * tmp = *tmp
 * tmp = *tmp;
 * psr_i = tmp? 0: IA64_PSR_I;
 */
/* 4 bundles */
DEFINE_FUNC0(get_psr_i,
	     "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	     ";;\n"
	     "ld8 r9 = [r9]\n"			/* r9 = XEN_PSR_I_ADDR */
	     "mov r8 = 0\n"			/* psr_i = 0 */
	     ";;\n"
	     "ld1.acq r9 = [r9]\n"		/* r9 = XEN_PSR_I */
	     ";;\n"
	     "cmp.eq.unc p6, p0 = r9, r0\n"	/* p6 = (XEN_PSR_I == 0) */
	     ";;\n"
	     "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");

DEFINE_FUNC1(thash, unsigned long,
	     "break " __stringify(HYPERPRIVOP_THASH) "\n");
DEFINE_FUNC1(get_cpuid, int,
	     "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
DEFINE_FUNC1(get_pmd, int,
	     "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
DEFINE_FUNC1(get_rr, unsigned long,
	     "break " __stringify(HYPERPRIVOP_GET_RR) "\n");

/*
 * void xen_privop_ssm_i(void)
 *
 * int masked = !xen_get_virtual_psr_i();
 *	// masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
 * xen_set_virtual_psr_i(1)
 *	// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
 * // compiler barrier
 * if (masked) {
 *	uint8_t* pend_int_addr =
 *		(uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
 *	uint8_t pending = *pend_int_addr;
 *	if (pending)
 *		XEN_HYPER_SSM_I
 * }
 */
/* 4 bundles */
DEFINE_VOID_FUNC0(ssm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "ld1.acq r9 = [r8]\n"		/* r9 = XEN_PSR_I */
		  ";;\n"
		  "st1.rel [r8] = r0, -1\n"	/* psr_i = 0. enable interrupt
						 * r8 = XEN_PSR_I_ADDR - 1
						 *    = pend_int_addr
						 */
		  "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
						 * previously interrupt
						 * masked?
						 */
		  ";;\n"
		  "(p6) ld1.acq r8 = [r8]\n"	/* r8 = xen_pend_int */
		  ";;\n"
		  "(p6) cmp.eq.unc p6, p7 = r8, r0\n"	/* interrupt pending? */
		  ";;\n"
		  /* issue hypercall to get interrupt */
		  "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
		  ";;\n");

/*
 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
 *		   = XEN_PSR_I_ADDR_ADDR;
 * psr_i_addr = *psr_i_addr_addr;
 * *psr_i_addr = 1;
 */
/* 2 bundles */
DEFINE_VOID_FUNC0(rsm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
						/* r8 = XEN_PSR_I_ADDR_ADDR */
		  "mov r9 = 1\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "st1.rel [r8] = r9\n");	/* XEN_PSR_I = 1 */

extern void
xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
		   unsigned long val2, unsigned long val3,
		   unsigned long val4);
__DEFINE_FUNC(set_rr0_to_rr4,
	      "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");


extern unsigned long xen_getreg(int regnum);
#define __DEFINE_GET_REG(id, privop)					\
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n"			\
	";;\n"								\
	"cmp.eq p6, p0 = r2, r8\n"					\
	";;\n"								\
	"(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n"	\
	"(p6) br.cond.sptk.many b6\n"					\
	";;\n"

__DEFINE_FUNC(getreg,
	      __DEFINE_GET_REG(PSR, PSR)

	      /* get_itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itc\n"
	      ";;\n"

	      /* get itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itm_with_offset\n"
	      ";;\n"

	      __DEFINE_GET_REG(CR_IVR, IVR)
	      __DEFINE_GET_REG(CR_TPR, TPR)

	      /* fall back */
	      "movl r2 = ia64_native_getreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");

extern void xen_setreg(int regnum, unsigned long val);
#define __DEFINE_SET_REG(id, privop)					\
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n"			\
	";;\n"								\
	"cmp.eq p6, p0 = r2, r9\n"					\
	";;\n"								\
	"(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n"		\
	"(p6) br.cond.sptk.many b6\n"					\
	";;\n"

__DEFINE_FUNC(setreg,
	      /* kr0 .. kr7 */
	      /*
	       * if (_IA64_REG_AR_KR0 <= regnum &&
	       *     regnum <= _IA64_REG_AR_KR7) {
	       *     register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
	       *     register __val asm ("r9") = val
	       *    "break HYPERPRIVOP_SET_KR"
	       * }
	       */
	      "mov r17 = r9\n"
	      "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
	      ";;\n"
	      "cmp.ge p6, p0 = r9, r2\n"
	      "sub r17 = r17, r2\n"
	      ";;\n"
	      "(p6) cmp.ge.unc p7, p0 = "
	      __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
	      ", r17\n"
	      ";;\n"
	      "(p7) mov r9 = r8\n"
	      ";;\n"
	      "(p7) mov r8 = r17\n"
	      "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"

	      /* set itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itm_with_offset\n"

	      /* set itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itc\n"

	      __DEFINE_SET_REG(CR_TPR, SET_TPR)
	      __DEFINE_SET_REG(CR_EOI, EOI)

	      /* fall back */
	      "movl r2 = ia64_native_setreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
#endif

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.fc		= xen_fc,
	.thash		= xen_thash,
	.get_cpuid	= xen_get_cpuid,
	.get_pmd	= xen_get_pmd,
	.getreg		= xen_getreg,
	.setreg		= xen_setreg,
	.ptcga		= xen_ptcga,
	.get_rr		= xen_get_rr,
	.set_rr		= xen_set_rr,
	.set_rr0_to_rr4	= xen_set_rr0_to_rr4,
	.ssm_i		= xen_ssm_i,
	.rsm_i		= xen_rsm_i,
	.get_psr_i	= xen_get_psr_i,
	.intrin_local_irq_restore
			= xen_intrin_local_irq_restore,
};

/******************************************************************************
 * replacement of hand-written assembly code.
 */

extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;

const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long)&xen_switch_to,
	.leave_syscall		= (unsigned long)&xen_leave_syscall,
	.work_processed_syscall	= (unsigned long)&xen_work_processed_syscall,
	.leave_kernel		= (unsigned long)&xen_leave_kernel,
};

/***************************************************************************
 * pv_iosapic_ops
 * iosapic read/write hooks.
 */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}

static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}

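/*
 * IOSAPIC accesses are fully virtualized: translate the uncached
 * virtual address back to the physical base and let Xen perform the
 * actual register access through physdev hypercalls.
 */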
static unsigned int
xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	struct physdev_apic apic_op;
	int ret;

	apic_op.apic_physbase = (unsigned long)iosapic -
					__IA64_UNCACHED_OFFSET;
	apic_op.reg = reg;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
	if (ret)
		return ret;
	return apic_op.value;
}

static void
xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	struct physdev_apic apic_op;

	apic_op.apic_physbase = (unsigned long)iosapic -
					__IA64_UNCACHED_OFFSET;
	apic_op.reg = reg;
	apic_op.value = val;
	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}

static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,

	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};

/***************************************************************************
 * pv_ops initialization
 */

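/*
 * Called early in boot to replace the native pv_ops tables with the
 * Xen implementations defined above.
 */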
void __init
xen_setup_pv_ops(void)
{
	xen_info_init();
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_fsys_data = xen_fsys_data;
	pv_patchdata = xen_patchdata;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;

	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}

#ifdef ASM_SUPPORTED
/***************************************************************************
 * binary patching
 * pv_init_ops.patch_bundle
 */

#define DEFINE_FUNC_GETREG(name, privop)				\
	DEFINE_FUNC0(get_ ## name,					\
		     "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")

DEFINE_FUNC_GETREG(psr, PSR);
DEFINE_FUNC_GETREG(eflag, EFLAG);
DEFINE_FUNC_GETREG(ivr, IVR);
DEFINE_FUNC_GETREG(tpr, TPR);

#define DEFINE_FUNC_SET_KR(n)						\
	DEFINE_VOID_FUNC0(set_kr ## n,					\
			  ";;\n"					\
			  "mov r9 = r8\n"				\
			  "mov r8 = " #n "\n"				\
			  "break " __stringify(HYPERPRIVOP_SET_KR) "\n")

DEFINE_FUNC_SET_KR(0);
DEFINE_FUNC_SET_KR(1);
DEFINE_FUNC_SET_KR(2);
DEFINE_FUNC_SET_KR(3);
DEFINE_FUNC_SET_KR(4);
DEFINE_FUNC_SET_KR(5);
DEFINE_FUNC_SET_KR(6);
DEFINE_FUNC_SET_KR(7);

#define __DEFINE_FUNC_SETREG(name, privop)				\
	DEFINE_VOID_FUNC0(name,						\
			  "break "__stringify(HYPERPRIVOP_ ## privop) "\n")

#define DEFINE_FUNC_SETREG(name, privop)			\
	__DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)

DEFINE_FUNC_SETREG(eflag, EFLAG);
DEFINE_FUNC_SETREG(tpr, TPR);
__DEFINE_FUNC_SETREG(eoi, EOI);

extern const char xen_check_events[];
extern const char __xen_intrin_local_irq_restore_direct_start[];
extern const char __xen_intrin_local_irq_restore_direct_end[];
extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;

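/*
 * Out-of-line continuation for the patched-inline irq-restore sequence
 * below: unmask, then check the pending-event byte and issue the SSM_I
 * hyperprivop if an interrupt was pending.  The inline stub enters
 * here via brl with the return address already in b6.
 */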
asm (
	".align 32\n"
	".proc xen_check_events\n"
	"xen_check_events:\n"
	/* masked = 0
	 * r9 = masked_addr - 1
	 *    = pending_intr_addr
	 */
	"st1.rel [r9] = r0, -1\n"
	";;\n"
	/* r11 = pending_intr */
	"ld1.acq r11 = [r9]\n"
	";;\n"
	/* p9 = interrupt pending? */
	"cmp.ne p9, p10 = r11, r0\n"
	";;\n"
	"(p10) mf\n"
	/* issue hypercall to trigger interrupt */
	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
	"br.cond.sptk.many b6\n"
	".endp xen_check_events\n"
	"\n"
	".align 32\n"
	".proc __xen_intrin_local_irq_restore_direct\n"
	"__xen_intrin_local_irq_restore_direct:\n"
	"__xen_intrin_local_irq_restore_direct_start:\n"
	"1:\n"
	"{\n"
	"cmp.ne p6, p7 = r8, r0\n"
	"mov r17 = ip\n" /* get ip to calc return address */
	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	";;\n"
	"}\n"
	"{\n"
	/* r9 = XEN_PSR_I_ADDR */
	"ld8 r9 = [r9]\n"
	";;\n"
	/* r10 = masked previous value */
	"(p6) ld1.acq r10 = [r9]\n"
	"adds r17 =  1f - 1b, r17\n" /* calculate return address */
	";;\n"
	"}\n"
	"{\n"
	/* p8 = interrupt masked previously? */
	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
	"\n"
	/* p7 = else clause */
	"(p7) mov r11 = 1\n"
	";;\n"
	"(p8) mov b6 = r17\n" /* set return address */
	"}\n"
	"{\n"
	/* masked = 1 */
	"(p7) st1.rel [r9] = r11\n"
	"\n"
	"[99:]\n"
	"(p8) brl.cond.dptk.few xen_check_events\n"
	"}\n"
	/* pv calling stub is 5 bundles. fill with nops to adjust the return address */
	"{\n"
	"nop 0\n"
	"nop 0\n"
	"nop 0\n"
	"}\n"
	"1:\n"
	"__xen_intrin_local_irq_restore_direct_end:\n"
	".endp __xen_intrin_local_irq_restore_direct\n"
	"\n"
	".align 8\n"
	"__xen_intrin_local_irq_restore_direct_reloc:\n"
	"data8 99b\n"
);

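/*
 * Patch table consumed by xen_patch_bundle() below: for each patchable
 * paravirt call type, the bundle range of the Xen stub that can be
 * copied directly over the call site.
 */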
static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
__initdata_or_module =
{
#define XEN_PATCH_BUNDLE_ELEM(name, type)		\
	{						\
		(void*)xen_ ## name ## _direct_start,	\
		(void*)xen_ ## name ## _direct_end,	\
		PARAVIRT_PATCH_TYPE_ ## type,		\
	}

	XEN_PATCH_BUNDLE_ELEM(fc, FC),
	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
	{
		(void*)__xen_intrin_local_irq_restore_direct_start,
		(void*)__xen_intrin_local_irq_restore_direct_end,
		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
	},

#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg)			\
	{							\
		xen_get_ ## name ## _direct_start,		\
		xen_get_ ## name ## _direct_end,		\
		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
	}

	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),

	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),

	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),


#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)		\
	{							\
		xen_ ## name ## _direct_start,			\
		xen_ ## name ## _direct_end,			\
		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
	}

#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)			\
	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)

	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),

	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),

	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
};

static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
{
	const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
		sizeof(xen_patch_bundle_elems[0]);
	unsigned long used;
	const struct paravirt_patch_bundle_elem *found;

	used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
					     xen_patch_bundle_elems, nelems,
					     &found);

	if (found == NULL)
		/* fallback */
		return ia64_native_patch_bundle(sbundle, ebundle, type);
	if (used == 0)
		return used;

	/* relocation */
	switch (type) {
	case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
		unsigned long reloc =
			__xen_intrin_local_irq_restore_direct_reloc;
		unsigned long reloc_offset = reloc - (unsigned long)
			__xen_intrin_local_irq_restore_direct_start;
		unsigned long tag = (unsigned long)sbundle + reloc_offset;
		paravirt_patch_reloc_brl(tag, xen_check_events);
		break;
	}
	default:
		/* nothing */
		break;
	}
	return used;
}
#endif /* ASM_SUPPORTED */

const struct paravirt_patch_branch_target xen_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type)			\
	{						\
		&xen_ ## name,				\
		PARAVIRT_PATCH_TYPE_BR_ ## type,	\
	}
	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};

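/*
 * Branch patching: retarget the paravirtualized branch sites
 * (switch_to and the syscall/kernel leave paths) at the Xen assembly
 * entry points listed above.
 */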
static void __init
xen_patch_branch(unsigned long tag, unsigned long type)
{
	__paravirt_patch_apply_branch(tag, type, xen_branch_target,
					ARRAY_SIZE(xen_branch_target));
}