// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#include <stdint.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>

#include <sys/eventfd.h>

#define SHINFO_REGION_GVA	0xc0000000ULL
#define SHINFO_REGION_GPA	0xc0000000ULL
#define SHINFO_REGION_SLOT	10

#define DUMMY_REGION_GPA	(SHINFO_REGION_GPA + (3 * PAGE_SIZE))
#define DUMMY_REGION_SLOT	11

#define DUMMY_REGION_GPA_2	(SHINFO_REGION_GPA + (4 * PAGE_SIZE))
#define DUMMY_REGION_SLOT_2	12

#define SHINFO_ADDR	(SHINFO_REGION_GPA)
#define VCPU_INFO_ADDR	(SHINFO_REGION_GPA + 0x40)
#define PVTIME_ADDR	(SHINFO_REGION_GPA + PAGE_SIZE)
#define RUNSTATE_ADDR	(SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - 15)

#define SHINFO_VADDR	(SHINFO_REGION_GVA)
#define VCPU_INFO_VADDR	(SHINFO_REGION_GVA + 0x40)
#define RUNSTATE_VADDR	(SHINFO_REGION_GVA + PAGE_SIZE + PAGE_SIZE - 15)
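
/*
 * Note that RUNSTATE_ADDR is placed 15 bytes before the end of the second
 * page of the region, so the vcpu_runstate_info deliberately straddles a
 * page boundary; the loop at the end of main() exercises the runstate area
 * byte by byte across that boundary.
 */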

#define EVTCHN_VECTOR	0x10

#define EVTCHN_TEST1 15
#define EVTCHN_TEST2 66
#define EVTCHN_TIMER 13

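/*
 * Sync points for the test sequence: the guest passes each value to
 * GUEST_SYNC() as it reaches that phase, and the switch statement in
 * main() performs the matching host-side setup or checks before resuming
 * the guest.
 */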
enum {
	TEST_INJECT_VECTOR = 0,
	TEST_RUNSTATE_runnable,
	TEST_RUNSTATE_blocked,
	TEST_RUNSTATE_offline,
	TEST_RUNSTATE_ADJUST,
	TEST_RUNSTATE_DATA,
	TEST_STEAL_TIME,
	TEST_EVTCHN_MASKED,
	TEST_EVTCHN_UNMASKED,
	TEST_EVTCHN_SLOWPATH,
	TEST_EVTCHN_SEND_IOCTL,
	TEST_EVTCHN_HCALL,
	TEST_EVTCHN_HCALL_SLOWPATH,
	TEST_EVTCHN_HCALL_EVENTFD,
	TEST_TIMER_SETUP,
	TEST_TIMER_WAIT,
	TEST_TIMER_RESTORE,
	TEST_POLL_READY,
	TEST_POLL_TIMEOUT,
	TEST_POLL_MASKED,
	TEST_POLL_WAKE,
	SET_VCPU_INFO,
	TEST_TIMER_PAST,
	TEST_LOCKING_SEND_RACE,
	TEST_LOCKING_POLL_RACE,
	TEST_LOCKING_POLL_TIMEOUT,
	TEST_DONE,

	TEST_GUEST_SAW_IRQ,
};

#define XEN_HYPERCALL_MSR	0x40000000

#define MIN_STEAL_TIME		50000

#define SHINFO_RACE_TIMEOUT	2	/* seconds */

#define __HYPERVISOR_set_timer_op	15
#define __HYPERVISOR_sched_op		29
#define __HYPERVISOR_event_channel_op	32

#define SCHEDOP_poll			3

#define EVTCHNOP_send			4

#define EVTCHNSTAT_interdomain		2

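/*
 * Local copies of the Xen ABI structures used by the test, defined here so
 * that the selftest does not depend on the Xen public headers.
 */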
struct evtchn_send {
	u32 port;
};

struct sched_poll {
	u32 *ports;
	unsigned int nr_ports;
	u64 timeout;
};

struct pvclock_vcpu_time_info {
	u32   version;
	u32   pad0;
	u64   tsc_timestamp;
	u64   system_time;
	u32   tsc_to_system_mul;
	s8    tsc_shift;
	u8    flags;
	u8    pad[2];
} __attribute__((__packed__)); /* 32 bytes */

struct pvclock_wall_clock {
	u32   version;
	u32   sec;
	u32   nsec;
} __attribute__((__packed__));

struct vcpu_runstate_info {
	uint32_t state;
	uint64_t state_entry_time;
	uint64_t time[5]; /* Extra field for overrun check */
};

struct compat_vcpu_runstate_info {
	uint32_t state;
	uint64_t state_entry_time;
	uint64_t time[5];
} __attribute__((__packed__));

struct arch_vcpu_info {
	unsigned long cr2;
	unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};

struct vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	unsigned long evtchn_pending_sel;
	struct arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct shared_info {
	struct vcpu_info vcpu_info[32];
	unsigned long evtchn_pending[64];
	unsigned long evtchn_mask[64];
	struct pvclock_wall_clock wc;
	uint32_t wc_sec_hi;
	/* arch_shared_info here */
};

#define RUNSTATE_running  0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked  2
#define RUNSTATE_offline  3

static const char *runstate_names[] = {
	"running",
	"runnable",
	"blocked",
	"offline"
};

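/*
 * struct kvm_irq_routing ends in a flexible array member, so wrapping it
 * in an anonymous struct is the usual way to provide static storage for a
 * fixed number of routing entries.
 */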
struct {
	struct kvm_irq_routing info;
	struct kvm_irq_routing_entry entries[2];
} irq_routes;

static volatile bool guest_saw_irq;

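/*
 * Guest upcall vector handler: ack the event by clearing the pending flags
 * in the vcpu_info, record that the IRQ arrived, and report it to the host.
 */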
static void evtchn_handler(struct ex_regs *regs)
{
	struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
	vi->evtchn_upcall_pending = 0;
	vi->evtchn_pending_sel = 0;
	guest_saw_irq = true;

	GUEST_SYNC(TEST_GUEST_SAW_IRQ);
}

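/*
 * Busy-wait until the upcall handler has run. "rep nop" is the PAUSE
 * instruction, which keeps the spin loop friendly to SMT siblings, and the
 * "memory" clobber forces guest_saw_irq to be re-read on each iteration.
 */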
static void guest_wait_for_irq(void)
{
	while (!guest_saw_irq)
		__asm__ __volatile__ ("rep nop" : : : "memory");
	guest_saw_irq = false;
}

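/*
 * Guest side of the test sequence. The STI below enables interrupts, and
 * the following NOP presumably gives the one-instruction STI interrupt
 * shadow a chance to expire before the first event is injected.
 */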
static void guest_code(void)
{
	struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
	int i;

	__asm__ __volatile__(
		"sti\n"
		"nop\n"
	);

	/* Trigger an interrupt injection */
	GUEST_SYNC(TEST_INJECT_VECTOR);

	guest_wait_for_irq();

	/* Test having the host set runstates manually */
	GUEST_SYNC(TEST_RUNSTATE_runnable);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
	GUEST_ASSERT(rs->state == 0);

	GUEST_SYNC(TEST_RUNSTATE_blocked);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] != 0);
	GUEST_ASSERT(rs->state == 0);

	GUEST_SYNC(TEST_RUNSTATE_offline);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] != 0);
	GUEST_ASSERT(rs->state == 0);

	/* Test runstate time adjust */
	GUEST_SYNC(TEST_RUNSTATE_ADJUST);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b);

	/* Test runstate time set */
	GUEST_SYNC(TEST_RUNSTATE_DATA);
	GUEST_ASSERT(rs->state_entry_time >= 0x8000);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a);

	/* sched_yield() should result in some 'runnable' time */
	GUEST_SYNC(TEST_STEAL_TIME);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME);

	/* Attempt to deliver a *masked* interrupt */
	GUEST_SYNC(TEST_EVTCHN_MASKED);

	/* Wait until we see the bit set */
	struct shared_info *si = (void *)SHINFO_VADDR;
	while (!si->evtchn_pending[0])
		__asm__ __volatile__ ("rep nop" : : : "memory");

	/* Now deliver an *unmasked* interrupt */
	GUEST_SYNC(TEST_EVTCHN_UNMASKED);

	guest_wait_for_irq();

	/* Change memslots and deliver an interrupt */
	GUEST_SYNC(TEST_EVTCHN_SLOWPATH);

	guest_wait_for_irq();

	/* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */
	GUEST_SYNC(TEST_EVTCHN_SEND_IOCTL);

	guest_wait_for_irq();

	GUEST_SYNC(TEST_EVTCHN_HCALL);

	/* Our turn. Deliver event channel (to ourselves) with
	 * EVTCHNOP_send hypercall. */
	struct evtchn_send s = { .port = 127 };
	xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);

	guest_wait_for_irq();

	GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH);

	/*
	 * Same again, but this time the host has messed with memslots so it
	 * should take the slow path in kvm_xen_set_evtchn().
	 */
	xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);

	guest_wait_for_irq();

	GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD);

	/* Deliver "outbound" event channel to an eventfd which
	 * happens to be one of our own irqfds. */
	s.port = 197;
	xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);

	guest_wait_for_irq();

	GUEST_SYNC(TEST_TIMER_SETUP);

	/* Set a timer 100ms in the future. */
	xen_hypercall(__HYPERVISOR_set_timer_op,
		      rs->state_entry_time + 100000000, NULL);

	GUEST_SYNC(TEST_TIMER_WAIT);

	/* Now wait for the timer */
	guest_wait_for_irq();

	GUEST_SYNC(TEST_TIMER_RESTORE);

	/* The host has 'restored' the timer. Just wait for it. */
	guest_wait_for_irq();

	GUEST_SYNC(TEST_POLL_READY);

	/* Poll for an event channel port which is already set */
	u32 ports[1] = { EVTCHN_TIMER };
	struct sched_poll p = {
		.ports = ports,
		.nr_ports = 1,
		.timeout = 0,
	};

	xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

	GUEST_SYNC(TEST_POLL_TIMEOUT);

	/* Poll for an unset port and wait for the timeout. */
	p.timeout = 100000000;
	xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

	GUEST_SYNC(TEST_POLL_MASKED);

	/* A timer will wake the masked port we're waiting on, while we poll */
	p.timeout = 0;
	xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

	GUEST_SYNC(TEST_POLL_WAKE);

	/* Set the vcpu_info to point at exactly the place it already is to
	 * make sure the attribute is functional. */
	GUEST_SYNC(SET_VCPU_INFO);

	/* A timer will wake an *unmasked* port, which should wake us with an
	 * actual interrupt while we're polling on a different port. */
	ports[0]++;
	p.timeout = 0;
	xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

	guest_wait_for_irq();

	GUEST_SYNC(TEST_TIMER_PAST);

	/* Timer should have fired already */
	guest_wait_for_irq();

	GUEST_SYNC(TEST_LOCKING_SEND_RACE);
	/* Racing host ioctls */

	guest_wait_for_irq();

	GUEST_SYNC(TEST_LOCKING_POLL_RACE);
	/* Racing vmcall against host ioctl */

	ports[0] = 0;

	p = (struct sched_poll) {
		.ports = ports,
		.nr_ports = 1,
		.timeout = 0
	};

wait_for_timer:
	/*
	 * Poll for a timer wake event while the worker thread is mucking with
	 * the shared info.  KVM XEN drops timer IRQs if the shared info is
	 * invalid when the timer expires.  Arbitrarily poll 100 times before
	 * giving up and asking the VMM to re-arm the timer.  100 polls should
	 * consume enough time to beat on KVM without taking too long if the
	 * timer IRQ is dropped due to an invalid event channel.
	 */
	for (i = 0; i < 100 && !guest_saw_irq; i++)
		__xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

	/*
	 * Re-send the timer IRQ if it was (likely) dropped due to the timer
	 * expiring while the event channel was invalid.
	 */
	if (!guest_saw_irq) {
		GUEST_SYNC(TEST_LOCKING_POLL_TIMEOUT);
		goto wait_for_timer;
	}
	guest_saw_irq = false;

	GUEST_SYNC(TEST_DONE);
}

static int cmp_timespec(struct timespec *a, struct timespec *b)
{
	if (a->tv_sec > b->tv_sec)
		return 1;
	else if (a->tv_sec < b->tv_sec)
		return -1;
	else if (a->tv_nsec > b->tv_nsec)
		return 1;
	else if (a->tv_nsec < b->tv_nsec)
		return -1;
	else
		return 0;
}

static struct shared_info *shinfo;
static struct vcpu_info *vinfo;
static struct kvm_vcpu *vcpu;

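/*
 * SIGALRM watchdog: phases that expect an IRQ arm a one-second timeout
 * with alarm(1); if the IRQ never arrives, this handler dumps the vCPU
 * state and fails the test.
 */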
static void handle_alrm(int sig)
{
	if (vinfo)
		printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
	vcpu_dump(stdout, vcpu, 0);
	TEST_FAIL("IRQ delivery timed out");
}

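/*
 * Worker thread for the locking tests: repeatedly activate and deactivate
 * the shared_info cache (by GFN, and by HVA where supported) so that event
 * delivery races against a transiently invalid shared_info mapping.
 */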
static void *juggle_shinfo_state(void *arg)
{
	struct kvm_vm *vm = (struct kvm_vm *)arg;

	struct kvm_xen_hvm_attr cache_activate_gfn = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
	};

	struct kvm_xen_hvm_attr cache_deactivate_gfn = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = KVM_XEN_INVALID_GFN
	};

	struct kvm_xen_hvm_attr cache_activate_hva = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
		.u.shared_info.hva = (unsigned long)shinfo
	};

	struct kvm_xen_hvm_attr cache_deactivate_hva = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
		.u.shared_info.hva = 0
	};

	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);

	for (;;) {
		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
		pthread_testcancel();
		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);

		if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
			pthread_testcancel();
			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
		}
	}

	return NULL;
}

int main(int argc, char *argv[])
{
	struct timespec min_ts, max_ts, vm_ts;
	struct kvm_xen_hvm_attr evt_reset;
	struct kvm_vm *vm;
	pthread_t thread;
	bool verbose;
	int ret;

	verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
			       !strncmp(argv[1], "--verbose", 10));

	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
	TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);

	bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
	bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG);
	bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
	bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
	bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA);

	clock_gettime(CLOCK_REALTIME, &min_ts);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Map a region for the shared_info page */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);

	shinfo = addr_gpa2hva(vm, SHINFO_VADDR);

	int zero_fd = open("/dev/zero", O_RDONLY);
	TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");

	struct kvm_xen_hvm_config hvmc = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr = XEN_HYPERCALL_MSR,
	};

	/* Let the kernel know that we *will* use it for sending all
	 * event channels, which lets it intercept SCHEDOP_poll */
	if (do_evtchn_tests)
		hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);

	struct kvm_xen_hvm_attr lm = {
		.type = KVM_XEN_ATTR_TYPE_LONG_MODE,
		.u.long_mode = 1,
	};
	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);

	if (do_runstate_flag) {
		struct kvm_xen_hvm_attr ruf = {
			.type = KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG,
			.u.runstate_update_flag = 1,
		};
		vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ruf);

		ruf.u.runstate_update_flag = 0;
		vm_ioctl(vm, KVM_XEN_HVM_GET_ATTR, &ruf);
		TEST_ASSERT(ruf.u.runstate_update_flag == 1,
			    "Failed to read back RUNSTATE_UPDATE_FLAG attr");
	}

	struct kvm_xen_hvm_attr ha = {};

	if (has_shinfo_hva) {
		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
		ha.u.shared_info.hva = (unsigned long)shinfo;
	} else {
		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
		ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
	}

	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);

	/*
	 * Test what happens when the HVA of the shinfo page is remapped after
	 * the kernel has a reference to it. But make sure we copy the clock
	 * info over since that's only set at setup time, and we test it later.
	 */
	struct pvclock_wall_clock wc_copy = shinfo->wc;
	void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);
	TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");
	shinfo->wc = wc_copy;

	struct kvm_xen_vcpu_attr vi = {
		.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
		.u.gpa = VCPU_INFO_ADDR,
	};
	vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);

	struct kvm_xen_vcpu_attr pvclock = {
		.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
		.u.gpa = PVTIME_ADDR,
	};
	vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);

	struct kvm_xen_hvm_attr vec = {
		.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
		.u.vector = EVTCHN_VECTOR,
	};
	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);

	if (do_runstate_tests) {
		struct kvm_xen_vcpu_attr st = {
			.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
			.u.gpa = RUNSTATE_ADDR,
		};
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
	}

	int irq_fd[2] = { -1, -1 };

	if (do_eventfd_tests) {
		irq_fd[0] = eventfd(0, 0);
		irq_fd[1] = eventfd(0, 0);

		/* Unexpected, but not a KVM failure */
		if (irq_fd[0] == -1 || irq_fd[1] == -1)
			do_evtchn_tests = do_eventfd_tests = false;
	}

	if (do_eventfd_tests) {
		irq_routes.info.nr = 2;

		irq_routes.entries[0].gsi = 32;
		irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
		irq_routes.entries[0].u.xen_evtchn.port = EVTCHN_TEST1;
		irq_routes.entries[0].u.xen_evtchn.vcpu = vcpu->id;
		irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		irq_routes.entries[1].gsi = 33;
		irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
		irq_routes.entries[1].u.xen_evtchn.port = EVTCHN_TEST2;
		irq_routes.entries[1].u.xen_evtchn.vcpu = vcpu->id;
		irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes.info);

		struct kvm_irqfd ifd = { };

		ifd.fd = irq_fd[0];
		ifd.gsi = 32;
		vm_ioctl(vm, KVM_IRQFD, &ifd);

		ifd.fd = irq_fd[1];
		ifd.gsi = 33;
		vm_ioctl(vm, KVM_IRQFD, &ifd);

		struct sigaction sa = { };
		sa.sa_handler = handle_alrm;
		sigaction(SIGALRM, &sa, NULL);
	}

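	/*
	 * Template for the per-vCPU timer attribute. An expires_ns of zero
	 * means no timer is currently armed; the port and priority are
	 * registered here so later test phases can arm and re-arm it.
	 */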
	struct kvm_xen_vcpu_attr tmr = {
		.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
		.u.timer.port = EVTCHN_TIMER,
		.u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
		.u.timer.expires_ns = 0
	};

	if (do_evtchn_tests) {
		struct kvm_xen_hvm_attr inj = {
			.type = KVM_XEN_ATTR_TYPE_EVTCHN,
			.u.evtchn.send_port = 127,
			.u.evtchn.type = EVTCHNSTAT_interdomain,
			.u.evtchn.flags = 0,
			.u.evtchn.deliver.port.port = EVTCHN_TEST1,
			.u.evtchn.deliver.port.vcpu = vcpu->id + 1,
			.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
		};
		vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);

		/* Test migration to a different vCPU */
		inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE;
		inj.u.evtchn.deliver.port.vcpu = vcpu->id;
		vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);

		inj.u.evtchn.send_port = 197;
		inj.u.evtchn.deliver.eventfd.port = 0;
		inj.u.evtchn.deliver.eventfd.fd = irq_fd[1];
		inj.u.evtchn.flags = 0;
		vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);

		vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
	}
	vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
	vinfo->evtchn_upcall_pending = 0;

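	/*
	 * Poison the runstate area with a sentinel; the kernel overwrites
	 * rs->state with a real RUNSTATE_* value once the runstate area is
	 * active, which the guest-side assertions rely on.
	 */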
	struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
	rs->state = 0x5a;

	bool evtchn_irq_expected = false;

	for (;;) {
		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC: {
			struct kvm_xen_vcpu_attr rst;
			long rundelay;

			if (do_runstate_tests)
				TEST_ASSERT(rs->state_entry_time == rs->time[0] +
					    rs->time[1] + rs->time[2] + rs->time[3],
					    "runstate times don't add up");

			switch (uc.args[1]) {
			case TEST_INJECT_VECTOR:
				if (verbose)
					printf("Delivering evtchn upcall\n");
				evtchn_irq_expected = true;
				vinfo->evtchn_upcall_pending = 1;
				break;

			case TEST_RUNSTATE_runnable...TEST_RUNSTATE_offline:
				TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
				if (!do_runstate_tests)
					goto done;
				if (verbose)
					printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
				rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
				rst.u.runstate.state = uc.args[1] + RUNSTATE_runnable -
					TEST_RUNSTATE_runnable;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
				break;

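			/*
			 * RUNSTATE_ADJUST applies signed deltas rather than
			 * absolute values. The runnable delta below is chosen
			 * to cancel the blocked and offline deltas, so the
			 * invariant that state_entry_time equals the sum of
			 * the four times still holds afterwards.
			 */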
			case TEST_RUNSTATE_ADJUST:
				if (verbose)
					printf("Testing RUNSTATE_ADJUST\n");
				rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
				memset(&rst.u, 0, sizeof(rst.u));
				rst.u.runstate.state = (uint64_t)-1;
				rst.u.runstate.time_blocked =
					0x5a - rs->time[RUNSTATE_blocked];
				rst.u.runstate.time_offline =
					0x6b6b - rs->time[RUNSTATE_offline];
				rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
					rst.u.runstate.time_offline;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
				break;

			case TEST_RUNSTATE_DATA:
				if (verbose)
					printf("Testing RUNSTATE_DATA\n");
				rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA;
				memset(&rst.u, 0, sizeof(rst.u));
				rst.u.runstate.state = RUNSTATE_running;
				rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
				rst.u.runstate.time_blocked = 0x6b6b;
				rst.u.runstate.time_offline = 0x5a;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
				break;

			case TEST_STEAL_TIME:
				if (verbose)
					printf("Testing steal time\n");
				/* Yield until scheduler delay exceeds target */
				rundelay = get_run_delay() + MIN_STEAL_TIME;
				do {
					sched_yield();
				} while (get_run_delay() < rundelay);
				break;

			case TEST_EVTCHN_MASKED:
				if (!do_eventfd_tests)
					goto done;
				if (verbose)
					printf("Testing masked event channel\n");
				shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;
				eventfd_write(irq_fd[0], 1UL);
				alarm(1);
				break;

			case TEST_EVTCHN_UNMASKED:
				if (verbose)
					printf("Testing unmasked event channel\n");
				/* Unmask that, but deliver the other one */
				shinfo->evtchn_pending[0] = 0;
				shinfo->evtchn_mask[0] = 0;
				eventfd_write(irq_fd[1], 1UL);
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_EVTCHN_SLOWPATH:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[1] = 0;
				if (verbose)
					printf("Testing event channel after memslot change\n");
				vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
							    DUMMY_REGION_GPA, DUMMY_REGION_SLOT, 1, 0);
				eventfd_write(irq_fd[0], 1UL);
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_EVTCHN_SEND_IOCTL:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				if (!do_evtchn_tests)
					goto done;

				shinfo->evtchn_pending[0] = 0;
				if (verbose)
					printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n");

				struct kvm_irq_routing_xen_evtchn e;
				e.port = EVTCHN_TEST2;
				e.vcpu = vcpu->id;
				e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

				vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &e);
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_EVTCHN_HCALL:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[1] = 0;

				if (verbose)
					printf("Testing guest EVTCHNOP_send direct to evtchn\n");
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_EVTCHN_HCALL_SLOWPATH:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[0] = 0;

				if (verbose)
					printf("Testing guest EVTCHNOP_send direct to evtchn after memslot change\n");
				vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
							    DUMMY_REGION_GPA_2, DUMMY_REGION_SLOT_2, 1, 0);
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_EVTCHN_HCALL_EVENTFD:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[0] = 0;

				if (verbose)
					printf("Testing guest EVTCHNOP_send to eventfd\n");
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_TIMER_SETUP:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[1] = 0;

				if (verbose)
					printf("Testing guest oneshot timer\n");
				break;

			case TEST_TIMER_WAIT:
				memset(&tmr, 0, sizeof(tmr));
				tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
				TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
					    "Timer port not returned");
				TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
					    "Timer priority not returned");
				TEST_ASSERT(tmr.u.timer.expires_ns > rs->state_entry_time,
					    "Timer expiry not returned");
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_TIMER_RESTORE:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				shinfo->evtchn_pending[0] = 0;

				if (verbose)
					printf("Testing restored oneshot timer\n");

				tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
				evtchn_irq_expected = true;
				alarm(1);
				break;

			case TEST_POLL_READY:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");

				if (verbose)
					printf("Testing SCHEDOP_poll with already pending event\n");
				shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 1UL << EVTCHN_TIMER;
				alarm(1);
				break;

			case TEST_POLL_TIMEOUT:
				if (verbose)
					printf("Testing SCHEDOP_poll timeout\n");
				shinfo->evtchn_pending[0] = 0;
				alarm(1);
				break;

			case TEST_POLL_MASKED:
				if (verbose)
					printf("Testing SCHEDOP_poll wake on masked event\n");

				tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
				alarm(1);
				break;

			case TEST_POLL_WAKE:
				shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0;
				if (verbose)
					printf("Testing SCHEDOP_poll wake on unmasked event\n");

				evtchn_irq_expected = true;
				tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);

				/* Read it back and check the pending time is reported correctly */
				tmr.u.timer.expires_ns = 0;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
				TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
					    "Timer not reported pending");
				alarm(1);
				break;

			case SET_VCPU_INFO:
				if (has_shinfo_hva) {
					struct kvm_xen_vcpu_attr vih = {
						.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
						.u.hva = (unsigned long)vinfo
					};
					vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vih);
				}
				break;

			case TEST_TIMER_PAST:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				/* Read timer and check it is no longer pending */
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
				TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");

				shinfo->evtchn_pending[0] = 0;
				if (verbose)
					printf("Testing timer in the past\n");

				evtchn_irq_expected = true;
				tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
				alarm(1);
				break;

			case TEST_LOCKING_SEND_RACE:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");
				alarm(0);

				if (verbose)
					printf("Testing shinfo lock corruption (KVM_XEN_HVM_EVTCHN_SEND)\n");

				ret = pthread_create(&thread, NULL, &juggle_shinfo_state, (void *)vm);
				TEST_ASSERT(ret == 0, "pthread_create() failed: %s", strerror(ret));

				struct kvm_irq_routing_xen_evtchn uxe = {
					.port = 1,
					.vcpu = vcpu->id,
					.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL
				};

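				/*
				 * Hammer the event injection path for a fixed
				 * wall-clock interval while the worker thread
				 * flips the shinfo cache. The unchecked
				 * __vm_ioctl() is used since, presumably,
				 * sends may fail while the cache is invalid.
				 */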
				evtchn_irq_expected = true;
				for (time_t t = time(NULL) + SHINFO_RACE_TIMEOUT; time(NULL) < t;)
					__vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
				break;

			case TEST_LOCKING_POLL_RACE:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");

				if (verbose)
					printf("Testing shinfo lock corruption (SCHEDOP_poll)\n");

				shinfo->evtchn_pending[0] = 1;

				evtchn_irq_expected = true;
				tmr.u.timer.expires_ns = rs->state_entry_time +
							 SHINFO_RACE_TIMEOUT * 1000000000ULL;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
				break;

			case TEST_LOCKING_POLL_TIMEOUT:
				/*
				 * Optional and possibly repeated sync point.
				 * Injecting the timer IRQ may fail if the
				 * shinfo is invalid when the timer expires.
				 * If the timer has expired but the IRQ hasn't
				 * been delivered, rearm the timer and retry.
				 */
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);

				/* Resume the guest if the timer is still pending. */
				if (tmr.u.timer.expires_ns)
					break;

				/* All done if the IRQ was delivered. */
				if (!evtchn_irq_expected)
					break;

				tmr.u.timer.expires_ns = rs->state_entry_time +
							 SHINFO_RACE_TIMEOUT * 1000000000ULL;
				vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
				break;
			case TEST_DONE:
				TEST_ASSERT(!evtchn_irq_expected,
					    "Expected event channel IRQ but it didn't happen");

				ret = pthread_cancel(thread);
				TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));

				ret = pthread_join(thread, 0);
				TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
				goto done;

			case TEST_GUEST_SAW_IRQ:
				TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
				evtchn_irq_expected = false;
				break;
			}
			break;
		}
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
		}
	}

 done:
	evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
	evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);

	alarm(0);
	clock_gettime(CLOCK_REALTIME, &max_ts);

	/*
	 * Just a *really* basic check that things are being put in the
	 * right place. The actual calculations are much the same for
	 * Xen as they are for the KVM variants, so no need to check.
	 */
	struct pvclock_wall_clock *wc;
	struct pvclock_vcpu_time_info *ti, *ti2;

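	/*
	 * These offsets follow from the struct layouts above: the wall clock
	 * sits at offset 0xc00 of shared_info (after 32 x 64-byte vcpu_info
	 * entries and two 512-byte event channel bitmaps), and ti is the
	 * pvclock_vcpu_time_info embedded at offset 0x20 of the vcpu_info at
	 * SHINFO_REGION_GPA + 0x40.
	 */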
	wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00);
	ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20);
	ti2 = addr_gpa2hva(vm, PVTIME_ADDR);

	if (verbose) {
		printf("Wall clock (v %d) %d.%09d\n", wc->version, wc->sec, wc->nsec);
		printf("Time info 1: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
		       ti->version, ti->tsc_timestamp, ti->system_time, ti->tsc_to_system_mul,
		       ti->tsc_shift, ti->flags);
		printf("Time info 2: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
		       ti2->version, ti2->tsc_timestamp, ti2->system_time, ti2->tsc_to_system_mul,
		       ti2->tsc_shift, ti2->flags);
	}

	vm_ts.tv_sec = wc->sec;
	vm_ts.tv_nsec = wc->nsec;
	TEST_ASSERT(wc->version && !(wc->version & 1),
		    "Bad wallclock version %x", wc->version);
	TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old");
	TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new");

	TEST_ASSERT(ti->version && !(ti->version & 1),
		    "Bad time_info version %x", ti->version);
	TEST_ASSERT(ti2->version && !(ti2->version & 1),
		    "Bad time_info version %x", ti2->version);

	if (do_runstate_tests) {
		/*
		 * Fetch runstate and check sanity. Strictly speaking in the
		 * general case we might not expect the numbers to be identical
		 * but in this case we know we aren't running the vCPU any more.
		 */
		struct kvm_xen_vcpu_attr rst = {
			.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
		};
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);

		if (verbose) {
			printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
			       rs->state <= RUNSTATE_offline ? runstate_names[rs->state] : "unknown",
			       rs->state, rs->state_entry_time);
			for (int i = RUNSTATE_running; i <= RUNSTATE_offline; i++) {
				printf("State %s: %" PRIu64 " ns\n",
				       runstate_names[i], rs->time[i]);
			}
		}

		/*
		 * Exercise runstate info at all points across the page boundary, in
		 * 32-bit and 64-bit mode. In particular, test the case where it is
		 * configured in 32-bit mode and then switched to 64-bit mode while
		 * active, which takes it onto the second page.
		 */
		unsigned long runstate_addr;
		struct compat_vcpu_runstate_info *crs;
		for (runstate_addr = SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - sizeof(*rs) - 4;
		     runstate_addr < SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE + 4; runstate_addr++) {

			rs = addr_gpa2hva(vm, runstate_addr);
			crs = (void *)rs;
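
			/*
			 * The packed compat layout puts state_entry_time at
			 * offset 4, while the natural 64-bit layout aligns it
			 * to offset 8, so the same guest memory is decoded
			 * via crs in 32-bit mode and via rs in 64-bit mode.
			 */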

			memset(rs, 0xa5, sizeof(*rs));

			/* Set to compatibility mode */
			lm.u.long_mode = 0;
			vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);

			/* Set runstate to new address (kernel will write it) */
			struct kvm_xen_vcpu_attr st = {
				.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
				.u.gpa = runstate_addr,
			};
			vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);

			if (verbose)
				printf("Compatibility runstate at %08lx\n", runstate_addr);

			TEST_ASSERT(crs->state == rst.u.runstate.state, "Runstate mismatch");
			TEST_ASSERT(crs->state_entry_time == rst.u.runstate.state_entry_time,
				    "State entry time mismatch");
			TEST_ASSERT(crs->time[RUNSTATE_running] == rst.u.runstate.time_running,
				    "Running time mismatch");
			TEST_ASSERT(crs->time[RUNSTATE_runnable] == rst.u.runstate.time_runnable,
				    "Runnable time mismatch");
			TEST_ASSERT(crs->time[RUNSTATE_blocked] == rst.u.runstate.time_blocked,
				    "Blocked time mismatch");
			TEST_ASSERT(crs->time[RUNSTATE_offline] == rst.u.runstate.time_offline,
				    "Offline time mismatch");
			TEST_ASSERT(crs->time[RUNSTATE_offline + 1] == 0xa5a5a5a5a5a5a5a5ULL,
				    "Structure overrun");
			TEST_ASSERT(crs->state_entry_time == crs->time[0] +
				    crs->time[1] + crs->time[2] + crs->time[3],
				    "runstate times don't add up");

			/* Now switch to 64-bit mode */
			lm.u.long_mode = 1;
			vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);

			memset(rs, 0xa5, sizeof(*rs));

			/* Don't change the address, just trigger a write */
			struct kvm_xen_vcpu_attr adj = {
				.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST,
				.u.runstate.state = (uint64_t)-1
			};
			vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj);

			if (verbose)
				printf("64-bit runstate at %08lx\n", runstate_addr);

			TEST_ASSERT(rs->state == rst.u.runstate.state, "Runstate mismatch");
			TEST_ASSERT(rs->state_entry_time == rst.u.runstate.state_entry_time,
				    "State entry time mismatch");
			TEST_ASSERT(rs->time[RUNSTATE_running] == rst.u.runstate.time_running,
				    "Running time mismatch");
			TEST_ASSERT(rs->time[RUNSTATE_runnable] == rst.u.runstate.time_runnable,
				    "Runnable time mismatch");
			TEST_ASSERT(rs->time[RUNSTATE_blocked] == rst.u.runstate.time_blocked,
				    "Blocked time mismatch");
			TEST_ASSERT(rs->time[RUNSTATE_offline] == rst.u.runstate.time_offline,
				    "Offline time mismatch");
			TEST_ASSERT(rs->time[RUNSTATE_offline + 1] == 0xa5a5a5a5a5a5a5a5ULL,
				    "Structure overrun");

			TEST_ASSERT(rs->state_entry_time == rs->time[0] +
				    rs->time[1] + rs->time[2] + rs->time[3],
				    "runstate times don't add up");
		}
	}

	kvm_vm_free(vm);
	return 0;
}