// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM.  This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications.  Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique.  Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector.  This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities.  For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involve messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};
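
/*
 * Worked example of the encoding above: PCI_PROTOCOL_VERSION_1_4 is
 * PCI_MAKE_VERSION(1, 4) == 0x00010004, so PCI_MAJOR_VERSION() yields 1
 * and PCI_MINOR_VERSION() yields 4.
 */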

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
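
/*
 * Worked example of the layout above: each bus gets 0x2000 bytes of MMIO
 * space.  The first 4 KiB page selects the function to access; the second
 * page, at CFG_PAGE_OFFSET (0x1000), exposes CFG_PAGE_SIZE (0x1000) bytes
 * of that function's configuration space.
 */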

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for a 32-bit serial number as a string */
#define SLOT_NAME_SIZE 11
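
/*
 * Worked example: the largest 32-bit serial number, 4294967295, needs ten
 * decimal digits plus a NUL terminator, hence 11 bytes.
 */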

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does.  This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
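
/*
 * Worked example of the encoding above: Linux devfn 0x0b is device 1,
 * function 3.  With dev in the low 5 bits and func in the next 3, the
 * Windows representation is slot = 1 | (3 << 5) = 0x61.
 */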

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 *	Everything is the same as in 'hv_msi_desc2' except that the size of the
 *	'vector' field is larger to support bigger vector values. For example:
 *	LPI vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;
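
/*
 * Illustrative values only (x86): after a successful
 * PCI_CREATE_INTERRUPT_MESSAGE exchange, @address typically falls in the
 * 0xFEExxxxx LAPIC MSI window and @data carries the vector, just as with
 * bare-metal MSI.
 */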

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls; /* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER	handle_edge_irq
#define FLOW_NAME	"edge"

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare		pci_msi_prepare

/**
 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the above hv_do_hypercall()
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irq_data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but we leave
 * a bit of room at the start to allow for SPIs to be specified through ACPI,
 * and we start at a power of two to satisfy the power-of-2 multi-MSI
 * requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE		0
#define FLOW_HANDLER		NULL
#define FLOW_NAME		NULL
#define hv_msi_prepare		NULL
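
/*
 * Worked example of the SPI range above: vectors 64 through 1019 are
 * available to vPCI, i.e. HV_PCI_MSI_SPI_NR = 1020 - 64 = 956.
 */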

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 2;
	fwspec.param[0] = hwirq;
	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first cpu as the temporary irq affinity that is used when composing
 * the MSI from the hypervisor. GIC will eventually set the right affinity for
 * the irq and the 'unmask' will retarget the interrupt to that cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * Once enabled, the IRQ domain should not be removed, since there is
	 * no way to ensure that all the corresponding devices are also gone
	 * and that no interrupts will be generated.
	 */
	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
							  fn, &hv_pci_domain_ops,
							  chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for the interrupts of PCI devices, and SPIs are managed via
 * GICD registers, which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
						u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn:	The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot:	The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page.  Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space.  Writing to the first page chooses
 * the PCI function being written or read.  Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */

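/*
 * Illustrative sketch of the mechanism described above (the real
 * implementations follow):
 *
 *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);	select function
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);	access cfg space
 */
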
/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
					 CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
					     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver.  These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional.  Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver.  The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks.  This invalidation is delivered via a callback
 * supplied to this driver by the VF driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */

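/*
 * Illustrative sketch only (not code from this file): a VF driver can use
 * this backchannel through the hvpci_block_ops interface declared in
 * <linux/hyperv.h>, which this driver exports.  For example, to read
 * config block 0:
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes_returned;
 *	int ret;
 *
 *	ret = hvpci_block_ops.read_block(pdev, buf, sizeof(buf), 0,
 *					 &bytes_returned);
 *	if (ret)
 *		return ret;
 */
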
struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Register a callback to be invoked when a
 * config block invalidation arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Callback invoked with a mask of the blocks being
 *			invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
1566static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1567					void (*block_invalidate)(void *context,
1568								 u64 block_mask))
1569{
1570	struct hv_pcibus_device *hbus =
1571		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1572			     sysdata);
1573	struct hv_pci_dev *hpdev;
1574
1575	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1576	if (!hpdev)
1577		return -ENODEV;
1578
1579	hpdev->block_invalidate = block_invalidate;
1580	hpdev->invalidate_context = context;
1581
1582	put_pcichild(hpdev);
1583	return 0;
1584
1585}
1586
1587/* Interrupt management hooks */
1588static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1589			     struct tran_int_desc *int_desc)
1590{
1591	struct pci_delete_interrupt *int_pkt;
1592	struct {
1593		struct pci_packet pkt;
1594		u8 buffer[sizeof(struct pci_delete_interrupt)];
1595	} ctxt;
1596
1597	if (!int_desc->vector_count) {
1598		kfree(int_desc);
1599		return;
1600	}
1601	memset(&ctxt, 0, sizeof(ctxt));
1602	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
1603	int_pkt->message_type.type =
1604		PCI_DELETE_INTERRUPT_MESSAGE;
1605	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1606	int_pkt->int_desc = *int_desc;
1607	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1608			 0, VM_PKT_DATA_INBAND, 0);
1609	kfree(int_desc);
1610}
1611
1612/**
1613 * hv_msi_free() - Free the MSI.
1614 * @domain:	The interrupt domain pointer
1615 * @info:	Extra MSI-related context
1616 * @irq:	Identifies the IRQ.
1617 *
1618 * The Hyper-V parent partition and hypervisor are tracking the
1619 * messages that are in use, keeping the interrupt redirection
1620 * table up to date.  This callback sends a message that frees
1621 * the IRT entry and related tracking nonsense.
1622 */
1623static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1624			unsigned int irq)
1625{
1626	struct hv_pcibus_device *hbus;
1627	struct hv_pci_dev *hpdev;
1628	struct pci_dev *pdev;
1629	struct tran_int_desc *int_desc;
1630	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1631	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1632
1633	pdev = msi_desc_to_pci_dev(msi);
1634	hbus = info->data;
1635	int_desc = irq_data_get_irq_chip_data(irq_data);
1636	if (!int_desc)
1637		return;
1638
1639	irq_data->chip_data = NULL;
1640	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1641	if (!hpdev) {
1642		kfree(int_desc);
1643		return;
1644	}
1645
1646	hv_int_desc_free(hpdev, int_desc);
1647	put_pcichild(hpdev);
1648}
1649
1650static void hv_irq_mask(struct irq_data *data)
1651{
1652	pci_msi_mask_irq(data);
1653	if (data->parent_data->chip->irq_mask)
1654		irq_chip_mask_parent(data);
1655}
1656
1657static void hv_irq_unmask(struct irq_data *data)
1658{
1659	hv_arch_irq_unmask(data);
1660
1661	if (data->parent_data->chip->irq_unmask)
1662		irq_chip_unmask_parent(data);
1663	pci_msi_unmask_irq(data);
1664}
1665
1666struct compose_comp_ctxt {
1667	struct hv_pci_compl comp_pkt;
1668	struct tran_int_desc int_desc;
1669};
1670
1671static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1672				 int resp_packet_size)
1673{
1674	struct compose_comp_ctxt *comp_pkt = context;
1675	struct pci_create_int_response *int_resp =
1676		(struct pci_create_int_response *)resp;
1677
1678	if (resp_packet_size < sizeof(*int_resp)) {
1679		comp_pkt->comp_pkt.completion_status = -1;
1680		goto out;
1681	}
1682	comp_pkt->comp_pkt.completion_status = resp->status;
1683	comp_pkt->int_desc = int_resp->int_desc;
1684out:
1685	complete(&comp_pkt->comp_pkt.host_event);
1686}
1687
1688static u32 hv_compose_msi_req_v1(
1689	struct pci_create_interrupt *int_pkt,
1690	u32 slot, u8 vector, u16 vector_count)
1691{
1692	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1693	int_pkt->wslot.slot = slot;
1694	int_pkt->int_desc.vector = vector;
1695	int_pkt->int_desc.vector_count = vector_count;
1696	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1697
1698	/*
1699	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1700	 * hv_irq_unmask().
1701	 */
1702	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1703
1704	return sizeof(*int_pkt);
1705}
1706
1707/*
1708 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
1709 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
1710 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
1711 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
1712 * not irrelevant because Hyper-V chooses the physical CPU to handle the
1713 * interrupts based on the vCPU specified in message sent to the vPCI VSP in
1714 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
1715 * but assigning too many vPCI device interrupts to the same pCPU can cause a
1716 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
1717 * to spread out the pCPUs that it selects.
1718 *
1719 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
1720 * to always return the same dummy vCPU, because a second call to
1721 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
1722 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
1723 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
1724 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
1725 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
1726 * the same pCPU, even though the vCPUs will be spread out by later calls
1727 * to hv_irq_unmask(), but that is the best we can do now.
1728 *
1729 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1730 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1731 * enhancement is planned for a future version. With that enhancement, the
1732 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1733 * device will be spread across multiple pCPUs.
1734 */
1735
1736/*
1737 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1738 * by subsequent retarget in hv_irq_unmask().
1739 */
1740static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1741{
1742	return cpumask_first_and(affinity, cpu_online_mask);
1743}
1744
1745/*
1746 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
1747 */
1748static int hv_compose_multi_msi_req_get_cpu(void)
1749{
1750	static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1751
1752	/* -1 means starting with CPU 0 */
1753	static int cpu_next = -1;
1754
1755	unsigned long flags;
1756	int cpu;
1757
1758	spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1759
1760	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
1761				     false);
1762	cpu = cpu_next;
1763
1764	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1765
1766	return cpu;
1767}
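
/*
 * Illustrative sketch (not driver logic): with vCPUs 0-3 online, successive
 * calls return 0, 1, 2, 3, 0, 1, ...  Because cpumask_next_wrap() walks
 * cpu_online_mask, offline vCPUs are skipped; with vCPU 2 offline the
 * sequence would be 0, 1, 3, 0, 1, 3, ...
 */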
1768
1769static u32 hv_compose_msi_req_v2(
1770	struct pci_create_interrupt2 *int_pkt, int cpu,
1771	u32 slot, u8 vector, u16 vector_count)
1772{
1773	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1774	int_pkt->wslot.slot = slot;
1775	int_pkt->int_desc.vector = vector;
1776	int_pkt->int_desc.vector_count = vector_count;
1777	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1778	int_pkt->int_desc.processor_array[0] =
1779		hv_cpu_number_to_vp_number(cpu);
1780	int_pkt->int_desc.processor_count = 1;
1781
1782	return sizeof(*int_pkt);
1783}
1784
1785static u32 hv_compose_msi_req_v3(
1786	struct pci_create_interrupt3 *int_pkt, int cpu,
1787	u32 slot, u32 vector, u16 vector_count)
1788{
1789	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1790	int_pkt->wslot.slot = slot;
1791	int_pkt->int_desc.vector = vector;
1792	int_pkt->int_desc.reserved = 0;
1793	int_pkt->int_desc.vector_count = vector_count;
1794	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1795	int_pkt->int_desc.processor_array[0] =
1796		hv_cpu_number_to_vp_number(cpu);
1797	int_pkt->int_desc.processor_count = 1;
1798
1799	return sizeof(*int_pkt);
1800}
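
/*
 * Summary of the three request variants above: v1 (protocol 1.1) targets
 * all vCPUs via cpu_mask and takes an 8-bit vector; v2 (1.2/1.3) instead
 * names one target processor in processor_array[]; v3 (1.4) additionally
 * widens the vector to 32 bits, which non-x86 guests (e.g. ARM64)
 * presumably need because their interrupt IDs can exceed 255.
 */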
1801
1802/**
1803 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1804 * @data:	Everything about this MSI
1805 * @msg:	Buffer that is filled in by this function
1806 *
1807 * This function unpacks the IRQ, looking for the target CPU set, IDT
1808 * vector and mode, and sends a message to the parent partition
1809 * asking for a mapping for that tuple in this partition.  The
1810 * response supplies a data value and address to which that data
1811 * should be written to trigger that interrupt.
1812 */
1813static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1814{
1815	struct hv_pcibus_device *hbus;
1816	struct vmbus_channel *channel;
1817	struct hv_pci_dev *hpdev;
1818	struct pci_bus *pbus;
1819	struct pci_dev *pdev;
1820	const struct cpumask *dest;
1821	struct compose_comp_ctxt comp;
1822	struct tran_int_desc *int_desc;
1823	struct msi_desc *msi_desc;
1824	/*
1825	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1826	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1827	 */
1828	u16 vector_count;
1829	u32 vector;
1830	struct {
1831		struct pci_packet pci_pkt;
1832		union {
1833			struct pci_create_interrupt v1;
1834			struct pci_create_interrupt2 v2;
1835			struct pci_create_interrupt3 v3;
1836		} int_pkts;
1837	} __packed ctxt;
1838	bool multi_msi;
1839	u64 trans_id;
1840	u32 size;
1841	int ret;
1842	int cpu;
1843
1844	msi_desc  = irq_data_get_msi_desc(data);
1845	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1846		    msi_desc->nvec_used > 1;
1847
1848	/* Reuse the previous allocation */
1849	if (data->chip_data && multi_msi) {
1850		int_desc = data->chip_data;
1851		msg->address_hi = int_desc->address >> 32;
1852		msg->address_lo = int_desc->address & 0xffffffff;
1853		msg->data = int_desc->data;
1854		return;
1855	}
1856
1857	pdev = msi_desc_to_pci_dev(msi_desc);
1858	dest = irq_data_get_effective_affinity_mask(data);
1859	pbus = pdev->bus;
1860	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1861	channel = hbus->hdev->channel;
1862	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1863	if (!hpdev)
1864		goto return_null_message;
1865
1866	/* Free any previous message that might have already been composed. */
1867	if (data->chip_data && !multi_msi) {
1868		int_desc = data->chip_data;
1869		data->chip_data = NULL;
1870		hv_int_desc_free(hpdev, int_desc);
1871	}
1872
1873	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1874	if (!int_desc)
1875		goto drop_reference;
1876
1877	if (multi_msi) {
1878		/*
1879		 * If this is not the first MSI of Multi MSI, we already have
1880		 * a mapping.  Can exit early.
1881		 */
1882		if (msi_desc->irq != data->irq) {
1883			data->chip_data = int_desc;
1884			int_desc->address = msi_desc->msg.address_lo |
1885					    (u64)msi_desc->msg.address_hi << 32;
1886			int_desc->data = msi_desc->msg.data +
1887					 (data->irq - msi_desc->irq);
1888			msg->address_hi = msi_desc->msg.address_hi;
1889			msg->address_lo = msi_desc->msg.address_lo;
1890			msg->data = int_desc->data;
1891			put_pcichild(hpdev);
1892			return;
1893		}
1894		/*
1895		 * The vector we select here is a dummy value.  The correct
1896		 * value gets sent to the hypervisor in unmask().  This needs
1897		 * to be aligned with the count, and also not zero.  Multi-MSI
1898		 * vector counts are powers of 2 up to 32, so 32 always works here.
1899		 */
1900		vector = 32;
1901		vector_count = msi_desc->nvec_used;
1902		cpu = hv_compose_multi_msi_req_get_cpu();
1903	} else {
1904		vector = hv_msi_get_int_vector(data);
1905		vector_count = 1;
1906		cpu = hv_compose_msi_req_get_cpu(dest);
1907	}
1908
1909	/*
1910	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1911	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1912	 * for better readability.
1913	 */
1914	memset(&ctxt, 0, sizeof(ctxt));
1915	init_completion(&comp.comp_pkt.host_event);
1916	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1917	ctxt.pci_pkt.compl_ctxt = &comp;
1918
1919	switch (hbus->protocol_version) {
1920	case PCI_PROTOCOL_VERSION_1_1:
1921		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1922					hpdev->desc.win_slot.slot,
1923					(u8)vector,
1924					vector_count);
1925		break;
1926
1927	case PCI_PROTOCOL_VERSION_1_2:
1928	case PCI_PROTOCOL_VERSION_1_3:
1929		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1930					cpu,
1931					hpdev->desc.win_slot.slot,
1932					(u8)vector,
1933					vector_count);
1934		break;
1935
1936	case PCI_PROTOCOL_VERSION_1_4:
1937		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1938					cpu,
1939					hpdev->desc.win_slot.slot,
1940					vector,
1941					vector_count);
1942		break;
1943
1944	default:
1945		/* As we only negotiate protocol versions known to this driver,
1946		 * this path should never be hit. However, this is not a hot
1947		 * path, so we print a message to aid future updates.
1948		 */
1949		dev_err(&hbus->hdev->device,
1950			"Unexpected vPCI protocol, update driver.\n");
1951		goto free_int_desc;
1952	}
1953
1954	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1955				     size, (unsigned long)&ctxt.pci_pkt,
1956				     &trans_id, VM_PKT_DATA_INBAND,
1957				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1958	if (ret) {
1959		dev_err(&hbus->hdev->device,
1960			"Sending request for interrupt failed: %d\n",
1961			ret);
1962		goto free_int_desc;
1963	}
1964
1965	/*
1966	 * Prevents hv_pci_onchannelcallback() from running concurrently
1967	 * in the tasklet.
1968	 */
1969	tasklet_disable_in_atomic(&channel->callback_event);
1970
1971	/*
1972	 * Since this function is called with IRQ locks held, we can't
1973	 * do a normal wait for completion; poll instead.
1974	 */
1975	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1976		unsigned long flags;
1977
1978		/* 0xFFFF means an invalid PCI VENDOR ID. */
1979		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1980			dev_err_once(&hbus->hdev->device,
1981				     "the device has gone\n");
1982			goto enable_tasklet;
1983		}
1984
1985		/*
1986		 * Make sure that the ring buffer data structure doesn't get
1987		 * freed while we dereference the ring buffer pointer.  Test
1988		 * for the channel's onchannel_callback being NULL within a
1989		 * sched_lock critical section.  See also the inline comments
1990		 * in vmbus_reset_channel_cb().
1991		 */
1992		spin_lock_irqsave(&channel->sched_lock, flags);
1993		if (unlikely(channel->onchannel_callback == NULL)) {
1994			spin_unlock_irqrestore(&channel->sched_lock, flags);
1995			goto enable_tasklet;
1996		}
1997		hv_pci_onchannelcallback(hbus);
1998		spin_unlock_irqrestore(&channel->sched_lock, flags);
1999
2000		udelay(100);
2001	}
2002
2003	tasklet_enable(&channel->callback_event);
2004
2005	if (comp.comp_pkt.completion_status < 0) {
2006		dev_err(&hbus->hdev->device,
2007			"Request for interrupt failed: 0x%x",
2008			comp.comp_pkt.completion_status);
2009		goto free_int_desc;
2010	}
2011
2012	/*
2013	 * Record the assignment so that this can be unwound later. Using
2014	 * irq_set_chip_data() here would be appropriate, but the lock it takes
2015	 * is already held.
2016	 */
2017	*int_desc = comp.int_desc;
2018	data->chip_data = int_desc;
2019
2020	/* Pass up the result. */
2021	msg->address_hi = comp.int_desc.address >> 32;
2022	msg->address_lo = comp.int_desc.address & 0xffffffff;
2023	msg->data = comp.int_desc.data;
2024
2025	put_pcichild(hpdev);
2026	return;
2027
2028enable_tasklet:
2029	tasklet_enable(&channel->callback_event);
2030	/*
2031	 * The completion packet on the stack becomes invalid after 'return';
2032	 * remove the ID from the VMbus requestor if the identifier is still
2033	 * mapped to/associated with the packet.  (The identifier could have
2034	 * been 're-used', i.e., already removed and (re-)mapped.)
2035	 *
2036	 * Cf. hv_pci_onchannelcallback().
2037	 */
2038	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2039free_int_desc:
2040	kfree(int_desc);
2041drop_reference:
2042	put_pcichild(hpdev);
2043return_null_message:
2044	msg->address_hi = 0;
2045	msg->address_lo = 0;
2046	msg->data = 0;
2047}
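
/*
 * A sketch of the flow above, for reference:
 *
 *	compose a v1/v2/v3 request  ->  vmbus_sendpacket_getid()
 *	  ->  poll for the completion (IRQ-safe, tasklet disabled)
 *	  ->  cache the returned tran_int_desc in data->chip_data
 *	  ->  hand the address/data pair back through *msg
 */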
2048
2049/* HW Interrupt Chip Descriptor */
2050static struct irq_chip hv_msi_irq_chip = {
2051	.name			= "Hyper-V PCIe MSI",
2052	.irq_compose_msi_msg	= hv_compose_msi_msg,
2053	.irq_set_affinity	= irq_chip_set_affinity_parent,
2054#ifdef CONFIG_X86
2055	.irq_ack		= irq_chip_ack_parent,
2056#elif defined(CONFIG_ARM64)
2057	.irq_eoi		= irq_chip_eoi_parent,
2058#endif
2059	.irq_mask		= hv_irq_mask,
2060	.irq_unmask		= hv_irq_unmask,
2061};
2062
2063static struct msi_domain_ops hv_msi_ops = {
2064	.msi_prepare	= hv_msi_prepare,
2065	.msi_free	= hv_msi_free,
2066};
2067
2068/**
2069 * hv_pcie_init_irq_domain() - Initialize IRQ domain
2070 * @hbus:	The root PCI bus
2071 *
2072 * This function creates an IRQ domain which will be used for
2073 * interrupts from devices that have been passed through.  These
2074 * devices only support MSI and MSI-X, not line-based interrupts
2075 * or simulations of line-based interrupts through PCIe's
2076 * fabric-layer messages.  Because interrupts are remapped, we
2077 * can support multi-message MSI here.
2078 *
2079 * Return: '0' on success and error value on failure
2080 */
2081static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2082{
2083	hbus->msi_info.chip = &hv_msi_irq_chip;
2084	hbus->msi_info.ops = &hv_msi_ops;
2085	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
2086		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
2087		MSI_FLAG_PCI_MSIX);
2088	hbus->msi_info.handler = FLOW_HANDLER;
2089	hbus->msi_info.handler_name = FLOW_NAME;
2090	hbus->msi_info.data = hbus;
2091	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
2092						     &hbus->msi_info,
2093						     hv_pci_get_root_domain());
2094	if (!hbus->irq_domain) {
2095		dev_err(&hbus->hdev->device,
2096			"Failed to build an MSI IRQ domain\n");
2097		return -ENODEV;
2098	}
2099
2100	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2101
2102	return 0;
2103}
2104
2105/**
2106 * get_bar_size() - Get the address space consumed by a BAR
2107 * @bar_val:	Value that a BAR returned after -1 was written
2108 *              to it.
2109 *
2110 * This function returns the size of the BAR, rounded up to 1
2111 * page.  It has to be rounded up because the hypervisor's page
2112 * table entry that maps the BAR into the VM can't specify an
2113 * offset within a page.  The invariant is that the hypervisor
2114 * must place any BARs smaller than page length at the
2115 * beginning of a page.
2116 *
2117 * Return:	Size in bytes of the consumed MMIO space.
2118 */
2119static u64 get_bar_size(u64 bar_val)
2120{
2121	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2122			PAGE_SIZE);
2123}
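
/*
 * Worked example (a sketch, not driver code): a 64KB 32-bit memory BAR
 * probes as 0xffff0000 plus flag bits, and the callers below OR in
 * 0xffffffff00000000 for 32-bit BARs, so:
 *
 *	bar_val = 0xffffffffffff0000ULL;
 *	size    = 1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK);	// 0x10000
 *	size    = round_up(size, PAGE_SIZE);			// still 64KB
 */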
2124
2125/**
2126 * survey_child_resources() - Total all MMIO requirements
2127 * @hbus:	Root PCI bus, as understood by this driver
2128 */
2129static void survey_child_resources(struct hv_pcibus_device *hbus)
2130{
2131	struct hv_pci_dev *hpdev;
2132	resource_size_t bar_size = 0;
2133	unsigned long flags;
2134	struct completion *event;
2135	u64 bar_val;
2136	int i;
2137
2138	/* If nobody is waiting on the answer, don't compute it. */
2139	event = xchg(&hbus->survey_event, NULL);
2140	if (!event)
2141		return;
2142
2143	/* If the answer has already been computed, go with it. */
2144	if (hbus->low_mmio_space || hbus->high_mmio_space) {
2145		complete(event);
2146		return;
2147	}
2148
2149	spin_lock_irqsave(&hbus->device_list_lock, flags);
2150
2151	/*
2152	 * Due to an interesting quirk of the PCI spec, all memory regions
2153	 * for a child device are a power of 2 in size and aligned in memory,
2154	 * so it's sufficient to just add them up without tracking alignment.
2155	 */
2156	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2157		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2158			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2159				dev_err(&hbus->hdev->device,
2160					"There's an I/O BAR in this list!\n");
2161
2162			if (hpdev->probed_bar[i] != 0) {
2163				/*
2164				 * A probed BAR has all the upper bits set that
2165				 * can be changed.
2166				 */
2167
2168				bar_val = hpdev->probed_bar[i];
2169				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2170					bar_val |=
2171					((u64)hpdev->probed_bar[++i] << 32);
2172				else
2173					bar_val |= 0xffffffff00000000ULL;
2174
2175				bar_size = get_bar_size(bar_val);
2176
2177				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2178					hbus->high_mmio_space += bar_size;
2179				else
2180					hbus->low_mmio_space += bar_size;
2181			}
2182		}
2183	}
2184
2185	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2186	complete(event);
2187}
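
/*
 * Worked example: a child with one 4KB 32-bit BAR and one 2MB 64-bit BAR
 * contributes 0x1000 to hbus->low_mmio_space and 0x200000 to
 * hbus->high_mmio_space. The 64-bit BAR consumes two probed_bar[] slots,
 * which is why the loop above bumps 'i' after reading the upper half.
 */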
2188
2189/**
2190 * prepopulate_bars() - Fill in BARs with defaults
2191 * @hbus:	Root PCI bus, as understood by this driver
2192 *
2193 * The core PCI driver code seems much, much happier if the BARs
2194 * for a device have values upon first scan. So fill them in.
2195 * The algorithm below works down from large sizes to small,
2196 * attempting to pack the assignments optimally. The assumption,
2197 * enforced in other parts of the code, is that the beginning of
2198 * the memory-mapped I/O space will be aligned on the largest
2199 * BAR size.
2200 */
2201static void prepopulate_bars(struct hv_pcibus_device *hbus)
2202{
2203	resource_size_t high_size = 0;
2204	resource_size_t low_size = 0;
2205	resource_size_t high_base = 0;
2206	resource_size_t low_base = 0;
2207	resource_size_t bar_size;
2208	struct hv_pci_dev *hpdev;
2209	unsigned long flags;
2210	u64 bar_val;
2211	u32 command;
2212	bool high;
2213	int i;
2214
2215	if (hbus->low_mmio_space) {
2216		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2217		low_base = hbus->low_mmio_res->start;
2218	}
2219
2220	if (hbus->high_mmio_space) {
2221		high_size = 1ULL <<
2222			(63 - __builtin_clzll(hbus->high_mmio_space));
2223		high_base = hbus->high_mmio_res->start;
2224	}
2225
2226	spin_lock_irqsave(&hbus->device_list_lock, flags);
2227
2228	/*
2229	 * Clear the memory enable bit, in case it's already set. This occurs
2230	 * in the suspend path of hibernation, where the device is suspended,
2231	 * resumed and suspended again: see hibernation_snapshot() and
2232	 * hibernation_platform_enter().
2233	 *
2234	 * If the memory enable bit is already set, Hyper-V silently ignores
2235	 * the BAR updates below, and the related PCI device driver cannot
2236	 * work, because reading from the device register(s) always returns
2237	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2238	 */
2239	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2240		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2241		command &= ~PCI_COMMAND_MEMORY;
2242		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2243	}
2244
2245	/* Pick addresses for the BARs. */
2246	do {
2247		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2248			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2249				bar_val = hpdev->probed_bar[i];
2250				if (bar_val == 0)
2251					continue;
2252				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2253				if (high) {
2254					bar_val |=
2255						((u64)hpdev->probed_bar[i + 1]
2256						 << 32);
2257				} else {
2258					bar_val |= 0xffffffffULL << 32;
2259				}
2260				bar_size = get_bar_size(bar_val);
2261				if (high) {
2262					if (high_size != bar_size) {
2263						i++;
2264						continue;
2265					}
2266					_hv_pcifront_write_config(hpdev,
2267						PCI_BASE_ADDRESS_0 + (4 * i),
2268						4,
2269						(u32)(high_base & 0xffffff00));
2270					i++;
2271					_hv_pcifront_write_config(hpdev,
2272						PCI_BASE_ADDRESS_0 + (4 * i),
2273						4, (u32)(high_base >> 32));
2274					high_base += bar_size;
2275				} else {
2276					if (low_size != bar_size)
2277						continue;
2278					_hv_pcifront_write_config(hpdev,
2279						PCI_BASE_ADDRESS_0 + (4 * i),
2280						4,
2281						(u32)(low_base & 0xffffff00));
2282					low_base += bar_size;
2283				}
2284			}
2285			if (high_size <= 1 && low_size <= 1) {
2286				/*
2287				 * No need to set the PCI_COMMAND_MEMORY bit as
2288				 * the core PCI driver doesn't require the bit
2289				 * to be pre-set. Actually here we intentionally
2290				 * keep the bit off so that the PCI BAR probing
2291				 * in the core PCI driver doesn't cause Hyper-V
2292				 * to unnecessarily unmap/map the virtual BARs
2293				 * from/to the physical BARs multiple times.
2294				 * This reduces the VM boot time significantly
2295				 * if the BAR sizes are huge.
2296				 */
2297				break;
2298			}
2299		}
2300
2301		high_size >>= 1;
2302		low_size >>= 1;
2303	} while (high_size || low_size);
2304
2305	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2306}
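
/*
 * Worked example (assuming a low MMIO window starting at 0xfe000000): with
 * low BARs of 2MB and 4KB, the first pass (low_size == 2MB) assigns the
 * 2MB BAR at 0xfe000000; low_size then halves each pass until the 4KB pass
 * assigns the small BAR at 0xfe200000. Working from large sizes down keeps
 * every BAR naturally aligned without tracking alignment explicitly.
 */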
2307
2308/*
2309 * Assign entries in sysfs pci slot directory.
2310 *
2311 * Note that this function does not need to lock the children list
2312 * because it is called from pci_devices_present_work which
2313 * is serialized with hv_eject_device_work because they are on the
2314 * same ordered workqueue. Therefore hbus->children list will not change
2315 * even when pci_create_slot sleeps.
2316 */
2317static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2318{
2319	struct hv_pci_dev *hpdev;
2320	char name[SLOT_NAME_SIZE];
2321	int slot_nr;
2322
2323	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2324		if (hpdev->pci_slot)
2325			continue;
2326
2327		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2328		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2329		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2330					  name, NULL);
2331		if (IS_ERR(hpdev->pci_slot)) {
2332			pr_warn("pci_create_slot %s failed\n", name);
2333			hpdev->pci_slot = NULL;
2334		}
2335	}
2336}
2337
2338/*
2339 * Remove entries in sysfs pci slot directory.
2340 */
2341static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2342{
2343	struct hv_pci_dev *hpdev;
2344
2345	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2346		if (!hpdev->pci_slot)
2347			continue;
2348		pci_destroy_slot(hpdev->pci_slot);
2349		hpdev->pci_slot = NULL;
2350	}
2351}
2352
2353/*
2354 * Set NUMA node for the devices on the bus
2355 */
2356static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2357{
2358	struct pci_dev *dev;
2359	struct pci_bus *bus = hbus->bridge->bus;
2360	struct hv_pci_dev *hv_dev;
2361
2362	list_for_each_entry(dev, &bus->devices, bus_list) {
2363		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2364		if (!hv_dev)
2365			continue;
2366
2367		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2368		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
2369			/*
2370			 * The kernel may boot with some NUMA nodes offline
2371			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2372			 * "numa=off". In those cases, adjust the host provided
2373			 * NUMA node to a valid NUMA node used by the kernel.
2374			 */
2375			set_dev_node(&dev->dev,
2376				     numa_map_to_online_node(
2377					     hv_dev->desc.virtual_numa_node));
2378
2379		put_pcichild(hv_dev);
2380	}
2381}
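
/*
 * For example, if the host reports virtual_numa_node 3 but the kernel was
 * booted with "numa=off", numa_map_to_online_node() folds that back to an
 * online node (node 0 in that case), so set_dev_node() never points a
 * device at an offline node.
 */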
2382
2383/**
2384 * create_root_hv_pci_bus() - Expose a new root PCI bus
2385 * @hbus:	Root PCI bus, as understood by this driver
2386 *
2387 * Return: 0 on success, -errno on failure
2388 */
2389static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2390{
2391	int error;
2392	struct pci_host_bridge *bridge = hbus->bridge;
2393
2394	bridge->dev.parent = &hbus->hdev->device;
2395	bridge->sysdata = &hbus->sysdata;
2396	bridge->ops = &hv_pcifront_ops;
2397
2398	error = pci_scan_root_bus_bridge(bridge);
2399	if (error)
2400		return error;
2401
2402	pci_lock_rescan_remove();
2403	hv_pci_assign_numa_node(hbus);
2404	pci_bus_assign_resources(bridge->bus);
2405	hv_pci_assign_slots(hbus);
2406	pci_bus_add_devices(bridge->bus);
2407	pci_unlock_rescan_remove();
2408	hbus->state = hv_pcibus_installed;
2409	return 0;
2410}
2411
2412struct q_res_req_compl {
2413	struct completion host_event;
2414	struct hv_pci_dev *hpdev;
2415};
2416
2417/**
2418 * q_resource_requirements() - Query Resource Requirements
2419 * @context:		The completion context.
2420 * @resp:		The response that came from the host.
2421 * @resp_packet_size:	The size in bytes of resp.
2422 *
2423 * This function is invoked on completion of a Query Resource
2424 * Requirements packet.
2425 */
2426static void q_resource_requirements(void *context, struct pci_response *resp,
2427				    int resp_packet_size)
2428{
2429	struct q_res_req_compl *completion = context;
2430	struct pci_q_res_req_response *q_res_req =
2431		(struct pci_q_res_req_response *)resp;
2432	s32 status;
2433	int i;
2434
2435	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2436	if (status < 0) {
2437		dev_err(&completion->hpdev->hbus->hdev->device,
2438			"query resource requirements failed: %x\n",
2439			status);
2440	} else {
2441		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2442			completion->hpdev->probed_bar[i] =
2443				q_res_req->probed_bar[i];
2444		}
2445	}
2446
2447	complete(&completion->host_event);
2448}
2449
2450/**
2451 * new_pcichild_device() - Create a new child device
2452 * @hbus:	The internal struct tracking this root PCI bus.
2453 * @desc:	The information supplied so far from the host
2454 *              about the device.
2455 *
2456 * This function creates the tracking structure for a new child
2457 * device and kicks off the process of figuring out what it is.
2458 *
2459 * Return: Pointer to the new tracking struct
2460 */
2461static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2462		struct hv_pcidev_description *desc)
2463{
2464	struct hv_pci_dev *hpdev;
2465	struct pci_child_message *res_req;
2466	struct q_res_req_compl comp_pkt;
2467	struct {
2468		struct pci_packet init_packet;
2469		u8 buffer[sizeof(struct pci_child_message)];
2470	} pkt;
2471	unsigned long flags;
2472	int ret;
2473
2474	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2475	if (!hpdev)
2476		return NULL;
2477
2478	hpdev->hbus = hbus;
2479
2480	memset(&pkt, 0, sizeof(pkt));
2481	init_completion(&comp_pkt.host_event);
2482	comp_pkt.hpdev = hpdev;
2483	pkt.init_packet.compl_ctxt = &comp_pkt;
2484	pkt.init_packet.completion_func = q_resource_requirements;
2485	res_req = (struct pci_child_message *)&pkt.init_packet.message;
2486	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2487	res_req->wslot.slot = desc->win_slot.slot;
2488
2489	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2490			       sizeof(struct pci_child_message),
2491			       (unsigned long)&pkt.init_packet,
2492			       VM_PKT_DATA_INBAND,
2493			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2494	if (ret)
2495		goto error;
2496
2497	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2498		goto error;
2499
2500	hpdev->desc = *desc;
2501	refcount_set(&hpdev->refs, 1);
2502	get_pcichild(hpdev);
2503	spin_lock_irqsave(&hbus->device_list_lock, flags);
2504
2505	list_add_tail(&hpdev->list_entry, &hbus->children);
2506	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2507	return hpdev;
2508
2509error:
2510	kfree(hpdev);
2511	return NULL;
2512}
2513
2514/**
2515 * get_pcichild_wslot() - Find device from slot
2516 * @hbus:	Root PCI bus, as understood by this driver
2517 * @wslot:	Location on the bus
2518 *
2519 * This function looks up a PCI device and returns the internal
2520 * representation of it.  It acquires a reference on it, so that
2521 * the device won't be deleted while somebody is using it.  The
2522 * caller is responsible for calling put_pcichild() to release
2523 * this reference.
2524 *
2525 * Return:	Internal representation of a PCI device
2526 */
2527static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2528					     u32 wslot)
2529{
2530	unsigned long flags;
2531	struct hv_pci_dev *iter, *hpdev = NULL;
2532
2533	spin_lock_irqsave(&hbus->device_list_lock, flags);
2534	list_for_each_entry(iter, &hbus->children, list_entry) {
2535		if (iter->desc.win_slot.slot == wslot) {
2536			hpdev = iter;
2537			get_pcichild(hpdev);
2538			break;
2539		}
2540	}
2541	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2542
2543	return hpdev;
2544}
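
/*
 * Usage sketch (the surrounding logic is hypothetical):
 *
 *	hpdev = get_pcichild_wslot(hbus, wslot);
 *	if (!hpdev)
 *		return;			// no child in that slot
 *	// ... use hpdev->desc, send packets for the device, etc. ...
 *	put_pcichild(hpdev);		// drop the reference taken above
 */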
2545
2546/**
2547 * pci_devices_present_work() - Handle new list of child devices
2548 * @work:	Work struct embedded in struct hv_dr_work
2549 *
2550 * "Bus Relations" is the Windows term for "children of this
2551 * bus."  The terminology is preserved here for people trying to
2552 * debug the interaction between Hyper-V and Linux.  This
2553 * function is called when the parent partition reports a list
2554 * of functions that should be observed under this PCI Express
2555 * port (bus).
2556 *
2557 * This function updates the list, and must tolerate being
2558 * called multiple times with the same information.  The typical
2559 * number of child devices is one, with very atypical cases
2560 * involving three or four, so the algorithms used here can be
2561 * simple and inefficient.
2562 *
2563 * It must also treat the omission of a previously observed device as
2564 * notification that the device no longer exists.
2565 *
2566 * Note that this function is serialized with hv_eject_device_work(),
2567 * because both are pushed to the ordered workqueue hbus->wq.
2568 */
2569static void pci_devices_present_work(struct work_struct *work)
2570{
2571	u32 child_no;
2572	bool found;
2573	struct hv_pcidev_description *new_desc;
2574	struct hv_pci_dev *hpdev;
2575	struct hv_pcibus_device *hbus;
2576	struct list_head removed;
2577	struct hv_dr_work *dr_wrk;
2578	struct hv_dr_state *dr = NULL;
2579	unsigned long flags;
2580
2581	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2582	hbus = dr_wrk->bus;
2583	kfree(dr_wrk);
2584
2585	INIT_LIST_HEAD(&removed);
2586
2587	/* Pull this off the queue and process it if it was the last one. */
2588	spin_lock_irqsave(&hbus->device_list_lock, flags);
2589	while (!list_empty(&hbus->dr_list)) {
2590		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2591				      list_entry);
2592		list_del(&dr->list_entry);
2593
2594		/* Throw this away if the list still has stuff in it. */
2595		if (!list_empty(&hbus->dr_list)) {
2596			kfree(dr);
2597			continue;
2598		}
2599	}
2600	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2601
2602	if (!dr)
2603		return;
2604
2605	mutex_lock(&hbus->state_lock);
2606
2607	/* First, mark all existing children as reported missing. */
2608	spin_lock_irqsave(&hbus->device_list_lock, flags);
2609	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2610		hpdev->reported_missing = true;
2611	}
2612	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2613
2614	/* Next, add back any reported devices. */
2615	for (child_no = 0; child_no < dr->device_count; child_no++) {
2616		found = false;
2617		new_desc = &dr->func[child_no];
2618
2619		spin_lock_irqsave(&hbus->device_list_lock, flags);
2620		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2621			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2622			    (hpdev->desc.v_id == new_desc->v_id) &&
2623			    (hpdev->desc.d_id == new_desc->d_id) &&
2624			    (hpdev->desc.ser == new_desc->ser)) {
2625				hpdev->reported_missing = false;
2626				found = true;
2627			}
2628		}
2629		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2630
2631		if (!found) {
2632			hpdev = new_pcichild_device(hbus, new_desc);
2633			if (!hpdev)
2634				dev_err(&hbus->hdev->device,
2635					"couldn't record a child device.\n");
2636		}
2637	}
2638
2639	/* Move missing children to a list on the stack. */
2640	spin_lock_irqsave(&hbus->device_list_lock, flags);
2641	do {
2642		found = false;
2643		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2644			if (hpdev->reported_missing) {
2645				found = true;
2646				put_pcichild(hpdev);
2647				list_move_tail(&hpdev->list_entry, &removed);
2648				break;
2649			}
2650		}
2651	} while (found);
2652	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2653
2654	/* Delete everything that should no longer exist. */
2655	while (!list_empty(&removed)) {
2656		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2657					 list_entry);
2658		list_del(&hpdev->list_entry);
2659
2660		if (hpdev->pci_slot)
2661			pci_destroy_slot(hpdev->pci_slot);
2662
2663		put_pcichild(hpdev);
2664	}
2665
2666	switch (hbus->state) {
2667	case hv_pcibus_installed:
2668		/*
2669		 * Tell the core to rescan bus
2670		 * because there may have been changes.
2671		 */
2672		pci_lock_rescan_remove();
2673		pci_scan_child_bus(hbus->bridge->bus);
2674		hv_pci_assign_numa_node(hbus);
2675		hv_pci_assign_slots(hbus);
2676		pci_unlock_rescan_remove();
2677		break;
2678
2679	case hv_pcibus_init:
2680	case hv_pcibus_probed:
2681		survey_child_resources(hbus);
2682		break;
2683
2684	default:
2685		break;
2686	}
2687
2688	mutex_unlock(&hbus->state_lock);
2689
2690	kfree(dr);
2691}
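
/*
 * A sketch of the reconciliation above (a mark-and-sweep):
 *
 *	1. mark every known child as reported_missing
 *	2. for each entry in the new relations list: unmark a matching
 *	   child, or create one with new_pcichild_device()
 *	3. move children still marked missing to a stack-local list and
 *	   destroy them
 */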
2692
2693/**
2694 * hv_pci_start_relations_work() - Queue work to start device discovery
2695 * @hbus:	Root PCI bus, as understood by this driver
2696 * @dr:		The list of children returned from host
2697 *
2698 * Return:  0 on success, -errno on failure
2699 */
2700static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2701				       struct hv_dr_state *dr)
2702{
2703	struct hv_dr_work *dr_wrk;
2704	unsigned long flags;
2705	bool pending_dr;
2706
2707	if (hbus->state == hv_pcibus_removing) {
2708		dev_info(&hbus->hdev->device,
2709			 "PCI VMBus BUS_RELATIONS: ignored\n");
2710		return -ENOENT;
2711	}
2712
2713	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2714	if (!dr_wrk)
2715		return -ENOMEM;
2716
2717	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2718	dr_wrk->bus = hbus;
2719
2720	spin_lock_irqsave(&hbus->device_list_lock, flags);
2721	/*
2722	 * If pending_dr is true, we have already queued a work,
2723	 * which will see the new dr. Otherwise, we need to
2724	 * queue a new work.
2725	 */
2726	pending_dr = !list_empty(&hbus->dr_list);
2727	list_add_tail(&dr->list_entry, &hbus->dr_list);
2728	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2729
2730	if (pending_dr)
2731		kfree(dr_wrk);
2732	else
2733		queue_work(hbus->wq, &dr_wrk->wrk);
2734
2735	return 0;
2736}
2737
2738/**
2739 * hv_pci_devices_present() - Handle list of new children
2740 * @hbus:      Root PCI bus, as understood by this driver
2741 * @relations: Packet from host listing children
2742 *
2743 * Process a new list of devices on the bus. The list of devices is
2744 * discovered by the VSP and sent to us via the VSP message PCI_BUS_RELATIONS
2745 * whenever a new list of devices for this bus appears.
2746 */
2747static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2748				   struct pci_bus_relations *relations)
2749{
2750	struct hv_dr_state *dr;
2751	int i;
2752
2753	dr = kzalloc(struct_size(dr, func, relations->device_count),
2754		     GFP_NOWAIT);
2755	if (!dr)
2756		return;
2757
2758	dr->device_count = relations->device_count;
2759	for (i = 0; i < dr->device_count; i++) {
2760		dr->func[i].v_id = relations->func[i].v_id;
2761		dr->func[i].d_id = relations->func[i].d_id;
2762		dr->func[i].rev = relations->func[i].rev;
2763		dr->func[i].prog_intf = relations->func[i].prog_intf;
2764		dr->func[i].subclass = relations->func[i].subclass;
2765		dr->func[i].base_class = relations->func[i].base_class;
2766		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2767		dr->func[i].win_slot = relations->func[i].win_slot;
2768		dr->func[i].ser = relations->func[i].ser;
2769	}
2770
2771	if (hv_pci_start_relations_work(hbus, dr))
2772		kfree(dr);
2773}
2774
2775/**
2776 * hv_pci_devices_present2() - Handle list of new children
2777 * @hbus:	Root PCI bus, as understood by this driver
2778 * @relations:	Packet from host listing children
2779 *
2780 * This function is the v2 version of hv_pci_devices_present()
2781 */
2782static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2783				    struct pci_bus_relations2 *relations)
2784{
2785	struct hv_dr_state *dr;
2786	int i;
2787
2788	dr = kzalloc(struct_size(dr, func, relations->device_count),
2789		     GFP_NOWAIT);
2790	if (!dr)
2791		return;
2792
2793	dr->device_count = relations->device_count;
2794	for (i = 0; i < dr->device_count; i++) {
2795		dr->func[i].v_id = relations->func[i].v_id;
2796		dr->func[i].d_id = relations->func[i].d_id;
2797		dr->func[i].rev = relations->func[i].rev;
2798		dr->func[i].prog_intf = relations->func[i].prog_intf;
2799		dr->func[i].subclass = relations->func[i].subclass;
2800		dr->func[i].base_class = relations->func[i].base_class;
2801		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2802		dr->func[i].win_slot = relations->func[i].win_slot;
2803		dr->func[i].ser = relations->func[i].ser;
2804		dr->func[i].flags = relations->func[i].flags;
2805		dr->func[i].virtual_numa_node =
2806			relations->func[i].virtual_numa_node;
2807	}
2808
2809	if (hv_pci_start_relations_work(hbus, dr))
2810		kfree(dr);
2811}
2812
2813/**
2814 * hv_eject_device_work() - Asynchronously handles ejection
2815 * @work:	Work struct embedded in internal device struct
2816 *
2817 * This function handles ejecting a device.  Windows will
2818 * attempt to gracefully eject a device, waiting 60 seconds to
2819 * hear back from the guest OS that this completed successfully.
2820 * If this timer expires, the device will be forcibly removed.
2821 */
2822static void hv_eject_device_work(struct work_struct *work)
2823{
2824	struct pci_eject_response *ejct_pkt;
2825	struct hv_pcibus_device *hbus;
2826	struct hv_pci_dev *hpdev;
2827	struct pci_dev *pdev;
2828	unsigned long flags;
2829	int wslot;
2830	struct {
2831		struct pci_packet pkt;
2832		u8 buffer[sizeof(struct pci_eject_response)];
2833	} ctxt;
2834
2835	hpdev = container_of(work, struct hv_pci_dev, wrk);
2836	hbus = hpdev->hbus;
2837
2838	mutex_lock(&hbus->state_lock);
2839
2840	/*
2841	 * Ejection can come before or after the PCI bus has been set up, so
2842	 * attempt to find it and tear down the bus state, if it exists.  This
2843	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2844	 * because hbus->bridge->bus may not exist yet.
2845	 */
2846	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2847	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2848	if (pdev) {
2849		pci_lock_rescan_remove();
2850		pci_stop_and_remove_bus_device(pdev);
2851		pci_dev_put(pdev);
2852		pci_unlock_rescan_remove();
2853	}
2854
2855	spin_lock_irqsave(&hbus->device_list_lock, flags);
2856	list_del(&hpdev->list_entry);
2857	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2858
2859	if (hpdev->pci_slot)
2860		pci_destroy_slot(hpdev->pci_slot);
2861
2862	memset(&ctxt, 0, sizeof(ctxt));
2863	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2864	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2865	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2866	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2867			 sizeof(*ejct_pkt), 0,
2868			 VM_PKT_DATA_INBAND, 0);
2869
2870	/* For the get_pcichild() in hv_pci_eject_device() */
2871	put_pcichild(hpdev);
2872	/* For the two refs got in new_pcichild_device() */
2873	put_pcichild(hpdev);
2874	put_pcichild(hpdev);
2875	/* hpdev has been freed. Do not use it any more. */
2876
2877	mutex_unlock(&hbus->state_lock);
2878}
2879
2880/**
2881 * hv_pci_eject_device() - Handles device ejection
2882 * @hpdev:	Internal device tracking struct
2883 *
2884 * This function is invoked when an ejection packet arrives.  It
2885 * just schedules work so that we don't re-enter the packet
2886 * delivery code handling the ejection.
2887 */
2888static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2889{
2890	struct hv_pcibus_device *hbus = hpdev->hbus;
2891	struct hv_device *hdev = hbus->hdev;
2892
2893	if (hbus->state == hv_pcibus_removing) {
2894		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2895		return;
2896	}
2897
2898	get_pcichild(hpdev);
2899	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2900	queue_work(hbus->wq, &hpdev->wrk);
2901}
2902
2903/**
2904 * hv_pci_onchannelcallback() - Handles incoming packets
2905 * @context:	Internal bus tracking struct
2906 *
2907 * This function is invoked whenever the host sends a packet to
2908 * this channel (which is private to this root PCI bus).
2909 */
2910static void hv_pci_onchannelcallback(void *context)
2911{
2912	const int packet_size = 0x100;
2913	int ret;
2914	struct hv_pcibus_device *hbus = context;
2915	struct vmbus_channel *chan = hbus->hdev->channel;
2916	u32 bytes_recvd;
2917	u64 req_id, req_addr;
2918	struct vmpacket_descriptor *desc;
2919	unsigned char *buffer;
2920	int bufferlen = packet_size;
2921	struct pci_packet *comp_packet;
2922	struct pci_response *response;
2923	struct pci_incoming_message *new_message;
2924	struct pci_bus_relations *bus_rel;
2925	struct pci_bus_relations2 *bus_rel2;
2926	struct pci_dev_inval_block *inval;
2927	struct pci_dev_incoming *dev_message;
2928	struct hv_pci_dev *hpdev;
2929	unsigned long flags;
2930
2931	buffer = kmalloc(bufferlen, GFP_ATOMIC);
2932	if (!buffer)
2933		return;
2934
2935	while (1) {
2936		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
2937					   &bytes_recvd, &req_id);
2938
2939		if (ret == -ENOBUFS) {
2940			kfree(buffer);
2941			/* Handle large packet */
2942			bufferlen = bytes_recvd;
2943			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2944			if (!buffer)
2945				return;
2946			continue;
2947		}
2948
2949		/* Zero length indicates there are no more packets. */
2950		if (ret || !bytes_recvd)
2951			break;
2952
2953		/*
2954		 * All incoming packets must be at least as large as a
2955		 * response.
2956		 */
2957		if (bytes_recvd <= sizeof(struct pci_response))
2958			continue;
2959		desc = (struct vmpacket_descriptor *)buffer;
2960
2961		switch (desc->type) {
2962		case VM_PKT_COMP:
2963
2964			lock_requestor(chan, flags);
2965			req_addr = __vmbus_request_addr_match(chan, req_id,
2966							      VMBUS_RQST_ADDR_ANY);
2967			if (req_addr == VMBUS_RQST_ERROR) {
2968				unlock_requestor(chan, flags);
2969				dev_err(&hbus->hdev->device,
2970					"Invalid transaction ID %llx\n",
2971					req_id);
2972				break;
2973			}
2974			comp_packet = (struct pci_packet *)req_addr;
2975			response = (struct pci_response *)buffer;
2976			/*
2977			 * Call ->completion_func() within the critical section to make
2978			 * sure that the packet pointer is still valid during the call:
2979			 * here 'valid' means that there's a task still waiting for the
2980			 * completion, and that the packet data is still on the waiting
2981			 * task's stack.  Cf. hv_compose_msi_msg().
2982			 */
2983			comp_packet->completion_func(comp_packet->compl_ctxt,
2984						     response,
2985						     bytes_recvd);
2986			unlock_requestor(chan, flags);
2987			break;
2988
2989		case VM_PKT_DATA_INBAND:
2990
2991			new_message = (struct pci_incoming_message *)buffer;
2992			switch (new_message->message_type.type) {
2993			case PCI_BUS_RELATIONS:
2994
2995				bus_rel = (struct pci_bus_relations *)buffer;
2996				if (bytes_recvd < sizeof(*bus_rel) ||
2997				    bytes_recvd <
2998					struct_size(bus_rel, func,
2999						    bus_rel->device_count)) {
3000					dev_err(&hbus->hdev->device,
3001						"bus relations too small\n");
3002					break;
3003				}
3004
3005				hv_pci_devices_present(hbus, bus_rel);
3006				break;
3007
3008			case PCI_BUS_RELATIONS2:
3009
3010				bus_rel2 = (struct pci_bus_relations2 *)buffer;
3011				if (bytes_recvd < sizeof(*bus_rel2) ||
3012				    bytes_recvd <
3013					struct_size(bus_rel2, func,
3014						    bus_rel2->device_count)) {
3015					dev_err(&hbus->hdev->device,
3016						"bus relations v2 too small\n");
3017					break;
3018				}
3019
3020				hv_pci_devices_present2(hbus, bus_rel2);
3021				break;
3022
3023			case PCI_EJECT:
3024
3025				dev_message = (struct pci_dev_incoming *)buffer;
3026				if (bytes_recvd < sizeof(*dev_message)) {
3027					dev_err(&hbus->hdev->device,
3028						"eject message too small\n");
3029					break;
3030				}
3031				hpdev = get_pcichild_wslot(hbus,
3032						      dev_message->wslot.slot);
3033				if (hpdev) {
3034					hv_pci_eject_device(hpdev);
3035					put_pcichild(hpdev);
3036				}
3037				break;
3038
3039			case PCI_INVALIDATE_BLOCK:
3040
3041				inval = (struct pci_dev_inval_block *)buffer;
3042				if (bytes_recvd < sizeof(*inval)) {
3043					dev_err(&hbus->hdev->device,
3044						"invalidate message too small\n");
3045					break;
3046				}
3047				hpdev = get_pcichild_wslot(hbus,
3048							   inval->wslot.slot);
3049				if (hpdev) {
3050					if (hpdev->block_invalidate) {
3051						hpdev->block_invalidate(
3052						    hpdev->invalidate_context,
3053						    inval->block_mask);
3054					}
3055					put_pcichild(hpdev);
3056				}
3057				break;
3058
3059			default:
3060				dev_warn(&hbus->hdev->device,
3061					"Unimplemented protocol message %x\n",
3062					new_message->message_type.type);
3063				break;
3064			}
3065			break;
3066
3067		default:
3068			dev_err(&hbus->hdev->device,
3069				"unhandled packet type %d, tid %llx len %d\n",
3070				desc->type, req_id, bytes_recvd);
3071			break;
3072		}
3073	}
3074
3075	kfree(buffer);
3076}
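
/*
 * A sketch of the dispatch above: VM_PKT_COMP packets complete a request
 * that was sent with a completion callback (the transaction ID maps back
 * to the sender's pci_packet), while VM_PKT_DATA_INBAND packets carry
 * unsolicited host messages (bus relations, eject, invalidate block).
 */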
3077
3078/**
3079 * hv_pci_protocol_negotiation() - Set up protocol
3080 * @hdev:		VMBus's tracking struct for this root PCI bus.
3081 * @version:		Array of supported channel protocol versions in
3082 *			the order of probing - highest go first.
3083 * @num_version:	Number of elements in the version array.
3084 *
3085 * This driver is intended to support running on Windows 10
3086 * (server) and later versions. It will not run on earlier
3087 * versions, as they assume that many of the operations which
3088 * Linux needs accomplished with a spinlock held were done via
3089 * asynchronous messaging over VMBus.  Windows 10 increases the
3090 * surface area of PCI emulation so that these actions can take
3091 * place by suspending a virtual processor for their duration.
3092 *
3093 * This function negotiates the channel protocol version,
3094 * failing if the host doesn't support the necessary protocol
3095 * level.
3096 */
3097static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3098				       enum pci_protocol_version_t version[],
3099				       int num_version)
3100{
3101	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3102	struct pci_version_request *version_req;
3103	struct hv_pci_compl comp_pkt;
3104	struct pci_packet *pkt;
3105	int ret;
3106	int i;
3107
3108	/*
3109	 * Initiate the handshake with the host and negotiate
3110	 * a version that the host can support. We start with the
3111	 * highest version number and go down if the host cannot
3112	 * support it.
3113	 */
3114	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3115	if (!pkt)
3116		return -ENOMEM;
3117
3118	init_completion(&comp_pkt.host_event);
3119	pkt->completion_func = hv_pci_generic_compl;
3120	pkt->compl_ctxt = &comp_pkt;
3121	version_req = (struct pci_version_request *)&pkt->message;
3122	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3123
3124	for (i = 0; i < num_version; i++) {
3125		version_req->protocol_version = version[i];
3126		ret = vmbus_sendpacket(hdev->channel, version_req,
3127				sizeof(struct pci_version_request),
3128				(unsigned long)pkt, VM_PKT_DATA_INBAND,
3129				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3130		if (!ret)
3131			ret = wait_for_response(hdev, &comp_pkt.host_event);
3132
3133		if (ret) {
3134			dev_err(&hdev->device,
3135				"PCI Pass-through VSP failed to request version: %d",
3136				ret);
3137			goto exit;
3138		}
3139
3140		if (comp_pkt.completion_status >= 0) {
3141			hbus->protocol_version = version[i];
3142			dev_info(&hdev->device,
3143				"PCI VMBus probing: Using version %#x\n",
3144				hbus->protocol_version);
3145			goto exit;
3146		}
3147
3148		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3149			dev_err(&hdev->device,
3150				"PCI Pass-through VSP failed version request: %#x",
3151				comp_pkt.completion_status);
3152			ret = -EPROTO;
3153			goto exit;
3154		}
3155
3156		reinit_completion(&comp_pkt.host_event);
3157	}
3158
3159	dev_err(&hdev->device,
3160		"PCI pass-through VSP failed to find supported version");
3161	ret = -EPROTO;
3162
3163exit:
3164	kfree(pkt);
3165	return ret;
3166}
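
/*
 * Example: probing with { 1.4, 1.3, 1.2, 1.1 }, the first version the host
 * accepts wins; a STATUS_REVISION_MISMATCH completion moves on to the next
 * entry, and any other failure aborts the negotiation.
 */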
3167
3168/**
3169 * hv_pci_free_bridge_windows() - Release memory regions for the
3170 * bus
3171 * @hbus:	Root PCI bus, as understood by this driver
3172 */
3173static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3174{
3175	/*
3176	 * Set the resources back to the way they looked when they
3177	 * were allocated by setting IORESOURCE_BUSY again.
3178	 */
3179
3180	if (hbus->low_mmio_space && hbus->low_mmio_res) {
3181		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3182		vmbus_free_mmio(hbus->low_mmio_res->start,
3183				resource_size(hbus->low_mmio_res));
3184	}
3185
3186	if (hbus->high_mmio_space && hbus->high_mmio_res) {
3187		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3188		vmbus_free_mmio(hbus->high_mmio_res->start,
3189				resource_size(hbus->high_mmio_res));
3190	}
3191}
3192
3193/**
3194 * hv_pci_allocate_bridge_windows() - Allocate memory regions
3195 * for the bus
3196 * @hbus:	Root PCI bus, as understood by this driver
3197 *
3198 * This function calls vmbus_allocate_mmio(), which is itself a
3199 * bit of a compromise.  Ideally, we might change the pnp layer
3200 * in the kernel such that it comprehends either PCI devices
3201 * which are "grandchildren of ACPI," with some intermediate bus
3202 * node (in this case, VMBus) or change it such that it
3203 * understands VMBus.  The pnp layer, however, has been declared
3204 * deprecated, and not subject to change.
3205 *
3206 * The workaround, implemented here, is to ask VMBus to allocate
3207 * MMIO space for this bus.  VMBus itself knows which ranges are
3208 * appropriate by looking at its own ACPI objects.  Then, after
3209 * these ranges are claimed, they're modified to look like they
3210 * would have looked if the ACPI and pnp code had allocated
3211 * bridge windows.  These descriptors have to exist in this form
3212 * in order to satisfy the code which will get invoked when the
3213 * endpoint PCI function driver calls request_mem_region() or
3214 * request_mem_region_exclusive().
3215 *
3216 * Return: 0 on success, -errno on failure
3217 */
3218static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3219{
3220	resource_size_t align;
3221	int ret;
3222
3223	if (hbus->low_mmio_space) {
3224		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3225		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3226					  (u64)(u32)0xffffffff,
3227					  hbus->low_mmio_space,
3228					  align, false);
3229		if (ret) {
3230			dev_err(&hbus->hdev->device,
3231				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3232				hbus->low_mmio_space);
3233			return ret;
3234		}
3235
3236		/* Modify this resource to become a bridge window. */
3237		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3238		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3239		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3240	}
3241
3242	if (hbus->high_mmio_space) {
3243		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3244		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3245					  0x100000000, -1,
3246					  hbus->high_mmio_space, align,
3247					  false);
3248		if (ret) {
3249			dev_err(&hbus->hdev->device,
3250				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3251				hbus->high_mmio_space);
3252			goto release_low_mmio;
3253		}
3254
3255		/* Modify this resource to become a bridge window. */
3256		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3257		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3258		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3259	}
3260
3261	return 0;
3262
3263release_low_mmio:
3264	if (hbus->low_mmio_res) {
3265		vmbus_free_mmio(hbus->low_mmio_res->start,
3266				resource_size(hbus->low_mmio_res));
3267	}
3268
3269	return ret;
3270}
3271
3272/**
3273 * hv_allocate_config_window() - Find MMIO space for PCI Config
3274 * @hbus:	Root PCI bus, as understood by this driver
3275 *
3276 * This function claims memory-mapped I/O space for accessing
3277 * configuration space for the functions on this bus.
3278 *
3279 * Return: 0 on success, -errno on failure
3280 */
3281static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3282{
3283	int ret;
3284
3285	/*
3286	 * Set up a region of MMIO space to use for accessing configuration
3287	 * space.
3288	 */
3289	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3290				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3291	if (ret)
3292		return ret;
3293
3294	/*
3295	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3296	 * resource claims (those which cannot be overlapped) and the ranges
3297	 * which are valid for the children of this bus, which are intended
3298	 * to be overlapped by those children.  Set the flag on this claim
3299	 * meaning that this region can't be overlapped.
3300	 */
3301
3302	hbus->mem_config->flags |= IORESOURCE_BUSY;
3303
3304	return 0;
3305}
3306
3307static void hv_free_config_window(struct hv_pcibus_device *hbus)
3308{
3309	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3310}
3311
3312static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3313
3314/**
3315 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3316 * @hdev:	VMBus's tracking struct for this root PCI bus
3317 *
3318 * Return: 0 on success, -errno on failure
3319 */
3320static int hv_pci_enter_d0(struct hv_device *hdev)
3321{
3322	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3323	struct pci_bus_d0_entry *d0_entry;
3324	struct hv_pci_compl comp_pkt;
3325	struct pci_packet *pkt;
3326	bool retry = true;
3327	int ret;
3328
3329enter_d0_retry:
3330	/*
3331	 * Tell the host that the bus is ready to use, and moved into the
3332	 * powered-on state.  This includes telling the host which region
3333	 * of memory-mapped I/O space has been chosen for configuration space
3334	 * access.
3335	 */
3336	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3337	if (!pkt)
3338		return -ENOMEM;
3339
3340	init_completion(&comp_pkt.host_event);
3341	pkt->completion_func = hv_pci_generic_compl;
3342	pkt->compl_ctxt = &comp_pkt;
3343	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3344	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3345	d0_entry->mmio_base = hbus->mem_config->start;
3346
3347	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3348			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3349			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3350	if (!ret)
3351		ret = wait_for_response(hdev, &comp_pkt.host_event);
3352
3353	if (ret)
3354		goto exit;
3355
3356	/*
3357	 * In certain cases (e.g. kdump) the PCI device of interest was
3358	 * not cleanly shut down and its resources are still held on the
3359	 * host side, so the host could return an invalid device status.
3360	 * We need to explicitly request that the host release the
3361	 * resources, and then try to enter D0 again.
3362	 */
3363	if (comp_pkt.completion_status < 0 && retry) {
3364		retry = false;
3365
3366		dev_err(&hdev->device, "Retrying D0 Entry\n");
3367
3368		/*
3369		 * hv_pci_bus_exit() calls hv_send_resources_released()
3370		 * to free up the resources of its child devices.
3371		 * In the kdump kernel we need to set
3372		 * wslot_res_allocated to 255 so that it scans all child
3373		 * devices and releases the resources allocated in the
3374		 * normal kernel before the panic happened.
3375		 */
3376		hbus->wslot_res_allocated = 255;
3377
3378		ret = hv_pci_bus_exit(hdev, true);
3379
3380		if (ret == 0) {
3381			kfree(pkt);
3382			goto enter_d0_retry;
3383		}
3384		dev_err(&hdev->device,
3385			"Retrying D0 failed with ret %d\n", ret);
3386	}
3387
3388	if (comp_pkt.completion_status < 0) {
3389		dev_err(&hdev->device,
3390			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3391			comp_pkt.completion_status);
3392		ret = -EPROTO;
3393		goto exit;
3394	}
3395
3396	ret = 0;
3397
3398exit:
3399	kfree(pkt);
3400	return ret;
3401}
3402
3403/**
3404 * hv_pci_query_relations() - Ask host to send list of child
3405 * devices
3406 * @hdev:	VMBus's tracking struct for this root PCI bus
3407 *
3408 * Return: 0 on success, -errno on failure
3409 */
3410static int hv_pci_query_relations(struct hv_device *hdev)
3411{
3412	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3413	struct pci_message message;
3414	struct completion comp;
3415	int ret;
3416
3417	/* Ask the host to send along the list of child devices */
3418	init_completion(&comp);
3419	if (cmpxchg(&hbus->survey_event, NULL, &comp))
3420		return -ENOTEMPTY;
3421
3422	memset(&message, 0, sizeof(message));
3423	message.type = PCI_QUERY_BUS_RELATIONS;
3424
3425	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3426			       0, VM_PKT_DATA_INBAND, 0);
3427	if (!ret)
3428		ret = wait_for_response(hdev, &comp);
3429
3430	/*
3431	 * In the case of fast device addition/removal, it's possible that
3432	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
3433	 * already got a PCI_BUS_RELATIONS* message from the host and the
3434	 * channel callback already scheduled a work to hbus->wq, which can be
3435	 * running pci_devices_present_work() -> survey_child_resources() ->
3436	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
3437	 * exits and the stack variable 'comp' is no longer valid; as a result,
3438	 * a hang or a page fault may happen when the complete() calls
3439	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
3440	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
3441	 * -ENODEV, there can't be any more work item scheduled to hbus->wq
3442	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
3443	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
3444	 * channel->rescind = true.
3445	 */
3446	flush_workqueue(hbus->wq);
3447
3448	return ret;
3449}
3450
3451/**
3452 * hv_send_resources_allocated() - Report local resource choices
3453 * @hdev:	VMBus's tracking struct for this root PCI bus
3454 *
3455 * The host OS expects to be sent a request message which
3456 * contains all the resources that the device will use.
3457 * The response contains those same resources, "translated",
3458 * which is to say, the values that the hardware should use
3459 * when it delivers an interrupt.  (MMIO resources are used
3460 * in local terms.)  This is convenient for Windows, and lines
3461 * up with the FDO/PDO split, which doesn't exist in Linux.
3462 * Linux instead expects to scan an emulated PCI configuration
3463 * space.  So this message is sent here only to drive the
3464 * state machine on the host forward.
3465 *
3466 * Return: 0 on success, -errno on failure
3467 */
3468static int hv_send_resources_allocated(struct hv_device *hdev)
3469{
3470	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3471	struct pci_resources_assigned *res_assigned;
3472	struct pci_resources_assigned2 *res_assigned2;
3473	struct hv_pci_compl comp_pkt;
3474	struct hv_pci_dev *hpdev;
3475	struct pci_packet *pkt;
3476	size_t size_res;
3477	int wslot;
3478	int ret;
3479
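	/*
	 * Hosts speaking protocol 1.2 or later expect the larger
	 * PCI_RESOURCES_ASSIGNED2 message; older hosts expect the original
	 * PCI_RESOURCES_ASSIGNED layout.
	 */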
3480	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
3481			? sizeof(*res_assigned) : sizeof(*res_assigned2);
3482
3483	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
3484	if (!pkt)
3485		return -ENOMEM;
3486
3487	ret = 0;
3488
3489	for (wslot = 0; wslot < 256; wslot++) {
3490		hpdev = get_pcichild_wslot(hbus, wslot);
3491		if (!hpdev)
3492			continue;
3493
3494		memset(pkt, 0, sizeof(*pkt) + size_res);
3495		init_completion(&comp_pkt.host_event);
3496		pkt->completion_func = hv_pci_generic_compl;
3497		pkt->compl_ctxt = &comp_pkt;
3498
3499		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
3500			res_assigned =
3501				(struct pci_resources_assigned *)&pkt->message;
3502			res_assigned->message_type.type =
3503				PCI_RESOURCES_ASSIGNED;
3504			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
3505		} else {
3506			res_assigned2 =
3507				(struct pci_resources_assigned2 *)&pkt->message;
3508			res_assigned2->message_type.type =
3509				PCI_RESOURCES_ASSIGNED2;
3510			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
3511		}
3512		put_pcichild(hpdev);
3513
3514		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
3515				size_res, (unsigned long)pkt,
3516				VM_PKT_DATA_INBAND,
3517				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3518		if (!ret)
3519			ret = wait_for_response(hdev, &comp_pkt.host_event);
3520		if (ret)
3521			break;
3522
3523		if (comp_pkt.completion_status < 0) {
3524			ret = -EPROTO;
3525			dev_err(&hdev->device,
3526				"resource allocation returned 0x%x\n",
3527				comp_pkt.completion_status);
3528			break;
3529		}
3530
3531		hbus->wslot_res_allocated = wslot;
3532	}
3533
3534	kfree(pkt);
3535	return ret;
3536}
3537
3538/**
3539 * hv_send_resources_released() - Report local resources
3540 * released
3541 * @hdev:	VMBus's tracking struct for this root PCI bus
3542 *
3543 * Return: 0 on success, -errno on failure
3544 */
3545static int hv_send_resources_released(struct hv_device *hdev)
3546{
3547	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3548	struct pci_child_message pkt;
3549	struct hv_pci_dev *hpdev;
3550	int wslot;
3551	int ret;
3552
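	/*
	 * Walk the slots in reverse order of allocation, so that
	 * wslot_res_allocated always names the highest slot still holding
	 * resources even if a send fails partway through.
	 */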
3553	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
3554		hpdev = get_pcichild_wslot(hbus, wslot);
3555		if (!hpdev)
3556			continue;
3557
3558		memset(&pkt, 0, sizeof(pkt));
3559		pkt.message_type.type = PCI_RESOURCES_RELEASED;
3560		pkt.wslot.slot = hpdev->desc.win_slot.slot;
3561
3562		put_pcichild(hpdev);
3563
3564		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
3565				       VM_PKT_DATA_INBAND, 0);
3566		if (ret)
3567			return ret;
3568
3569		hbus->wslot_res_allocated = wslot - 1;
3570	}
3571
3572	hbus->wslot_res_allocated = -1;
3573
3574	return 0;
3575}
3576
3577#define HVPCI_DOM_MAP_SIZE (64 * 1024)
3578static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
3579
3580/*
3581 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
3582 * as invalid for passthrough PCI devices of this driver.
3583 */
3584#define HVPCI_DOM_INVALID 0
3585
3586/**
3587 * hv_get_dom_num() - Get a valid PCI domain number
3588 * @dom: Requested domain number
3589 *
3590 * Check if the requested PCI domain number is in use, and if so,
3591 * return another number that is not in use.
3592 *
3593 * Return: domain number on success, HVPCI_DOM_INVALID on failure
3594 */
3595static u16 hv_get_dom_num(u16 dom)
3596{
3597	unsigned int i;
3598
3599	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
3600		return dom;
3601
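	/* The requested number is taken: fall back to the first free domain. */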
3602	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
3603		if (test_and_set_bit(i, hvpci_dom_map) == 0)
3604			return i;
3605	}
3606
3607	return HVPCI_DOM_INVALID;
3608}
3609
3610/**
3611 * hv_put_dom_num() - Mark the PCI domain number as free
3612 * @dom: Domain number to be freed
3613 */
3614static void hv_put_dom_num(u16 dom)
3615{
3616	clear_bit(dom, hvpci_dom_map);
3617}
3618
3619/**
3620 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3621 * @hdev:	VMBus's tracking struct for this root PCI bus
3622 * @dev_id:	Identifies the device itself
3623 *
3624 * Return: 0 on success, -errno on failure
3625 */
3626static int hv_pci_probe(struct hv_device *hdev,
3627			const struct hv_vmbus_device_id *dev_id)
3628{
3629	struct pci_host_bridge *bridge;
3630	struct hv_pcibus_device *hbus;
3631	u16 dom_req, dom;
3632	char *name;
3633	int ret;
3634
3635	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
3636	if (!bridge)
3637		return -ENOMEM;
3638
3639	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
3640	if (!hbus)
3641		return -ENOMEM;
3642
3643	hbus->bridge = bridge;
3644	mutex_init(&hbus->state_lock);
3645	hbus->state = hv_pcibus_init;
3646	hbus->wslot_res_allocated = -1;
3647
3648	/*
3649	 * The PCI bus "domain" is what is called "segment" in ACPI and other
3650	 * specs. Pull it from the instance ID, to get something usually
3651	 * unique. In rare cases of collision, we will find another
3652	 * number that is not in use.
3653	 *
3654	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3655	 * together with this guest driver can guarantee that (1) The only
3656	 * domain used by Gen1 VMs for something that looks like a physical
3657	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3658	 * (2) There will be no overlap between domains (after fixing possible
3659	 * collisions) in the same VM.
3660	 */
3661	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
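	/*
	 * For example (illustrative values only): an instance ID with
	 * b[4] = 0x56 and b[5] = 0x12 yields dom_req = 0x1256.
	 */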
3662	dom = hv_get_dom_num(dom_req);
3663
3664	if (dom == HVPCI_DOM_INVALID) {
3665		dev_err(&hdev->device,
3666			"Unable to use dom# 0x%x or other numbers", dom_req);
3667		ret = -EINVAL;
3668		goto free_bus;
3669	}
3670
3671	if (dom != dom_req)
3672		dev_info(&hdev->device,
3673			 "PCI dom# 0x%x has collision, using 0x%x",
3674			 dom_req, dom);
3675
3676	hbus->bridge->domain_nr = dom;
3677#ifdef CONFIG_X86
3678	hbus->sysdata.domain = dom;
3679	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
3680#elif defined(CONFIG_ARM64)
3681	/*
3682	 * Set the PCI bus parent to be the corresponding VMbus
3683	 * device. Then the VMbus device will be assigned as the
3684	 * ACPI companion in pcibios_root_bridge_prepare() and
3685	 * pci_dma_configure() will propagate device coherence
3686	 * information to devices created on the bus.
3687	 */
3688	hbus->sysdata.parent = hdev->device.parent;
3689	hbus->use_calls = false;
3690#endif
3691
3692	hbus->hdev = hdev;
3693	INIT_LIST_HEAD(&hbus->children);
3694	INIT_LIST_HEAD(&hbus->dr_list);
3695	spin_lock_init(&hbus->config_lock);
3696	spin_lock_init(&hbus->device_list_lock);
3697	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
3698					   hbus->bridge->domain_nr);
3699	if (!hbus->wq) {
3700		ret = -ENOMEM;
3701		goto free_dom;
3702	}
3703
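	/*
	 * Route transaction IDs through the VMbus requestor table so that
	 * completion packets from the host can be matched, and validated,
	 * against outstanding requests.
	 */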
3704	hdev->channel->next_request_id_callback = vmbus_next_request_id;
3705	hdev->channel->request_addr_callback = vmbus_request_addr;
3706	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
3707
3708	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3709			 hv_pci_onchannelcallback, hbus);
3710	if (ret)
3711		goto destroy_wq;
3712
3713	hv_set_drvdata(hdev, hbus);
3714
3715	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
3716					  ARRAY_SIZE(pci_protocol_versions));
3717	if (ret)
3718		goto close;
3719
3720	ret = hv_allocate_config_window(hbus);
3721	if (ret)
3722		goto close;
3723
3724	hbus->cfg_addr = ioremap(hbus->mem_config->start,
3725				 PCI_CONFIG_MMIO_LENGTH);
3726	if (!hbus->cfg_addr) {
3727		dev_err(&hdev->device,
3728			"Unable to map a virtual address for config space\n");
3729		ret = -ENOMEM;
3730		goto free_config;
3731	}
3732
3733	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
3734	if (!name) {
3735		ret = -ENOMEM;
3736		goto unmap;
3737	}
3738
3739	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
3740	kfree(name);
3741	if (!hbus->fwnode) {
3742		ret = -ENOMEM;
3743		goto unmap;
3744	}
3745
3746	ret = hv_pcie_init_irq_domain(hbus);
3747	if (ret)
3748		goto free_fwnode;
3749
3750	ret = hv_pci_query_relations(hdev);
3751	if (ret)
3752		goto free_irq_domain;
3753
3754	mutex_lock(&hbus->state_lock);
3755
3756	ret = hv_pci_enter_d0(hdev);
3757	if (ret)
3758		goto release_state_lock;
3759
3760	ret = hv_pci_allocate_bridge_windows(hbus);
3761	if (ret)
3762		goto exit_d0;
3763
3764	ret = hv_send_resources_allocated(hdev);
3765	if (ret)
3766		goto free_windows;
3767
3768	prepopulate_bars(hbus);
3769
3770	hbus->state = hv_pcibus_probed;
3771
3772	ret = create_root_hv_pci_bus(hbus);
3773	if (ret)
3774		goto free_windows;
3775
3776	mutex_unlock(&hbus->state_lock);
3777	return 0;
3778
3779free_windows:
3780	hv_pci_free_bridge_windows(hbus);
3781exit_d0:
3782	(void) hv_pci_bus_exit(hdev, true);
3783release_state_lock:
3784	mutex_unlock(&hbus->state_lock);
3785free_irq_domain:
3786	irq_domain_remove(hbus->irq_domain);
3787free_fwnode:
3788	irq_domain_free_fwnode(hbus->fwnode);
3789unmap:
3790	iounmap(hbus->cfg_addr);
3791free_config:
3792	hv_free_config_window(hbus);
3793close:
3794	vmbus_close(hdev->channel);
3795destroy_wq:
3796	destroy_workqueue(hbus->wq);
3797free_dom:
3798	hv_put_dom_num(hbus->bridge->domain_nr);
3799free_bus:
3800	kfree(hbus);
3801	return ret;
3802}
3803
3804static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
3805{
3806	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3807	struct vmbus_channel *chan = hdev->channel;
3808	struct {
3809		struct pci_packet teardown_packet;
3810		u8 buffer[sizeof(struct pci_message)];
3811	} pkt;
3812	struct hv_pci_compl comp_pkt;
3813	struct hv_pci_dev *hpdev, *tmp;
3814	unsigned long flags;
3815	u64 trans_id;
3816	int ret;
3817
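	/*
	 * keep_devs is true on the paths (kdump retry, probe error handling,
	 * suspend) where the child device tracking structures must survive;
	 * on a normal remove it is false and the children are torn down here.
	 */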
3818	/*
3819	 * After the host sends the RESCIND_CHANNEL message, it doesn't
3820	 * access the per-channel ringbuffer any longer.
3821	 */
3822	if (chan->rescind)
3823		return 0;
3824
3825	if (!keep_devs) {
3826		struct list_head removed;
3827
3828		/* Move all present children to the list on stack */
3829		INIT_LIST_HEAD(&removed);
3830		spin_lock_irqsave(&hbus->device_list_lock, flags);
3831		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
3832			list_move_tail(&hpdev->list_entry, &removed);
3833		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
3834
3835		/* Remove all children in the list */
3836		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
3837			list_del(&hpdev->list_entry);
3838			if (hpdev->pci_slot)
3839				pci_destroy_slot(hpdev->pci_slot);
3840			/* Drop the two refs taken in new_pcichild_device() */
3841			put_pcichild(hpdev);
3842			put_pcichild(hpdev);
3843		}
3844	}
3845
3846	ret = hv_send_resources_released(hdev);
3847	if (ret) {
3848		dev_err(&hdev->device,
3849			"Couldn't send resources released packet(s)\n");
3850		return ret;
3851	}
3852
3853	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
3854	init_completion(&comp_pkt.host_event);
3855	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
3856	pkt.teardown_packet.compl_ctxt = &comp_pkt;
3857	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
3858
3859	ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
3860				     sizeof(struct pci_message),
3861				     (unsigned long)&pkt.teardown_packet,
3862				     &trans_id, VM_PKT_DATA_INBAND,
3863				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3864	if (ret)
3865		return ret;
3866
3867	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
3868		/*
3869		 * The completion packet on the stack becomes invalid after
3870		 * 'return'; remove the ID from the VMbus requestor if the
3871		 * identifier is still mapped to/associated with the packet.
3872		 *
3873		 * Cf. hv_pci_onchannelcallback().
3874		 */
3875		vmbus_request_addr_match(chan, trans_id,
3876					 (unsigned long)&pkt.teardown_packet);
3877		return -ETIMEDOUT;
3878	}
3879
3880	return 0;
3881}
3882
3883/**
3884 * hv_pci_remove() - Remove routine for this VMBus channel
3885 * @hdev:	VMBus's tracking struct for this root PCI bus
3886 */
3887static void hv_pci_remove(struct hv_device *hdev)
3888{
3889	struct hv_pcibus_device *hbus;
3890
3891	hbus = hv_get_drvdata(hdev);
3892	if (hbus->state == hv_pcibus_installed) {
3893		tasklet_disable(&hdev->channel->callback_event);
3894		hbus->state = hv_pcibus_removing;
3895		tasklet_enable(&hdev->channel->callback_event);
3896		destroy_workqueue(hbus->wq);
3897		hbus->wq = NULL;
3898		/*
3899		 * At this point, no work is running or can be scheduled
3900		 * on hbus->wq. We can't race with hv_pci_devices_present()
3901		 * or hv_pci_eject_device(), so it's safe to proceed.
3902		 */
3903
3904		/* Remove the bus from PCI's point of view. */
3905		pci_lock_rescan_remove();
3906		pci_stop_root_bus(hbus->bridge->bus);
3907		hv_pci_remove_slots(hbus);
3908		pci_remove_root_bus(hbus->bridge->bus);
3909		pci_unlock_rescan_remove();
3910	}
3911
3912	hv_pci_bus_exit(hdev, false);
3913
3914	vmbus_close(hdev->channel);
3915
3916	iounmap(hbus->cfg_addr);
3917	hv_free_config_window(hbus);
3918	hv_pci_free_bridge_windows(hbus);
3919	irq_domain_remove(hbus->irq_domain);
3920	irq_domain_free_fwnode(hbus->fwnode);
3921
3922	hv_put_dom_num(hbus->bridge->domain_nr);
3923
3924	kfree(hbus);
3925}
3926
3927static int hv_pci_suspend(struct hv_device *hdev)
3928{
3929	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3930	enum hv_pcibus_state old_state;
3931	int ret;
3932
3933	/*
3934	 * hv_pci_suspend() must make sure there are no pending work items
3935	 * before calling vmbus_close(), since it runs in a process context
3936	 * as a callback in dpm_suspend().  When it starts to run, the channel
3937	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
3938	 * context, can still be running concurrently and scheduling new work
3939	 * items onto hbus->wq in hv_pci_devices_present() and
3940	 * hv_pci_eject_device(), and the work item handlers can access the
3941	 * vmbus channel, which hv_pci_suspend() may be in the middle of
3942	 * closing; e.g. the work item handler pci_devices_present_work() ->
3943	 * new_pcichild_device() writes to the vmbus channel.
3944	 *
3945	 * To eliminate the race, hv_pci_suspend() disables the channel
3946	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
3947	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
3948	 * it knows that no new work item can be scheduled, and then it flushes
3949	 * hbus->wq and safely closes the vmbus channel.
3950	 */
3951	tasklet_disable(&hdev->channel->callback_event);
3952
3953	/* Change the hbus state to prevent new work items. */
3954	old_state = hbus->state;
3955	if (hbus->state == hv_pcibus_installed)
3956		hbus->state = hv_pcibus_removing;
3957
3958	tasklet_enable(&hdev->channel->callback_event);
3959
3960	if (old_state != hv_pcibus_installed)
3961		return -EINVAL;
3962
3963	flush_workqueue(hbus->wq);
3964
3965	ret = hv_pci_bus_exit(hdev, true);
3966	if (ret)
3967		return ret;
3968
3969	vmbus_close(hdev->channel);
3970
3971	return 0;
3972}
3973
3974static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
3975{
3976	struct irq_data *irq_data;
3977	struct msi_desc *entry;
3978	int ret = 0;
3979
3980	if (!pdev->msi_enabled && !pdev->msix_enabled)
3981		return 0;
3982
3983	msi_lock_descs(&pdev->dev);
3984	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
3985		irq_data = irq_get_irq_data(entry->irq);
3986		if (WARN_ON_ONCE(!irq_data)) {
3987			ret = -EINVAL;
3988			break;
3989		}
3990
3991		hv_compose_msi_msg(irq_data, &entry->msg);
3992	}
3993	msi_unlock_descs(&pdev->dev);
3994
3995	return ret;
3996}
3997
3998/*
3999 * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
4000 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
4001 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
4002 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
4003 * Table entries.
4004 */
4005static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
4006{
4007	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
4008}
4009
4010static int hv_pci_resume(struct hv_device *hdev)
4011{
4012	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
4013	enum pci_protocol_version_t version[1];
4014	int ret;
4015
4016	hbus->state = hv_pcibus_init;
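	/*
	 * Replay the probe-time sequence (negotiate, query relations, D0
	 * entry, resource messages) to drive the host state machine back
	 * to the running state.
	 */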
4017
4018	hdev->channel->next_request_id_callback = vmbus_next_request_id;
4019	hdev->channel->request_addr_callback = vmbus_request_addr;
4020	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
4021
4022	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
4023			 hv_pci_onchannelcallback, hbus);
4024	if (ret)
4025		return ret;
4026
4027	/* Only use the version that was in use before hibernation. */
4028	version[0] = hbus->protocol_version;
4029	ret = hv_pci_protocol_negotiation(hdev, version, 1);
4030	if (ret)
4031		goto out;
4032
4033	ret = hv_pci_query_relations(hdev);
4034	if (ret)
4035		goto out;
4036
4037	mutex_lock(&hbus->state_lock);
4038
4039	ret = hv_pci_enter_d0(hdev);
4040	if (ret)
4041		goto release_state_lock;
4042
4043	ret = hv_send_resources_allocated(hdev);
4044	if (ret)
4045		goto release_state_lock;
4046
4047	prepopulate_bars(hbus);
4048
4049	hv_pci_restore_msi_state(hbus);
4050
4051	hbus->state = hv_pcibus_installed;
4052	mutex_unlock(&hbus->state_lock);
4053	return 0;
4054
4055release_state_lock:
4056	mutex_unlock(&hbus->state_lock);
4057out:
4058	vmbus_close(hdev->channel);
4059	return ret;
4060}
4061
4062static const struct hv_vmbus_device_id hv_pci_id_table[] = {
4063	/* PCI Pass-through Class ID */
4064	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
4065	{ HV_PCIE_GUID, },
4066	{ },
4067};
4068
4069MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
4070
4071static struct hv_driver hv_pci_drv = {
4072	.name		= "hv_pci",
4073	.id_table	= hv_pci_id_table,
4074	.probe		= hv_pci_probe,
4075	.remove		= hv_pci_remove,
4076	.suspend	= hv_pci_suspend,
4077	.resume		= hv_pci_resume,
4078};
4079
4080static void __exit exit_hv_pci_drv(void)
4081{
4082	vmbus_driver_unregister(&hv_pci_drv);
4083
4084	hvpci_block_ops.read_block = NULL;
4085	hvpci_block_ops.write_block = NULL;
4086	hvpci_block_ops.reg_blk_invalidate = NULL;
4087}
4088
4089static int __init init_hv_pci_drv(void)
4090{
4091	int ret;
4092
4093	if (!hv_is_hyperv_initialized())
4094		return -ENODEV;
4095
4096	ret = hv_pci_irqchip_init();
4097	if (ret)
4098		return ret;
4099
4100	/* Set the invalid domain number's bit, so it will not be used */
4101	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
4102
4103	/* Initialize PCI block r/w interface */
4104	hvpci_block_ops.read_block = hv_read_config_block;
4105	hvpci_block_ops.write_block = hv_write_config_block;
4106	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
4107
4108	return vmbus_driver_register(&hv_pci_drv);
4109}
4110
4111module_init(init_hv_pci_drv);
4112module_exit(exit_hv_pci_drv);
4113
4114MODULE_DESCRIPTION("Hyper-V PCI");
4115MODULE_LICENSE("GPL v2");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) Microsoft Corporation.
   4 *
   5 * Author:
   6 *   Jake Oshins <jakeo@microsoft.com>
   7 *
   8 * This driver acts as a paravirtual front-end for PCI Express root buses.
   9 * When a PCI Express function (either an entire device or an SR-IOV
  10 * Virtual Function) is being passed through to the VM, this driver exposes
  11 * a new bus to the guest VM.  This is modeled as a root PCI bus because
  12 * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
  13 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
  14 * until a device as been exposed using this driver.
  15 *
  16 * Each root PCI bus has its own PCI domain, which is called "Segment" in
  17 * the PCI Firmware Specifications.  Thus while each device passed through
  18 * to the VM using this front-end will appear at "device 0", the domain will
  19 * be unique.  Typically, each bus will have one PCI function on it, though
  20 * this driver does support more than one.
  21 *
  22 * In order to map the interrupts from the device through to the guest VM,
  23 * this driver also implements an IRQ Domain, which handles interrupts (either
  24 * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
  25 * set up, torn down, or reaffined, this driver communicates with the
  26 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
  27 * interrupt will be delivered to the correct virtual processor at the right
  28 * vector.  This driver does not support level-triggered (line-based)
  29 * interrupts, and will report that the Interrupt Line register in the
  30 * function's configuration space is zero.
  31 *
  32 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
  33 * facilities.  For instance, the configuration space of a function exposed
  34 * by Hyper-V is mapped into a single page of memory space, and the
  35 * read and write handlers for config space must be aware of this mechanism.
  36 * Similarly, device setup and teardown involves messages sent to and from
  37 * the PCI back-end driver in Hyper-V.
  38 */
  39
  40#include <linux/kernel.h>
  41#include <linux/module.h>
  42#include <linux/pci.h>
  43#include <linux/pci-ecam.h>
  44#include <linux/delay.h>
  45#include <linux/semaphore.h>
  46#include <linux/irq.h>
  47#include <linux/msi.h>
  48#include <linux/hyperv.h>
  49#include <linux/refcount.h>
  50#include <linux/irqdomain.h>
  51#include <linux/acpi.h>
 
  52#include <asm/mshyperv.h>
  53
  54/*
  55 * Protocol versions. The low word is the minor version, the high word the
  56 * major version.
  57 */
  58
  59#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
  60#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
  61#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
  62
  63enum pci_protocol_version_t {
  64	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
  65	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
  66	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
  67	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
  68};
  69
  70#define CPU_AFFINITY_ALL	-1ULL
  71
  72/*
  73 * Supported protocol versions in the order of probing - highest go
  74 * first.
  75 */
  76static enum pci_protocol_version_t pci_protocol_versions[] = {
  77	PCI_PROTOCOL_VERSION_1_4,
  78	PCI_PROTOCOL_VERSION_1_3,
  79	PCI_PROTOCOL_VERSION_1_2,
  80	PCI_PROTOCOL_VERSION_1_1,
  81};
  82
  83#define PCI_CONFIG_MMIO_LENGTH	0x2000
  84#define CFG_PAGE_OFFSET 0x1000
  85#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
  86
  87#define MAX_SUPPORTED_MSI_MESSAGES 0x400
  88
  89#define STATUS_REVISION_MISMATCH 0xC0000059
  90
  91/* space for 32bit serial number as string */
  92#define SLOT_NAME_SIZE 11
  93
  94/*
  95 * Size of requestor for VMbus; the value is based on the observation
  96 * that having more than one request outstanding is 'rare', and so 64
  97 * should be generous in ensuring that we don't ever run out.
  98 */
  99#define HV_PCI_RQSTOR_SIZE 64
 100
 101/*
 102 * Message Types
 103 */
 104
 105enum pci_message_type {
 106	/*
 107	 * Version 1.1
 108	 */
 109	PCI_MESSAGE_BASE                = 0x42490000,
 110	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
 111	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
 112	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
 113	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
 114	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
 115	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
 116	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
 117	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
 118	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
 119	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
 120	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
 121	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
 122	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
 123	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
 124	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
 125	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
 126	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
 127	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
 128	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
 129	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
 130	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
 131	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
 132	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
 133	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
 134	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
 135	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
 136	PCI_MESSAGE_MAXIMUM
 137};
 138
 139/*
 140 * Structures defining the virtual PCI Express protocol.
 141 */
 142
 143union pci_version {
 144	struct {
 145		u16 minor_version;
 146		u16 major_version;
 147	} parts;
 148	u32 version;
 149} __packed;
 150
 151/*
 152 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 153 * which is all this driver does.  This representation is the one used in
 154 * Windows, which is what is expected when sending this back and forth with
 155 * the Hyper-V parent partition.
 156 */
 157union win_slot_encoding {
 158	struct {
 159		u32	dev:5;
 160		u32	func:3;
 161		u32	reserved:24;
 162	} bits;
 163	u32 slot;
 164} __packed;
 165
 166/*
 167 * Pretty much as defined in the PCI Specifications.
 168 */
 169struct pci_function_description {
 170	u16	v_id;	/* vendor ID */
 171	u16	d_id;	/* device ID */
 172	u8	rev;
 173	u8	prog_intf;
 174	u8	subclass;
 175	u8	base_class;
 176	u32	subsystem_id;
 177	union win_slot_encoding win_slot;
 178	u32	ser;	/* serial number */
 179} __packed;
 180
 181enum pci_device_description_flags {
 182	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
 183	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
 184};
 185
 186struct pci_function_description2 {
 187	u16	v_id;	/* vendor ID */
 188	u16	d_id;	/* device ID */
 189	u8	rev;
 190	u8	prog_intf;
 191	u8	subclass;
 192	u8	base_class;
 193	u32	subsystem_id;
 194	union	win_slot_encoding win_slot;
 195	u32	ser;	/* serial number */
 196	u32	flags;
 197	u16	virtual_numa_node;
 198	u16	reserved;
 199} __packed;
 200
 201/**
 202 * struct hv_msi_desc
 203 * @vector:		IDT entry
 204 * @delivery_mode:	As defined in Intel's Programmer's
 205 *			Reference Manual, Volume 3, Chapter 8.
 206 * @vector_count:	Number of contiguous entries in the
 207 *			Interrupt Descriptor Table that are
 208 *			occupied by this Message-Signaled
 209 *			Interrupt. For "MSI", as first defined
 210 *			in PCI 2.2, this can be between 1 and
 211 *			32. For "MSI-X," as first defined in PCI
 212 *			3.0, this must be 1, as each MSI-X table
 213 *			entry would have its own descriptor.
 214 * @reserved:		Empty space
 215 * @cpu_mask:		All the target virtual processors.
 216 */
 217struct hv_msi_desc {
 218	u8	vector;
 219	u8	delivery_mode;
 220	u16	vector_count;
 221	u32	reserved;
 222	u64	cpu_mask;
 223} __packed;
 224
 225/**
 226 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 227 * @vector:		IDT entry
 228 * @delivery_mode:	As defined in Intel's Programmer's
 229 *			Reference Manual, Volume 3, Chapter 8.
 230 * @vector_count:	Number of contiguous entries in the
 231 *			Interrupt Descriptor Table that are
 232 *			occupied by this Message-Signaled
 233 *			Interrupt. For "MSI", as first defined
 234 *			in PCI 2.2, this can be between 1 and
 235 *			32. For "MSI-X," as first defined in PCI
 236 *			3.0, this must be 1, as each MSI-X table
 237 *			entry would have its own descriptor.
 238 * @processor_count:	number of bits enabled in array.
 239 * @processor_array:	All the target virtual processors.
 240 */
 241struct hv_msi_desc2 {
 242	u8	vector;
 243	u8	delivery_mode;
 244	u16	vector_count;
 245	u16	processor_count;
 246	u16	processor_array[32];
 247} __packed;
 248
 249/*
 250 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 251 *	Everything is the same as in 'hv_msi_desc2' except that the size of the
 252 *	'vector' field is larger to support bigger vector values. For ex: LPI
 253 *	vectors on ARM.
 254 */
 255struct hv_msi_desc3 {
 256	u32	vector;
 257	u8	delivery_mode;
 258	u8	reserved;
 259	u16	vector_count;
 260	u16	processor_count;
 261	u16	processor_array[32];
 262} __packed;
 263
 264/**
 265 * struct tran_int_desc
 266 * @reserved:		unused, padding
 267 * @vector_count:	same as in hv_msi_desc
 268 * @data:		This is the "data payload" value that is
 269 *			written by the device when it generates
 270 *			a message-signaled interrupt, either MSI
 271 *			or MSI-X.
 272 * @address:		This is the address to which the data
 273 *			payload is written on interrupt
 274 *			generation.
 275 */
 276struct tran_int_desc {
 277	u16	reserved;
 278	u16	vector_count;
 279	u32	data;
 280	u64	address;
 281} __packed;
 282
 283/*
 284 * A generic message format for virtual PCI.
 285 * Specific message formats are defined later in the file.
 286 */
 287
 288struct pci_message {
 289	u32 type;
 290} __packed;
 291
 292struct pci_child_message {
 293	struct pci_message message_type;
 294	union win_slot_encoding wslot;
 295} __packed;
 296
 297struct pci_incoming_message {
 298	struct vmpacket_descriptor hdr;
 299	struct pci_message message_type;
 300} __packed;
 301
 302struct pci_response {
 303	struct vmpacket_descriptor hdr;
 304	s32 status;			/* negative values are failures */
 305} __packed;
 306
 307struct pci_packet {
 308	void (*completion_func)(void *context, struct pci_response *resp,
 309				int resp_packet_size);
 310	void *compl_ctxt;
 311
 312	struct pci_message message[];
 313};
 314
 315/*
 316 * Specific message types supporting the PCI protocol.
 317 */
 318
 319/*
 320 * Version negotiation message. Sent from the guest to the host.
 321 * The guest is free to try different versions until the host
 322 * accepts the version.
 323 *
 324 * pci_version: The protocol version requested.
 325 * is_last_attempt: If TRUE, this is the last version guest will request.
 326 * reservedz: Reserved field, set to zero.
 327 */
 328
 329struct pci_version_request {
 330	struct pci_message message_type;
 331	u32 protocol_version;
 332} __packed;
 333
 334/*
 335 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 336 * bus (PCI Express port) is ready for action.
 337 */
 338
 339struct pci_bus_d0_entry {
 340	struct pci_message message_type;
 341	u32 reserved;
 342	u64 mmio_base;
 343} __packed;
 344
 345struct pci_bus_relations {
 346	struct pci_incoming_message incoming;
 347	u32 device_count;
 348	struct pci_function_description func[];
 349} __packed;
 350
 351struct pci_bus_relations2 {
 352	struct pci_incoming_message incoming;
 353	u32 device_count;
 354	struct pci_function_description2 func[];
 355} __packed;
 356
 357struct pci_q_res_req_response {
 358	struct vmpacket_descriptor hdr;
 359	s32 status;			/* negative values are failures */
 360	u32 probed_bar[PCI_STD_NUM_BARS];
 361} __packed;
 362
 363struct pci_set_power {
 364	struct pci_message message_type;
 365	union win_slot_encoding wslot;
 366	u32 power_state;		/* In Windows terms */
 367	u32 reserved;
 368} __packed;
 369
 370struct pci_set_power_response {
 371	struct vmpacket_descriptor hdr;
 372	s32 status;			/* negative values are failures */
 373	union win_slot_encoding wslot;
 374	u32 resultant_state;		/* In Windows terms */
 375	u32 reserved;
 376} __packed;
 377
 378struct pci_resources_assigned {
 379	struct pci_message message_type;
 380	union win_slot_encoding wslot;
 381	u8 memory_range[0x14][6];	/* not used here */
 382	u32 msi_descriptors;
 383	u32 reserved[4];
 384} __packed;
 385
 386struct pci_resources_assigned2 {
 387	struct pci_message message_type;
 388	union win_slot_encoding wslot;
 389	u8 memory_range[0x14][6];	/* not used here */
 390	u32 msi_descriptor_count;
 391	u8 reserved[70];
 392} __packed;
 393
 394struct pci_create_interrupt {
 395	struct pci_message message_type;
 396	union win_slot_encoding wslot;
 397	struct hv_msi_desc int_desc;
 398} __packed;
 399
 400struct pci_create_int_response {
 401	struct pci_response response;
 402	u32 reserved;
 403	struct tran_int_desc int_desc;
 404} __packed;
 405
 406struct pci_create_interrupt2 {
 407	struct pci_message message_type;
 408	union win_slot_encoding wslot;
 409	struct hv_msi_desc2 int_desc;
 410} __packed;
 411
 412struct pci_create_interrupt3 {
 413	struct pci_message message_type;
 414	union win_slot_encoding wslot;
 415	struct hv_msi_desc3 int_desc;
 416} __packed;
 417
 418struct pci_delete_interrupt {
 419	struct pci_message message_type;
 420	union win_slot_encoding wslot;
 421	struct tran_int_desc int_desc;
 422} __packed;
 423
 424/*
 425 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 426 */
 427struct pci_read_block {
 428	struct pci_message message_type;
 429	u32 block_id;
 430	union win_slot_encoding wslot;
 431	u32 bytes_requested;
 432} __packed;
 433
 434struct pci_read_block_response {
 435	struct vmpacket_descriptor hdr;
 436	u32 status;
 437	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
 438} __packed;
 439
 440/*
 441 * Note: the VM must pass a valid block id, wslot and byte_count.
 442 */
 443struct pci_write_block {
 444	struct pci_message message_type;
 445	u32 block_id;
 446	union win_slot_encoding wslot;
 447	u32 byte_count;
 448	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
 449} __packed;
 450
 451struct pci_dev_inval_block {
 452	struct pci_incoming_message incoming;
 453	union win_slot_encoding wslot;
 454	u64 block_mask;
 455} __packed;
 456
 457struct pci_dev_incoming {
 458	struct pci_incoming_message incoming;
 459	union win_slot_encoding wslot;
 460} __packed;
 461
 462struct pci_eject_response {
 463	struct pci_message message_type;
 464	union win_slot_encoding wslot;
 465	u32 status;
 466} __packed;
 467
 468static int pci_ring_size = (4 * PAGE_SIZE);
 469
 470/*
 471 * Driver specific state.
 472 */
 473
 474enum hv_pcibus_state {
 475	hv_pcibus_init = 0,
 476	hv_pcibus_probed,
 477	hv_pcibus_installed,
 478	hv_pcibus_removing,
 479	hv_pcibus_maximum
 480};
 481
 482struct hv_pcibus_device {
 483#ifdef CONFIG_X86
 484	struct pci_sysdata sysdata;
 485#elif defined(CONFIG_ARM64)
 486	struct pci_config_window sysdata;
 487#endif
 488	struct pci_host_bridge *bridge;
 489	struct fwnode_handle *fwnode;
 490	/* Protocol version negotiated with the host */
 491	enum pci_protocol_version_t protocol_version;
 
 
 492	enum hv_pcibus_state state;
 
 493	struct hv_device *hdev;
 494	resource_size_t low_mmio_space;
 495	resource_size_t high_mmio_space;
 496	struct resource *mem_config;
 497	struct resource *low_mmio_res;
 498	struct resource *high_mmio_res;
 499	struct completion *survey_event;
 500	struct pci_bus *pci_bus;
 501	spinlock_t config_lock;	/* Avoid two threads writing index page */
 502	spinlock_t device_list_lock;	/* Protect lists below */
 503	void __iomem *cfg_addr;
 504
 505	struct list_head children;
 506	struct list_head dr_list;
 507
 508	struct msi_domain_info msi_info;
 509	struct irq_domain *irq_domain;
 510
 511	spinlock_t retarget_msi_interrupt_lock;
 512
 513	struct workqueue_struct *wq;
 514
 515	/* Highest slot of child device with resources allocated */
 516	int wslot_res_allocated;
 517
 518	/* hypercall arg, must not cross page boundary */
 519	struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
 520
 521	/*
 522	 * Don't put anything here: retarget_msi_interrupt_params must be last
 523	 */
 524};
 525
 526/*
 527 * Tracks "Device Relations" messages from the host, which must be both
 528 * processed in order and deferred so that they don't run in the context
 529 * of the incoming packet callback.
 530 */
 531struct hv_dr_work {
 532	struct work_struct wrk;
 533	struct hv_pcibus_device *bus;
 534};
 535
 536struct hv_pcidev_description {
 537	u16	v_id;	/* vendor ID */
 538	u16	d_id;	/* device ID */
 539	u8	rev;
 540	u8	prog_intf;
 541	u8	subclass;
 542	u8	base_class;
 543	u32	subsystem_id;
 544	union	win_slot_encoding win_slot;
 545	u32	ser;	/* serial number */
 546	u32	flags;
 547	u16	virtual_numa_node;
 548};
 549
 550struct hv_dr_state {
 551	struct list_head list_entry;
 552	u32 device_count;
 553	struct hv_pcidev_description func[];
 554};
 555
 556enum hv_pcichild_state {
 557	hv_pcichild_init = 0,
 558	hv_pcichild_requirements,
 559	hv_pcichild_resourced,
 560	hv_pcichild_ejecting,
 561	hv_pcichild_maximum
 562};
 563
 564struct hv_pci_dev {
 565	/* List protected by pci_rescan_remove_lock */
 566	struct list_head list_entry;
 567	refcount_t refs;
 568	enum hv_pcichild_state state;
 569	struct pci_slot *pci_slot;
 570	struct hv_pcidev_description desc;
 571	bool reported_missing;
 572	struct hv_pcibus_device *hbus;
 573	struct work_struct wrk;
 574
 575	void (*block_invalidate)(void *context, u64 block_mask);
 576	void *invalidate_context;
 577
 578	/*
 579	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
 580	 * read it back, for each of the BAR offsets within config space.
 581	 */
 582	u32 probed_bar[PCI_STD_NUM_BARS];
 583};
 584
 585struct hv_pci_compl {
 586	struct completion host_event;
 587	s32 completion_status;
 588};
 589
 590static void hv_pci_onchannelcallback(void *context);
 591
 592#ifdef CONFIG_X86
 593#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
 594#define FLOW_HANDLER	handle_edge_irq
 595#define FLOW_NAME	"edge"
 596
 597static int hv_pci_irqchip_init(void)
 598{
 599	return 0;
 600}
 601
 602static struct irq_domain *hv_pci_get_root_domain(void)
 603{
 604	return x86_vector_domain;
 605}
 606
 607static unsigned int hv_msi_get_int_vector(struct irq_data *data)
 608{
 609	struct irq_cfg *cfg = irqd_cfg(data);
 610
 611	return cfg->vector;
 612}
 613
 614#define hv_msi_prepare		pci_msi_prepare
 615
 616/**
 617 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 618 * affinity.
 619 * @data:	Describes the IRQ
 620 *
 621 * Build new a destination for the MSI and make a hypercall to
 622 * update the Interrupt Redirection Table. "Device Logical ID"
 623 * is built out of this PCI bus's instance GUID and the function
 624 * number of the device.
 625 */
 626static void hv_arch_irq_unmask(struct irq_data *data)
 627{
 628	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
 629	struct hv_retarget_device_interrupt *params;
 630	struct tran_int_desc *int_desc;
 631	struct hv_pcibus_device *hbus;
 632	const struct cpumask *dest;
 633	cpumask_var_t tmp;
 634	struct pci_bus *pbus;
 635	struct pci_dev *pdev;
 636	unsigned long flags;
 637	u32 var_size = 0;
 638	int cpu, nr_bank;
 639	u64 res;
 640
 641	dest = irq_data_get_effective_affinity_mask(data);
 642	pdev = msi_desc_to_pci_dev(msi_desc);
 643	pbus = pdev->bus;
 644	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
 645	int_desc = data->chip_data;
 
 
 
 
 
 646
 647	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
 648
 649	params = &hbus->retarget_msi_interrupt_params;
 650	memset(params, 0, sizeof(*params));
 651	params->partition_id = HV_PARTITION_ID_SELF;
 652	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
 653	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
 654	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
 655	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 656			   (hbus->hdev->dev_instance.b[4] << 16) |
 657			   (hbus->hdev->dev_instance.b[7] << 8) |
 658			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
 659			   PCI_FUNC(pdev->devfn);
 660	params->int_target.vector = hv_msi_get_int_vector(data);
 661
 662	/*
 663	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
 664	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
 665	 * spurious interrupt storm. Not doing so does not seem to have a
 666	 * negative effect (yet?).
 667	 */
 668
 669	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
 670		/*
 671		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
 672		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
 673		 * with >64 VP support.
 674		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
 675		 * is not sufficient for this hypercall.
 676		 */
 677		params->int_target.flags |=
 678			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
 679
 680		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
 681			res = 1;
 682			goto exit_unlock;
 683		}
 684
 685		cpumask_and(tmp, dest, cpu_online_mask);
 686		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
 687		free_cpumask_var(tmp);
 688
 689		if (nr_bank <= 0) {
 690			res = 1;
 691			goto exit_unlock;
 692		}
 693
 694		/*
 695		 * var-sized hypercall, var-size starts after vp_mask (thus
 696		 * vp_set.format does not count, but vp_set.valid_bank_mask
 697		 * does).
 698		 */
 699		var_size = 1 + nr_bank;
 700	} else {
 701		for_each_cpu_and(cpu, dest, cpu_online_mask) {
 702			params->int_target.vp_mask |=
 703				(1ULL << hv_cpu_number_to_vp_number(cpu));
 704		}
 705	}
 706
 707	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
 708			      params, NULL);
 709
 710exit_unlock:
 711	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
 712
 713	/*
 714	 * During hibernation, when a CPU is offlined, the kernel tries
 715	 * to move the interrupt to the remaining CPUs that haven't
 716	 * been offlined yet. In this case, the below hv_do_hypercall()
 717	 * always fails since the vmbus channel has been closed:
 718	 * refer to cpu_disable_common() -> fixup_irqs() ->
 719	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
 720	 *
 721	 * Suppress the error message for hibernation because the failure
 722	 * during hibernation does not matter (at this time all the devices
 723	 * have been frozen). Note: the correct affinity info is still updated
 724	 * into the irqdata data structure in migrate_one_irq() ->
 725	 * irq_do_set_affinity(), so later when the VM resumes,
 726	 * hv_pci_restore_msi_state() is able to correctly restore the
 727	 * interrupt with the correct affinity.
 728	 */
 729	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
 730		dev_err(&hbus->hdev->device,
 731			"%s() failed: %#llx", __func__, res);
 732}
 733#elif defined(CONFIG_ARM64)
 734/*
 735 * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
 736 * of room at the start to allow for SPIs to be specified through ACPI and
 737 * starting with a power of two to satisfy power of 2 multi-MSI requirement.
 738 */
 739#define HV_PCI_MSI_SPI_START	64
 740#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
 741#define DELIVERY_MODE		0
 742#define FLOW_HANDLER		NULL
 743#define FLOW_NAME		NULL
 744#define hv_msi_prepare		NULL
 745
 746struct hv_pci_chip_data {
 747	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
 748	struct mutex	map_lock;
 749};
 750
 751/* Hyper-V vPCI MSI GIC IRQ domain */
 752static struct irq_domain *hv_msi_gic_irq_domain;
 753
 754/* Hyper-V PCI MSI IRQ chip */
 755static struct irq_chip hv_arm64_msi_irq_chip = {
 756	.name = "MSI",
 757	.irq_set_affinity = irq_chip_set_affinity_parent,
 758	.irq_eoi = irq_chip_eoi_parent,
 759	.irq_mask = irq_chip_mask_parent,
 760	.irq_unmask = irq_chip_unmask_parent
 761};
 762
 763static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
 764{
 765	return irqd->parent_data->hwirq;
 766}
 767
 768/*
 769 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 770 *			the bitmap.
 771 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 772 *			the parent domain.
 773 */
 774static void hv_pci_vec_irq_free(struct irq_domain *domain,
 775				unsigned int virq,
 776				unsigned int nr_bm_irqs,
 777				unsigned int nr_dom_irqs)
 778{
 779	struct hv_pci_chip_data *chip_data = domain->host_data;
 780	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 781	int first = d->hwirq - HV_PCI_MSI_SPI_START;
 782	int i;
 783
 784	mutex_lock(&chip_data->map_lock);
 785	bitmap_release_region(chip_data->spi_map,
 786			      first,
 787			      get_count_order(nr_bm_irqs));
 788	mutex_unlock(&chip_data->map_lock);
 789	for (i = 0; i < nr_dom_irqs; i++) {
 790		if (i)
 791			d = irq_domain_get_irq_data(domain, virq + i);
 792		irq_domain_reset_irq_data(d);
 793	}
 794
 795	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
 796}
 797
 798static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
 799				       unsigned int virq,
 800				       unsigned int nr_irqs)
 801{
 802	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
 803}
 804
 805static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
 806				       unsigned int nr_irqs,
 807				       irq_hw_number_t *hwirq)
 808{
 809	struct hv_pci_chip_data *chip_data = domain->host_data;
 810	int index;
 811
 812	/* Find and allocate region from the SPI bitmap */
 813	mutex_lock(&chip_data->map_lock);
 814	index = bitmap_find_free_region(chip_data->spi_map,
 815					HV_PCI_MSI_SPI_NR,
 816					get_count_order(nr_irqs));
 817	mutex_unlock(&chip_data->map_lock);
 818	if (index < 0)
 819		return -ENOSPC;
 820
 821	*hwirq = index + HV_PCI_MSI_SPI_START;
 822
 823	return 0;
 824}
 825
 826static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
 827					   unsigned int virq,
 828					   irq_hw_number_t hwirq)
 829{
 830	struct irq_fwspec fwspec;
 831	struct irq_data *d;
 832	int ret;
 833
 834	fwspec.fwnode = domain->parent->fwnode;
 835	fwspec.param_count = 2;
 836	fwspec.param[0] = hwirq;
 837	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
 838
 839	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
 840	if (ret)
 841		return ret;
 842
 843	/*
 844	 * Since the interrupt specifier is not coming from ACPI or DT, the
 845	 * trigger type will need to be set explicitly. Otherwise, it will be
 846	 * set to whatever is in the GIC configuration.
 847	 */
 848	d = irq_domain_get_irq_data(domain->parent, virq);
 849
 850	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
 851}
 852
 853static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
 854				       unsigned int virq, unsigned int nr_irqs,
 855				       void *args)
 856{
 857	irq_hw_number_t hwirq;
 858	unsigned int i;
 859	int ret;
 860
 861	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
 862	if (ret)
 863		return ret;
 864
 865	for (i = 0; i < nr_irqs; i++) {
 866		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
 867						      hwirq + i);
 868		if (ret) {
 869			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
 870			return ret;
 871		}
 872
 873		irq_domain_set_hwirq_and_chip(domain, virq + i,
 874					      hwirq + i,
 875					      &hv_arm64_msi_irq_chip,
 876					      domain->host_data);
 877		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
 878	}
 879
 880	return 0;
 881}
 882
 883/*
 884 * Pick the first cpu as the irq affinity that can be temporarily used for
 885 * composing MSI from the hypervisor. GIC will eventually set the right
 886 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 887 * cpu.
 888 */
 889static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
 890					  struct irq_data *irqd, bool reserve)
 891{
 892	int cpu = cpumask_first(cpu_present_mask);
 893
 894	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
 895
 896	return 0;
 897}
 898
 899static const struct irq_domain_ops hv_pci_domain_ops = {
 900	.alloc	= hv_pci_vec_irq_domain_alloc,
 901	.free	= hv_pci_vec_irq_domain_free,
 902	.activate = hv_pci_vec_irq_domain_activate,
 903};
 904
 905static int hv_pci_irqchip_init(void)
 906{
 907	static struct hv_pci_chip_data *chip_data;
 908	struct fwnode_handle *fn = NULL;
 909	int ret = -ENOMEM;
 910
 911	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
 912	if (!chip_data)
 913		return ret;
 914
 915	mutex_init(&chip_data->map_lock);
 916	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
 917	if (!fn)
 918		goto free_chip;
 919
 920	/*
 921	 * IRQ domain once enabled, should not be removed since there is no
 922	 * way to ensure that all the corresponding devices are also gone and
 923	 * no interrupts will be generated.
 924	 */
 925	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
 926							  fn, &hv_pci_domain_ops,
 927							  chip_data);
 928
 929	if (!hv_msi_gic_irq_domain) {
 930		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
 931		goto free_chip;
 932	}
 933
 934	return 0;
 935
 936free_chip:
 937	kfree(chip_data);
 938	if (fn)
 939		irq_domain_free_fwnode(fn);
 940
 941	return ret;
 942}
 943
 944static struct irq_domain *hv_pci_get_root_domain(void)
 945{
 946	return hv_msi_gic_irq_domain;
 947}
 948
 949/*
 950 * SPIs are used for interrupts of PCI devices and SPIs is managed via GICD
 951 * registers which Hyper-V already supports, so no hypercall needed.
 952 */
 953static void hv_arch_irq_unmask(struct irq_data *data) { }
 954#endif /* CONFIG_ARM64 */
 955
 956/**
 957 * hv_pci_generic_compl() - Invoked for a completion packet
 958 * @context:		Set up by the sender of the packet.
 959 * @resp:		The response packet
 960 * @resp_packet_size:	Size in bytes of the packet
 961 *
 962 * This function is used to trigger an event and report status
 963 * for any message for which the completion packet contains a
 964 * status and nothing else.
 965 */
 966static void hv_pci_generic_compl(void *context, struct pci_response *resp,
 967				 int resp_packet_size)
 968{
 969	struct hv_pci_compl *comp_pkt = context;
 970
 971	comp_pkt->completion_status = resp->status;
 972	complete(&comp_pkt->host_event);
 973}
 974
 975static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
 976						u32 wslot);
 977
 978static void get_pcichild(struct hv_pci_dev *hpdev)
 979{
 980	refcount_inc(&hpdev->refs);
 981}
 982
 983static void put_pcichild(struct hv_pci_dev *hpdev)
 984{
 985	if (refcount_dec_and_test(&hpdev->refs))
 986		kfree(hpdev);
 987}
 988
 989/*
 990 * There is no good way to get notified from vmbus_onoffer_rescind(),
 991 * so let's use polling here, since this is not a hot path.
 992 */
 993static int wait_for_response(struct hv_device *hdev,
 994			     struct completion *comp)
 995{
 996	while (true) {
 997		if (hdev->channel->rescind) {
 998			dev_warn_once(&hdev->device, "The device is gone.\n");
 999			return -ENODEV;
1000		}
1001
1002		if (wait_for_completion_timeout(comp, HZ / 10))
1003			break;
1004	}
1005
1006	return 0;
1007}
1008
1009/**
1010 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
1011 * @devfn:	The Linux representation of PCI slot
1012 *
1013 * Windows uses a slightly different representation of PCI slot.
1014 *
1015 * Return: The Windows representation
1016 */
1017static u32 devfn_to_wslot(int devfn)
1018{
1019	union win_slot_encoding wslot;
1020
1021	wslot.slot = 0;
1022	wslot.bits.dev = PCI_SLOT(devfn);
1023	wslot.bits.func = PCI_FUNC(devfn);
1024
1025	return wslot.slot;
1026}
1027
1028/**
1029 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
1030 * @wslot:	The Windows representation of PCI slot
1031 *
1032 * Windows uses a slightly different representation of PCI slot.
1033 *
1034 * Return: The Linux representation
1035 */
1036static int wslot_to_devfn(u32 wslot)
1037{
1038	union win_slot_encoding slot_no;
1039
1040	slot_no.slot = wslot;
1041	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
1042}
1043
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1044/*
1045 * PCI Configuration Space for these root PCI buses is implemented as a pair
1046 * of pages in memory-mapped I/O space.  Writing to the first page chooses
1047 * the PCI function being written or read.  Once the first page has been
1048 * written to, the following page maps in the entire configuration space of
1049 * the function.
1050 */
1051
1052/**
1053 * _hv_pcifront_read_config() - Internal PCI config read
1054 * @hpdev:	The PCI driver's representation of the device
1055 * @where:	Offset within config space
1056 * @size:	Size of the transfer
1057 * @val:	Pointer to the buffer receiving the data
1058 */
1059static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
1060				     int size, u32 *val)
1061{
 
 
 
1062	unsigned long flags;
1063	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
1064
1065	/*
1066	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
1067	 */
1068	if (where + size <= PCI_COMMAND) {
1069		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
1070	} else if (where >= PCI_CLASS_REVISION && where + size <=
1071		   PCI_CACHE_LINE_SIZE) {
1072		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
1073		       PCI_CLASS_REVISION, size);
1074	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
1075		   PCI_ROM_ADDRESS) {
1076		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
1077		       PCI_SUBSYSTEM_VENDOR_ID, size);
1078	} else if (where >= PCI_ROM_ADDRESS && where + size <=
1079		   PCI_CAPABILITY_LIST) {
1080		/* ROM BARs are unimplemented */
1081		*val = 0;
1082	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
1083		   PCI_INTERRUPT_PIN) {
1084		/*
1085		 * Interrupt Line and Interrupt PIN are hard-wired to zero
1086		 * because this front-end only supports message-signaled
1087		 * interrupts.
1088		 */
1089		*val = 0;
1090	} else if (where + size <= CFG_PAGE_SIZE) {
1091		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1092		/* Choose the function to be read. (See comment above) */
1093		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1094		/* Make sure the function was chosen before we start reading. */
1095		mb();
1096		/* Read from that function's config space. */
1097		switch (size) {
1098		case 1:
1099			*val = readb(addr);
1100			break;
1101		case 2:
1102			*val = readw(addr);
1103			break;
1104		default:
1105			*val = readl(addr);
1106			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1107		}
1108		/*
1109		 * Make sure the read was done before we release the spinlock
1110		 * allowing consecutive reads/writes.
1111		 */
1112		mb();
1113		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1114	} else {
1115		dev_err(&hpdev->hbus->hdev->device,
1116			"Attempt to read beyond a function's config space.\n");
1117	}
1118}
1119
1120static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
1121{
 
 
 
1122	u16 ret;
1123	unsigned long flags;
1124	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
1125			     PCI_VENDOR_ID;
1126
1127	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1128
1129	/* Choose the function to be read. (See comment above) */
1130	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1131	/* Make sure the function was chosen before we start reading. */
1132	mb();
1133	/* Read from that function's config space. */
1134	ret = readw(addr);
1135	/*
1136	 * mb() is not required here, because the spin_unlock_irqrestore()
1137	 * is a barrier.
1138	 */
 
 
 
 
 
 
 
 
 
 
 
 
1139
1140	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1141
1142	return ret;
1143}
1144
1145/**
1146 * _hv_pcifront_write_config() - Internal PCI config write
1147 * @hpdev:	The PCI driver's representation of the device
1148 * @where:	Offset within config space
1149 * @size:	Size of the transfer
1150 * @val:	The data being transferred
1151 */
1152static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
1153				      int size, u32 val)
1154{
 
 
 
1155	unsigned long flags;
1156	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
1157
1158	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
1159	    where + size <= PCI_CAPABILITY_LIST) {
1160		/* SSIDs and ROM BARs are read-only */
1161	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
1162		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1163		/* Choose the function to be written. (See comment above) */
1164		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1165		/* Make sure the function was chosen before we start writing. */
1166		wmb();
1167		/* Write to that function's config space. */
1168		switch (size) {
1169		case 1:
1170			writeb(val, addr);
1171			break;
1172		case 2:
1173			writew(val, addr);
1174			break;
1175		default:
1176			writel(val, addr);
1177			break;
1178		}
1179		/*
1180		 * Make sure the write was done before we release the spinlock
1181		 * allowing consecutive reads/writes.
1182		 */
1183		mb();
1184		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1185	} else {
1186		dev_err(&hpdev->hbus->hdev->device,
1187			"Attempt to write beyond a function's config space.\n");
1188	}
1189}
1190
1191/**
1192 * hv_pcifront_read_config() - Read configuration space
1193 * @bus: PCI Bus structure
1194 * @devfn: Device/function
1195 * @where: Offset from base
1196 * @size: Byte/word/dword
1197 * @val: Value to be read
1198 *
1199 * Return: PCIBIOS_SUCCESSFUL on success
1200 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1201 */
1202static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
1203				   int where, int size, u32 *val)
1204{
1205	struct hv_pcibus_device *hbus =
1206		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1207	struct hv_pci_dev *hpdev;
1208
1209	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1210	if (!hpdev)
1211		return PCIBIOS_DEVICE_NOT_FOUND;
1212
1213	_hv_pcifront_read_config(hpdev, where, size, val);
1214
1215	put_pcichild(hpdev);
1216	return PCIBIOS_SUCCESSFUL;
1217}
1218
1219/**
1220 * hv_pcifront_write_config() - Write configuration space
1221 * @bus: PCI Bus structure
1222 * @devfn: Device/function
1223 * @where: Offset from base
1224 * @size: Byte/word/dword
1225 * @val: Value to be written to device
1226 *
1227 * Return: PCIBIOS_SUCCESSFUL on success
1228 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1229 */
1230static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
1231				    int where, int size, u32 val)
1232{
1233	struct hv_pcibus_device *hbus =
1234	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1235	struct hv_pci_dev *hpdev;
1236
1237	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1238	if (!hpdev)
1239		return PCIBIOS_DEVICE_NOT_FOUND;
1240
1241	_hv_pcifront_write_config(hpdev, where, size, val);
1242
1243	put_pcichild(hpdev);
1244	return PCIBIOS_SUCCESSFUL;
1245}
1246
1247/* PCIe operations */
1248static struct pci_ops hv_pcifront_ops = {
1249	.read  = hv_pcifront_read_config,
1250	.write = hv_pcifront_write_config,
1251};
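
/*
 * The PCI core routes every config access for this bus through the ops
 * above. For instance, a device driver's
 *
 *	u16 cmd;
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *
 * ends up in hv_pcifront_read_config() with where = PCI_COMMAND and
 * size = 2 (standard PCI core behavior, noted here for orientation).
 */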
1252
1253/*
1254 * Paravirtual backchannel
1255 *
1256 * Hyper-V SR-IOV provides a backchannel mechanism in software for
1257 * communication between a VF driver and a PF driver.  These
1258 * "configuration blocks" are similar in concept to PCI configuration space,
1259 * but instead of doing reads and writes in 32-bit chunks through a very slow
1260 * path, packets of up to 128 bytes can be sent or received asynchronously.
1261 *
1262 * Nearly every SR-IOV device contains just such a communications channel in
1263 * hardware, so using this one in software is usually optional.  Using the
1264 * software channel, however, allows driver implementers to leverage software
1265 * tools that fuzz the communications channel looking for vulnerabilities.
1266 *
1267 * The usage model for these packets puts the responsibility for reading or
1268 * writing on the VF driver.  The VF driver sends a read or a write packet,
1269 * indicating which "block" is being referred to by number.
1270 *
1271 * If the PF driver wishes to initiate communication, it can "invalidate" one or
1272 * more of the first 64 blocks.  This invalidation is delivered via a callback
1273 * supplied to this driver by the VF driver.
1274 *
1275 * No protocol is implied, except that supplied by the PF and VF drivers.
1276 */
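
/*
 * A sketch of the VF-driver side of this backchannel, assuming the
 * hvpci_block_ops table from linux/hyperv.h that is wired up to the
 * functions below; the block ID, buffer, and "example_" name are
 * illustrative only.
 */
static int __maybe_unused example_fetch_block0(struct pci_dev *pdev,
					       void *buf, unsigned int len)
{
	unsigned int bytes_returned;

	/* Ask the PF driver for config block 0 (layout is PF-defined). */
	return hvpci_block_ops.read_block(pdev, buf, len, 0,
					  &bytes_returned);
}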
1277
1278struct hv_read_config_compl {
1279	struct hv_pci_compl comp_pkt;
1280	void *buf;
1281	unsigned int len;
1282	unsigned int bytes_returned;
1283};
1284
1285/**
1286 * hv_pci_read_config_compl() - Invoked when a response packet
1287 * for a read config block operation arrives.
1288 * @context:		Identifies the read config operation
1289 * @resp:		The response packet itself
1290 * @resp_packet_size:	Size in bytes of the response packet
1291 */
1292static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
1293				     int resp_packet_size)
1294{
1295	struct hv_read_config_compl *comp = context;
1296	struct pci_read_block_response *read_resp =
1297		(struct pci_read_block_response *)resp;
1298	unsigned int data_len, hdr_len;
1299
1300	hdr_len = offsetof(struct pci_read_block_response, bytes);
1301	if (resp_packet_size < hdr_len) {
1302		comp->comp_pkt.completion_status = -1;
1303		goto out;
1304	}
1305
1306	data_len = resp_packet_size - hdr_len;
1307	if (data_len > 0 && read_resp->status == 0) {
1308		comp->bytes_returned = min(comp->len, data_len);
1309		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
1310	} else {
1311		comp->bytes_returned = 0;
1312	}
1313
1314	comp->comp_pkt.completion_status = read_resp->status;
1315out:
1316	complete(&comp->comp_pkt.host_event);
1317}
1318
1319/**
1320 * hv_read_config_block() - Sends a read config block request to
1321 * the back-end driver running in the Hyper-V parent partition.
1322 * @pdev:		The PCI driver's representation for this device.
1323 * @buf:		Buffer into which the config block will be copied.
1324 * @len:		Size in bytes of buf.
1325 * @block_id:		Identifies the config block which has been requested.
1326 * @bytes_returned:	Size which came back from the back-end driver.
1327 *
1328 * Return: 0 on success, -errno on failure
1329 */
1330static int hv_read_config_block(struct pci_dev *pdev, void *buf,
1331				unsigned int len, unsigned int block_id,
1332				unsigned int *bytes_returned)
1333{
1334	struct hv_pcibus_device *hbus =
1335		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1336			     sysdata);
1337	struct {
1338		struct pci_packet pkt;
1339		char buf[sizeof(struct pci_read_block)];
1340	} pkt;
1341	struct hv_read_config_compl comp_pkt;
1342	struct pci_read_block *read_blk;
1343	int ret;
1344
1345	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1346		return -EINVAL;
1347
1348	init_completion(&comp_pkt.comp_pkt.host_event);
1349	comp_pkt.buf = buf;
1350	comp_pkt.len = len;
1351
1352	memset(&pkt, 0, sizeof(pkt));
1353	pkt.pkt.completion_func = hv_pci_read_config_compl;
1354	pkt.pkt.compl_ctxt = &comp_pkt;
1355	read_blk = (struct pci_read_block *)&pkt.pkt.message;
1356	read_blk->message_type.type = PCI_READ_BLOCK;
1357	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1358	read_blk->block_id = block_id;
1359	read_blk->bytes_requested = len;
1360
1361	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
1362			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
1363			       VM_PKT_DATA_INBAND,
1364			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1365	if (ret)
1366		return ret;
1367
1368	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
1369	if (ret)
1370		return ret;
1371
1372	if (comp_pkt.comp_pkt.completion_status != 0 ||
1373	    comp_pkt.bytes_returned == 0) {
1374		dev_err(&hbus->hdev->device,
1375			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
1376			comp_pkt.comp_pkt.completion_status,
1377			comp_pkt.bytes_returned);
1378		return -EIO;
1379	}
1380
1381	*bytes_returned = comp_pkt.bytes_returned;
1382	return 0;
1383}
1384
1385/**
1386 * hv_pci_write_config_compl() - Invoked when a response packet for a write
1387 * config block operation arrives.
1388 * @context:		Identifies the write config operation
1389 * @resp:		The response packet itself
1390 * @resp_packet_size:	Size in bytes of the response packet
1391 */
1392static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
1393				      int resp_packet_size)
1394{
1395	struct hv_pci_compl *comp_pkt = context;
1396
1397	comp_pkt->completion_status = resp->status;
1398	complete(&comp_pkt->host_event);
1399}
1400
1401/**
1402 * hv_write_config_block() - Sends a write config block request to the
1403 * back-end driver running in the Hyper-V parent partition.
1404 * @pdev:		The PCI driver's representation for this device.
1405 * @buf:		Buffer from which the config block will be copied.
1406 * @len:		Size in bytes of buf.
1407 * @block_id:		Identifies the config block which is being written.
1408 *
1409 * Return: 0 on success, -errno on failure
1410 */
1411static int hv_write_config_block(struct pci_dev *pdev, void *buf,
1412				unsigned int len, unsigned int block_id)
1413{
1414	struct hv_pcibus_device *hbus =
1415		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1416			     sysdata);
1417	struct {
1418		struct pci_packet pkt;
1419		char buf[sizeof(struct pci_write_block)];
1420		u32 reserved;
1421	} pkt;
1422	struct hv_pci_compl comp_pkt;
1423	struct pci_write_block *write_blk;
1424	u32 pkt_size;
1425	int ret;
1426
1427	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1428		return -EINVAL;
1429
1430	init_completion(&comp_pkt.host_event);
1431
1432	memset(&pkt, 0, sizeof(pkt));
1433	pkt.pkt.completion_func = hv_pci_write_config_compl;
1434	pkt.pkt.compl_ctxt = &comp_pkt;
1435	write_blk = (struct pci_write_block *)&pkt.pkt.message;
1436	write_blk->message_type.type = PCI_WRITE_BLOCK;
1437	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1438	write_blk->block_id = block_id;
1439	write_blk->byte_count = len;
1440	memcpy(write_blk->bytes, buf, len);
1441	pkt_size = offsetof(struct pci_write_block, bytes) + len;
1442	/*
1443	 * This quirk is required on some hosts shipped around 2018, because
1444	 * these hosts don't check the pkt_size correctly (new hosts have been
1445	 * fixed since early 2019). The quirk is also safe on very old hosts
1446	 * and new hosts, because, on them, what really matters is the length
1447	 * specified in write_blk->byte_count.
1448	 */
1449	pkt_size += sizeof(pkt.reserved);
1450
1451	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
1452			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
1453			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1454	if (ret)
1455		return ret;
1456
1457	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
1458	if (ret)
1459		return ret;
1460
1461	if (comp_pkt.completion_status != 0) {
1462		dev_err(&hbus->hdev->device,
1463			"Write Config Block failed: 0x%x\n",
1464			comp_pkt.completion_status);
1465		return -EIO;
1466	}
1467
1468	return 0;
1469}
1470
1471/**
1472 * hv_register_block_invalidate() - Invoked when a config block invalidation
1473 * arrives from the back-end driver.
1474 * @pdev:		The PCI driver's representation for this device.
1475 * @context:		Identifies the device.
1476 * @block_invalidate:	Identifies all of the blocks being invalidated.
1477 *
1478 * Return: 0 on success, -errno on failure
1479 */
1480static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1481					void (*block_invalidate)(void *context,
1482								 u64 block_mask))
1483{
1484	struct hv_pcibus_device *hbus =
1485		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1486			     sysdata);
1487	struct hv_pci_dev *hpdev;
1488
1489	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1490	if (!hpdev)
1491		return -ENODEV;
1492
1493	hpdev->block_invalidate = block_invalidate;
1494	hpdev->invalidate_context = context;
1495
1496	put_pcichild(hpdev);
1497	return 0;
1498
1499}
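
/*
 * Matching sketch for the invalidation direction, under the same
 * assumptions as the read sketch above: the callback registered here
 * runs from the VMBus channel callback (see the PCI_INVALIDATE_BLOCK
 * case in hv_pci_onchannelcallback() below), so it must not sleep.
 */
static void __maybe_unused example_block_invalidate(void *context,
						    u64 block_mask)
{
	/* Bits 0-63 of block_mask flag the config blocks now stale. */
	if (block_mask & BIT_ULL(0))
		pr_info("example: config block 0 invalidated by the PF\n");
}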
1500
1501/* Interrupt management hooks */
1502static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1503			     struct tran_int_desc *int_desc)
1504{
1505	struct pci_delete_interrupt *int_pkt;
1506	struct {
1507		struct pci_packet pkt;
1508		u8 buffer[sizeof(struct pci_delete_interrupt)];
1509	} ctxt;
1510
1511	if (!int_desc->vector_count) {
1512		kfree(int_desc);
1513		return;
1514	}
1515	memset(&ctxt, 0, sizeof(ctxt));
1516	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
1517	int_pkt->message_type.type =
1518		PCI_DELETE_INTERRUPT_MESSAGE;
1519	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1520	int_pkt->int_desc = *int_desc;
1521	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1522			 0, VM_PKT_DATA_INBAND, 0);
1523	kfree(int_desc);
1524}
1525
1526/**
1527 * hv_msi_free() - Free the MSI.
1528 * @domain:	The interrupt domain pointer
1529 * @info:	Extra MSI-related context
1530 * @irq:	Identifies the IRQ.
1531 *
1532 * The Hyper-V parent partition and hypervisor are tracking the
1533 * messages that are in use, keeping the interrupt redirection
1534 * table up to date.  This callback sends a message that frees
1535 * the IRT entry and related tracking nonsense.
1536 */
1537static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1538			unsigned int irq)
1539{
1540	struct hv_pcibus_device *hbus;
1541	struct hv_pci_dev *hpdev;
1542	struct pci_dev *pdev;
1543	struct tran_int_desc *int_desc;
1544	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1545	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1546
1547	pdev = msi_desc_to_pci_dev(msi);
1548	hbus = info->data;
1549	int_desc = irq_data_get_irq_chip_data(irq_data);
1550	if (!int_desc)
1551		return;
1552
1553	irq_data->chip_data = NULL;
1554	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1555	if (!hpdev) {
1556		kfree(int_desc);
1557		return;
1558	}
1559
1560	hv_int_desc_free(hpdev, int_desc);
1561	put_pcichild(hpdev);
1562}
1563
1564static void hv_irq_mask(struct irq_data *data)
1565{
1566	pci_msi_mask_irq(data);
1567	if (data->parent_data->chip->irq_mask)
1568		irq_chip_mask_parent(data);
1569}
1570
1571static void hv_irq_unmask(struct irq_data *data)
1572{
1573	hv_arch_irq_unmask(data);
1574
1575	if (data->parent_data->chip->irq_unmask)
1576		irq_chip_unmask_parent(data);
1577	pci_msi_unmask_irq(data);
1578}
1579
1580struct compose_comp_ctxt {
1581	struct hv_pci_compl comp_pkt;
1582	struct tran_int_desc int_desc;
1583};
1584
1585static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1586				 int resp_packet_size)
1587{
1588	struct compose_comp_ctxt *comp_pkt = context;
1589	struct pci_create_int_response *int_resp =
1590		(struct pci_create_int_response *)resp;
1591
1592	if (resp_packet_size < sizeof(*int_resp)) {
1593		comp_pkt->comp_pkt.completion_status = -1;
1594		goto out;
1595	}
1596	comp_pkt->comp_pkt.completion_status = resp->status;
1597	comp_pkt->int_desc = int_resp->int_desc;
1598out:
1599	complete(&comp_pkt->comp_pkt.host_event);
1600}
1601
1602static u32 hv_compose_msi_req_v1(
1603	struct pci_create_interrupt *int_pkt,
1604	u32 slot, u8 vector, u16 vector_count)
1605{
1606	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1607	int_pkt->wslot.slot = slot;
1608	int_pkt->int_desc.vector = vector;
1609	int_pkt->int_desc.vector_count = vector_count;
1610	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1611
1612	/*
1613	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1614	 * hv_irq_unmask().
1615	 */
1616	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1617
1618	return sizeof(*int_pkt);
1619}
1620
1621/*
1622 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
1623 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
1624 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
1625 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
1626 * not irrelevant because Hyper-V chooses the physical CPU to handle the
1627 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
1628 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
1629 * but assigning too many vPCI device interrupts to the same pCPU can cause a
1630 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
1631 * to spread out the pCPUs that it selects.
1632 *
1633 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
1634 * to always return the same dummy vCPU, because a second call to
1635 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
1636 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
1637 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
1638 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
1639 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
1640 * the same pCPU, even though the vCPUs will be spread out by later calls
1641 * to hv_irq_unmask(); that is the best we can do now.
1642 *
1643 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1644 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1645 * enhancement is planned for a future version. With that enhancement, the
1646 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1647 * device will be spread across multiple pCPUs.
1648 */
1649
1650/*
1651 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1652 * by subsequent retarget in hv_irq_unmask().
1653 */
1654static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1655{
1656	return cpumask_first_and(affinity, cpu_online_mask);
1657}
1658
1659/*
1660 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
1661 */
1662static int hv_compose_multi_msi_req_get_cpu(void)
1663{
1664	static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1665
1666	/* -1 means starting with CPU 0 */
1667	static int cpu_next = -1;
1668
1669	unsigned long flags;
1670	int cpu;
1671
1672	spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1673
1674	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
1675				     false);
1676	cpu = cpu_next;
1677
1678	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1679
1680	return cpu;
1681}
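
/*
 * For example, with four online CPUs, successive calls return 0, 1, 2,
 * 3, 0, ... so consecutive multi-MSI devices seed Hyper-V's pCPU choice
 * from different vCPUs; cpumask_next_wrap() also skips over CPUs that
 * have gone offline in the meantime.
 */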
1682
1683static u32 hv_compose_msi_req_v2(
1684	struct pci_create_interrupt2 *int_pkt, int cpu,
1685	u32 slot, u8 vector, u16 vector_count)
1686{
1687	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1688	int_pkt->wslot.slot = slot;
1689	int_pkt->int_desc.vector = vector;
1690	int_pkt->int_desc.vector_count = vector_count;
1691	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1692	int_pkt->int_desc.processor_array[0] =
1693		hv_cpu_number_to_vp_number(cpu);
1694	int_pkt->int_desc.processor_count = 1;
1695
1696	return sizeof(*int_pkt);
1697}
1698
1699static u32 hv_compose_msi_req_v3(
1700	struct pci_create_interrupt3 *int_pkt, int cpu,
1701	u32 slot, u32 vector, u16 vector_count)
1702{
1703	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1704	int_pkt->wslot.slot = slot;
1705	int_pkt->int_desc.vector = vector;
1706	int_pkt->int_desc.reserved = 0;
1707	int_pkt->int_desc.vector_count = vector_count;
1708	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1709	int_pkt->int_desc.processor_array[0] =
1710		hv_cpu_number_to_vp_number(cpu);
1711	int_pkt->int_desc.processor_count = 1;
1712
1713	return sizeof(*int_pkt);
1714}
1715
1716/**
1717 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1718 * @data:	Everything about this MSI
1719 * @msg:	Buffer that is filled in by this function
1720 *
1721 * This function unpacks the IRQ looking for target CPU set, IDT
1722 * vector and mode and sends a message to the parent partition
1723 * asking for a mapping for that tuple in this partition.  The
1724 * response supplies a data value and address to which that data
1725 * should be written to trigger that interrupt.
1726 */
1727static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1728{
1729	struct hv_pcibus_device *hbus;
1730	struct vmbus_channel *channel;
1731	struct hv_pci_dev *hpdev;
1732	struct pci_bus *pbus;
1733	struct pci_dev *pdev;
1734	const struct cpumask *dest;
1735	struct compose_comp_ctxt comp;
1736	struct tran_int_desc *int_desc;
1737	struct msi_desc *msi_desc;
1738	/*
1739	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1740	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1741	 */
1742	u16 vector_count;
1743	u32 vector;
1744	struct {
1745		struct pci_packet pci_pkt;
1746		union {
1747			struct pci_create_interrupt v1;
1748			struct pci_create_interrupt2 v2;
1749			struct pci_create_interrupt3 v3;
1750		} int_pkts;
1751	} __packed ctxt;
1752	bool multi_msi;
1753	u64 trans_id;
1754	u32 size;
1755	int ret;
1756	int cpu;
1757
1758	msi_desc  = irq_data_get_msi_desc(data);
1759	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1760		    msi_desc->nvec_used > 1;
1761
1762	/* Reuse the previous allocation */
1763	if (data->chip_data && multi_msi) {
1764		int_desc = data->chip_data;
1765		msg->address_hi = int_desc->address >> 32;
1766		msg->address_lo = int_desc->address & 0xffffffff;
1767		msg->data = int_desc->data;
1768		return;
1769	}
1770
1771	pdev = msi_desc_to_pci_dev(msi_desc);
1772	dest = irq_data_get_effective_affinity_mask(data);
1773	pbus = pdev->bus;
1774	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1775	channel = hbus->hdev->channel;
1776	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1777	if (!hpdev)
1778		goto return_null_message;
1779
1780	/* Free any previous message that might have already been composed. */
1781	if (data->chip_data && !multi_msi) {
1782		int_desc = data->chip_data;
1783		data->chip_data = NULL;
1784		hv_int_desc_free(hpdev, int_desc);
1785	}
1786
1787	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1788	if (!int_desc)
1789		goto drop_reference;
1790
1791	if (multi_msi) {
1792		/*
1793		 * If this is not the first MSI of Multi MSI, we already have
1794		 * a mapping.  Can exit early.
1795		 */
1796		if (msi_desc->irq != data->irq) {
1797			data->chip_data = int_desc;
1798			int_desc->address = msi_desc->msg.address_lo |
1799					    (u64)msi_desc->msg.address_hi << 32;
1800			int_desc->data = msi_desc->msg.data +
1801					 (data->irq - msi_desc->irq);
1802			msg->address_hi = msi_desc->msg.address_hi;
1803			msg->address_lo = msi_desc->msg.address_lo;
1804			msg->data = int_desc->data;
1805			put_pcichild(hpdev);
1806			return;
1807		}
1808		/*
1809		 * The vector we select here is a dummy value.  The correct
1810		 * value gets sent to the hypervisor in unmask().  This needs
1811		 * to be aligned with the count, and also not zero.  Multi-msi
1812		 * is powers of 2 up to 32, so 32 will always work here.
1813		 */
1814		vector = 32;
1815		vector_count = msi_desc->nvec_used;
1816		cpu = hv_compose_multi_msi_req_get_cpu();
1817	} else {
1818		vector = hv_msi_get_int_vector(data);
1819		vector_count = 1;
1820		cpu = hv_compose_msi_req_get_cpu(dest);
1821	}
1822
1823	/*
1824	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1825	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1826	 * for better readability.
1827	 */
1828	memset(&ctxt, 0, sizeof(ctxt));
1829	init_completion(&comp.comp_pkt.host_event);
1830	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1831	ctxt.pci_pkt.compl_ctxt = &comp;
1832
1833	switch (hbus->protocol_version) {
1834	case PCI_PROTOCOL_VERSION_1_1:
1835		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1836					hpdev->desc.win_slot.slot,
1837					(u8)vector,
1838					vector_count);
1839		break;
1840
1841	case PCI_PROTOCOL_VERSION_1_2:
1842	case PCI_PROTOCOL_VERSION_1_3:
1843		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1844					cpu,
1845					hpdev->desc.win_slot.slot,
1846					(u8)vector,
1847					vector_count);
1848		break;
1849
1850	case PCI_PROTOCOL_VERSION_1_4:
1851		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1852					cpu,
1853					hpdev->desc.win_slot.slot,
1854					vector,
1855					vector_count);
1856		break;
1857
1858	default:
1859		/* As we only negotiate protocol versions known to this driver,
1860		 * this path should never hit. However, this is not a hot
1861		 * path so we print a message to aid future updates.
1862		 */
1863		dev_err(&hbus->hdev->device,
1864			"Unexpected vPCI protocol, update driver.");
1865		goto free_int_desc;
1866	}
1867
1868	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1869				     size, (unsigned long)&ctxt.pci_pkt,
1870				     &trans_id, VM_PKT_DATA_INBAND,
1871				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1872	if (ret) {
1873		dev_err(&hbus->hdev->device,
1874			"Sending request for interrupt failed: 0x%x",
1875			comp.comp_pkt.completion_status);
1876		goto free_int_desc;
1877	}
1878
1879	/*
1880	 * Prevents hv_pci_onchannelcallback() from running concurrently
1881	 * in the tasklet.
1882	 */
1883	tasklet_disable_in_atomic(&channel->callback_event);
1884
1885	/*
1886	 * Since this function is called with IRQ locks held, can't
1887	 * do normal wait for completion; instead poll.
1888	 */
1889	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1890		unsigned long flags;
1891
1892		/* 0xFFFF means an invalid PCI VENDOR ID. */
1893		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1894			dev_err_once(&hbus->hdev->device,
1895				     "the device has gone\n");
1896			goto enable_tasklet;
1897		}
1898
1899		/*
1900		 * Make sure that the ring buffer data structure doesn't get
1901		 * freed while we dereference the ring buffer pointer.  Test
1902		 * for the channel's onchannel_callback being NULL within a
1903		 * sched_lock critical section.  See also the inline comments
1904		 * in vmbus_reset_channel_cb().
1905		 */
1906		spin_lock_irqsave(&channel->sched_lock, flags);
1907		if (unlikely(channel->onchannel_callback == NULL)) {
1908			spin_unlock_irqrestore(&channel->sched_lock, flags);
1909			goto enable_tasklet;
1910		}
1911		hv_pci_onchannelcallback(hbus);
1912		spin_unlock_irqrestore(&channel->sched_lock, flags);
1913
1914		if (hpdev->state == hv_pcichild_ejecting) {
1915			dev_err_once(&hbus->hdev->device,
1916				     "the device is being ejected\n");
1917			goto enable_tasklet;
1918		}
1919
1920		udelay(100);
1921	}
1922
1923	tasklet_enable(&channel->callback_event);
1924
1925	if (comp.comp_pkt.completion_status < 0) {
1926		dev_err(&hbus->hdev->device,
1927			"Request for interrupt failed: 0x%x",
1928			comp.comp_pkt.completion_status);
1929		goto free_int_desc;
1930	}
1931
1932	/*
1933	 * Record the assignment so that this can be unwound later. Using
1934	 * irq_set_chip_data() here would be appropriate, but the lock it takes
1935	 * is already held.
1936	 */
1937	*int_desc = comp.int_desc;
1938	data->chip_data = int_desc;
1939
1940	/* Pass up the result. */
1941	msg->address_hi = comp.int_desc.address >> 32;
1942	msg->address_lo = comp.int_desc.address & 0xffffffff;
1943	msg->data = comp.int_desc.data;
1944
1945	put_pcichild(hpdev);
1946	return;
1947
1948enable_tasklet:
1949	tasklet_enable(&channel->callback_event);
1950	/*
1951	 * The completion packet on the stack becomes invalid after 'return';
1952	 * remove the ID from the VMbus requestor if the identifier is still
1953	 * mapped to/associated with the packet.  (The identifier could have
1954	 * been 're-used', i.e., already removed and (re-)mapped.)
1955	 *
1956	 * Cf. hv_pci_onchannelcallback().
1957	 */
1958	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
1959free_int_desc:
1960	kfree(int_desc);
1961drop_reference:
1962	put_pcichild(hpdev);
1963return_null_message:
1964	msg->address_hi = 0;
1965	msg->address_lo = 0;
1966	msg->data = 0;
1967}
1968
1969/* HW Interrupt Chip Descriptor */
1970static struct irq_chip hv_msi_irq_chip = {
1971	.name			= "Hyper-V PCIe MSI",
1972	.irq_compose_msi_msg	= hv_compose_msi_msg,
1973	.irq_set_affinity	= irq_chip_set_affinity_parent,
1974#ifdef CONFIG_X86
1975	.irq_ack		= irq_chip_ack_parent,
1976#elif defined(CONFIG_ARM64)
1977	.irq_eoi		= irq_chip_eoi_parent,
1978#endif
1979	.irq_mask		= hv_irq_mask,
1980	.irq_unmask		= hv_irq_unmask,
1981};
1982
1983static struct msi_domain_ops hv_msi_ops = {
1984	.msi_prepare	= hv_msi_prepare,
1985	.msi_free	= hv_msi_free,
1986};
1987
1988/**
1989 * hv_pcie_init_irq_domain() - Initialize IRQ domain
1990 * @hbus:	The root PCI bus
1991 *
1992 * This function creates an IRQ domain which will be used for
1993 * interrupts from devices that have been passed through.  These
1994 * devices only support MSI and MSI-X, not line-based interrupts
1995 * or simulations of line-based interrupts through PCIe's
1996 * fabric-layer messages.  Because interrupts are remapped, we
1997 * can support multi-message MSI here.
1998 *
1999 * Return: '0' on success and error value on failure
2000 */
2001static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2002{
2003	hbus->msi_info.chip = &hv_msi_irq_chip;
2004	hbus->msi_info.ops = &hv_msi_ops;
2005	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
2006		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
2007		MSI_FLAG_PCI_MSIX);
2008	hbus->msi_info.handler = FLOW_HANDLER;
2009	hbus->msi_info.handler_name = FLOW_NAME;
2010	hbus->msi_info.data = hbus;
2011	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
2012						     &hbus->msi_info,
2013						     hv_pci_get_root_domain());
2014	if (!hbus->irq_domain) {
2015		dev_err(&hbus->hdev->device,
2016			"Failed to build an MSI IRQ domain\n");
2017		return -ENODEV;
2018	}
2019
2020	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2021
2022	return 0;
2023}
2024
2025/**
2026 * get_bar_size() - Get the address space consumed by a BAR
2027 * @bar_val:	Value that a BAR returned after -1 was written
2028 *              to it.
2029 *
2030 * This function returns the size of the BAR, rounded up to 1
2031 * page.  It has to be rounded up because the hypervisor's page
2032 * table entry that maps the BAR into the VM can't specify an
2033 * offset within a page.  The invariant is that the hypervisor
2034 * must place any BARs of smaller than page length at the
2035 * beginning of a page.
2036 *
2037 * Return:	Size in bytes of the consumed MMIO space.
2038 */
2039static u64 get_bar_size(u64 bar_val)
2040{
2041	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2042			PAGE_SIZE);
2043}
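
/*
 * Worked example: a 16 KiB 32-bit memory BAR probes as 0xffffc000 (plus
 * flag bits), and the caller extends it to 0xffffffffffffc000, so
 * 1 + ~bar_val = 0x4000; with 4 KiB pages round_up() leaves that as-is.
 * A 1 KiB BAR yields 0x400, which is then rounded up to PAGE_SIZE.
 */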
2044
2045/**
2046 * survey_child_resources() - Total all MMIO requirements
2047 * @hbus:	Root PCI bus, as understood by this driver
2048 */
2049static void survey_child_resources(struct hv_pcibus_device *hbus)
2050{
2051	struct hv_pci_dev *hpdev;
2052	resource_size_t bar_size = 0;
2053	unsigned long flags;
2054	struct completion *event;
2055	u64 bar_val;
2056	int i;
2057
2058	/* If nobody is waiting on the answer, don't compute it. */
2059	event = xchg(&hbus->survey_event, NULL);
2060	if (!event)
2061		return;
2062
2063	/* If the answer has already been computed, go with it. */
2064	if (hbus->low_mmio_space || hbus->high_mmio_space) {
2065		complete(event);
2066		return;
2067	}
2068
2069	spin_lock_irqsave(&hbus->device_list_lock, flags);
2070
2071	/*
2072	 * Due to an interesting quirk of the PCI spec, all memory regions
2073	 * for a child device are a power of 2 in size and aligned in memory,
2074	 * so it's sufficient to just add them up without tracking alignment.
2075	 */
2076	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2077		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2078			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2079				dev_err(&hbus->hdev->device,
2080					"There's an I/O BAR in this list!\n");
2081
2082			if (hpdev->probed_bar[i] != 0) {
2083				/*
2084				 * A probed BAR has all the upper bits set that
2085				 * can be changed.
2086				 */
2087
2088				bar_val = hpdev->probed_bar[i];
2089				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2090					bar_val |=
2091					((u64)hpdev->probed_bar[++i] << 32);
2092				else
2093					bar_val |= 0xffffffff00000000ULL;
2094
2095				bar_size = get_bar_size(bar_val);
2096
2097				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2098					hbus->high_mmio_space += bar_size;
2099				else
2100					hbus->low_mmio_space += bar_size;
2101			}
2102		}
2103	}
2104
2105	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2106	complete(event);
2107}
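
/*
 * Example: for one child with a 64-bit 1 MiB BAR and a 32-bit 16 KiB
 * BAR, this survey leaves high_mmio_space = 1 MiB and low_mmio_space =
 * 16 KiB; the probe path later sizes the bus's two MMIO allocations
 * from these totals.
 */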
2108
2109/**
2110 * prepopulate_bars() - Fill in BARs with defaults
2111 * @hbus:	Root PCI bus, as understood by this driver
2112 *
2113 * The core PCI driver code seems much, much happier if the BARs
2114 * for a device have values upon first scan. So fill them in.
2115 * The algorithm below works down from large sizes to small,
2116 * attempting to pack the assignments optimally. The assumption,
2117 * enforced in other parts of the code, is that the beginning of
2118 * the memory-mapped I/O space will be aligned on the largest
2119 * BAR size.
2120 */
2121static void prepopulate_bars(struct hv_pcibus_device *hbus)
2122{
2123	resource_size_t high_size = 0;
2124	resource_size_t low_size = 0;
2125	resource_size_t high_base = 0;
2126	resource_size_t low_base = 0;
2127	resource_size_t bar_size;
2128	struct hv_pci_dev *hpdev;
2129	unsigned long flags;
2130	u64 bar_val;
2131	u32 command;
2132	bool high;
2133	int i;
2134
2135	if (hbus->low_mmio_space) {
2136		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2137		low_base = hbus->low_mmio_res->start;
2138	}
2139
2140	if (hbus->high_mmio_space) {
2141		high_size = 1ULL <<
2142			(63 - __builtin_clzll(hbus->high_mmio_space));
2143		high_base = hbus->high_mmio_res->start;
2144	}
2145
2146	spin_lock_irqsave(&hbus->device_list_lock, flags);
2147
2148	/*
2149	 * Clear the memory enable bit, in case it's already set. This occurs
2150	 * in the suspend path of hibernation, where the device is suspended,
2151	 * resumed and suspended again: see hibernation_snapshot() and
2152	 * hibernation_platform_enter().
2153	 *
2154	 * If the memory enable bit is already set, Hyper-V silently ignores
2155	 * the below BAR updates, and the related PCI device driver can not
2156	 * work, because reading from the device register(s) always returns
2157	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2158	 */
2159	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2160		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2161		command &= ~PCI_COMMAND_MEMORY;
2162		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2163	}
2164
2165	/* Pick addresses for the BARs. */
2166	do {
2167		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2168			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2169				bar_val = hpdev->probed_bar[i];
2170				if (bar_val == 0)
2171					continue;
2172				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2173				if (high) {
2174					bar_val |=
2175						((u64)hpdev->probed_bar[i + 1]
2176						 << 32);
2177				} else {
2178					bar_val |= 0xffffffffULL << 32;
2179				}
2180				bar_size = get_bar_size(bar_val);
2181				if (high) {
2182					if (high_size != bar_size) {
2183						i++;
2184						continue;
2185					}
2186					_hv_pcifront_write_config(hpdev,
2187						PCI_BASE_ADDRESS_0 + (4 * i),
2188						4,
2189						(u32)(high_base & 0xffffff00));
2190					i++;
2191					_hv_pcifront_write_config(hpdev,
2192						PCI_BASE_ADDRESS_0 + (4 * i),
2193						4, (u32)(high_base >> 32));
2194					high_base += bar_size;
2195				} else {
2196					if (low_size != bar_size)
2197						continue;
2198					_hv_pcifront_write_config(hpdev,
2199						PCI_BASE_ADDRESS_0 + (4 * i),
2200						4,
2201						(u32)(low_base & 0xffffff00));
2202					low_base += bar_size;
2203				}
2204			}
2205			if (high_size <= 1 && low_size <= 1) {
2206				/*
2207				 * No need to set the PCI_COMMAND_MEMORY bit as
2208				 * the core PCI driver doesn't require the bit
2209				 * to be pre-set. Actually here we intentionally
2210				 * keep the bit off so that the PCI BAR probing
2211				 * in the core PCI driver doesn't cause Hyper-V
2212				 * to unnecessarily unmap/map the virtual BARs
2213				 * from/to the physical BARs multiple times.
2214				 * This reduces the VM boot time significantly
2215				 * if the BAR sizes are huge.
2216				 */
2217				break;
2218			}
2219		}
2220
2221		high_size >>= 1;
2222		low_size >>= 1;
2223	}  while (high_size || low_size);
2224
2225	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2226}
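
/*
 * Walk-through, assuming one child with a 64 KiB BAR 0 and a 4 KiB
 * BAR 2, both 32-bit: the first pass places only the 64 KiB BAR at
 * low_base; low_size then halves each pass until it reaches 4 KiB, at
 * which point BAR 2 is placed immediately after. Handing out addresses
 * in descending power-of-2 sizes keeps every BAR naturally aligned
 * without tracking alignment explicitly.
 */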
2227
2228/*
2229 * Assign entries in sysfs pci slot directory.
2230 *
2231 * Note that this function does not need to lock the children list
2232 * because it is called from pci_devices_present_work which
2233 * is serialized with hv_eject_device_work because they are on the
2234 * same ordered workqueue. Therefore hbus->children list will not change
2235 * even when pci_create_slot sleeps.
2236 */
2237static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2238{
2239	struct hv_pci_dev *hpdev;
2240	char name[SLOT_NAME_SIZE];
2241	int slot_nr;
2242
2243	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2244		if (hpdev->pci_slot)
2245			continue;
2246
2247		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2248		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2249		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2250					  name, NULL);
2251		if (IS_ERR(hpdev->pci_slot)) {
2252			pr_warn("pci_create_slot %s failed\n", name);
2253			hpdev->pci_slot = NULL;
2254		}
2255	}
2256}
2257
2258/*
2259 * Remove entries in sysfs pci slot directory.
2260 */
2261static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2262{
2263	struct hv_pci_dev *hpdev;
2264
2265	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2266		if (!hpdev->pci_slot)
2267			continue;
2268		pci_destroy_slot(hpdev->pci_slot);
2269		hpdev->pci_slot = NULL;
2270	}
2271}
2272
2273/*
2274 * Set NUMA node for the devices on the bus
2275 */
2276static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2277{
2278	struct pci_dev *dev;
2279	struct pci_bus *bus = hbus->bridge->bus;
2280	struct hv_pci_dev *hv_dev;
2281
2282	list_for_each_entry(dev, &bus->devices, bus_list) {
2283		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2284		if (!hv_dev)
2285			continue;
2286
2287		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2288		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
2289			/*
2290			 * The kernel may boot with some NUMA nodes offline
2291			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2292			 * "numa=off". In those cases, adjust the host provided
2293			 * NUMA node to a valid NUMA node used by the kernel.
2294			 */
2295			set_dev_node(&dev->dev,
2296				     numa_map_to_online_node(
2297					     hv_dev->desc.virtual_numa_node));
2298
2299		put_pcichild(hv_dev);
2300	}
2301}
2302
2303/**
2304 * create_root_hv_pci_bus() - Expose a new root PCI bus
2305 * @hbus:	Root PCI bus, as understood by this driver
2306 *
2307 * Return: 0 on success, -errno on failure
2308 */
2309static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2310{
2311	int error;
2312	struct pci_host_bridge *bridge = hbus->bridge;
2313
2314	bridge->dev.parent = &hbus->hdev->device;
2315	bridge->sysdata = &hbus->sysdata;
2316	bridge->ops = &hv_pcifront_ops;
2317
2318	error = pci_scan_root_bus_bridge(bridge);
2319	if (error)
2320		return error;
2321
2322	pci_lock_rescan_remove();
2323	hv_pci_assign_numa_node(hbus);
2324	pci_bus_assign_resources(bridge->bus);
2325	hv_pci_assign_slots(hbus);
2326	pci_bus_add_devices(bridge->bus);
2327	pci_unlock_rescan_remove();
2328	hbus->state = hv_pcibus_installed;
2329	return 0;
2330}
2331
2332struct q_res_req_compl {
2333	struct completion host_event;
2334	struct hv_pci_dev *hpdev;
2335};
2336
2337/**
2338 * q_resource_requirements() - Query Resource Requirements
2339 * @context:		The completion context.
2340 * @resp:		The response that came from the host.
2341 * @resp_packet_size:	The size in bytes of resp.
2342 *
2343 * This function is invoked on completion of a Query Resource
2344 * Requirements packet.
2345 */
2346static void q_resource_requirements(void *context, struct pci_response *resp,
2347				    int resp_packet_size)
2348{
2349	struct q_res_req_compl *completion = context;
2350	struct pci_q_res_req_response *q_res_req =
2351		(struct pci_q_res_req_response *)resp;
2352	s32 status;
2353	int i;
2354
2355	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2356	if (status < 0) {
2357		dev_err(&completion->hpdev->hbus->hdev->device,
2358			"query resource requirements failed: %x\n",
2359			status);
2360	} else {
2361		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2362			completion->hpdev->probed_bar[i] =
2363				q_res_req->probed_bar[i];
2364		}
2365	}
2366
2367	complete(&completion->host_event);
2368}
2369
2370/**
2371 * new_pcichild_device() - Create a new child device
2372 * @hbus:	The internal struct tracking this root PCI bus.
2373 * @desc:	The information supplied so far from the host
2374 *              about the device.
2375 *
2376 * This function creates the tracking structure for a new child
2377 * device and kicks off the process of figuring out what it is.
2378 *
2379 * Return: Pointer to the new tracking struct
2380 */
2381static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2382		struct hv_pcidev_description *desc)
2383{
2384	struct hv_pci_dev *hpdev;
2385	struct pci_child_message *res_req;
2386	struct q_res_req_compl comp_pkt;
2387	struct {
2388		struct pci_packet init_packet;
2389		u8 buffer[sizeof(struct pci_child_message)];
2390	} pkt;
2391	unsigned long flags;
2392	int ret;
2393
2394	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2395	if (!hpdev)
2396		return NULL;
2397
2398	hpdev->hbus = hbus;
2399
2400	memset(&pkt, 0, sizeof(pkt));
2401	init_completion(&comp_pkt.host_event);
2402	comp_pkt.hpdev = hpdev;
2403	pkt.init_packet.compl_ctxt = &comp_pkt;
2404	pkt.init_packet.completion_func = q_resource_requirements;
2405	res_req = (struct pci_child_message *)&pkt.init_packet.message;
2406	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2407	res_req->wslot.slot = desc->win_slot.slot;
2408
2409	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2410			       sizeof(struct pci_child_message),
2411			       (unsigned long)&pkt.init_packet,
2412			       VM_PKT_DATA_INBAND,
2413			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2414	if (ret)
2415		goto error;
2416
2417	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2418		goto error;
2419
2420	hpdev->desc = *desc;
2421	refcount_set(&hpdev->refs, 1);
2422	get_pcichild(hpdev);
2423	spin_lock_irqsave(&hbus->device_list_lock, flags);
2424
2425	list_add_tail(&hpdev->list_entry, &hbus->children);
2426	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2427	return hpdev;
2428
2429error:
2430	kfree(hpdev);
2431	return NULL;
2432}
2433
2434/**
2435 * get_pcichild_wslot() - Find device from slot
2436 * @hbus:	Root PCI bus, as understood by this driver
2437 * @wslot:	Location on the bus
2438 *
2439 * This function looks up a PCI device and returns the internal
2440 * representation of it.  It acquires a reference on it, so that
2441 * the device won't be deleted while somebody is using it.  The
2442 * caller is responsible for calling put_pcichild() to release
2443 * this reference.
2444 *
2445 * Return:	Internal representation of a PCI device
2446 */
2447static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2448					     u32 wslot)
2449{
2450	unsigned long flags;
2451	struct hv_pci_dev *iter, *hpdev = NULL;
2452
2453	spin_lock_irqsave(&hbus->device_list_lock, flags);
2454	list_for_each_entry(iter, &hbus->children, list_entry) {
2455		if (iter->desc.win_slot.slot == wslot) {
2456			hpdev = iter;
2457			get_pcichild(hpdev);
2458			break;
2459		}
2460	}
2461	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2462
2463	return hpdev;
2464}
2465
2466/**
2467 * pci_devices_present_work() - Handle new list of child devices
2468 * @work:	Work struct embedded in struct hv_dr_work
2469 *
2470 * "Bus Relations" is the Windows term for "children of this
2471 * bus."  The terminology is preserved here for people trying to
2472 * debug the interaction between Hyper-V and Linux.  This
2473 * function is called when the parent partition reports a list
2474 * of functions that should be observed under this PCI Express
2475 * port (bus).
2476 *
2477 * This function updates the list, and must tolerate being
2478 * called multiple times with the same information.  The typical
2479 * number of child devices is one, with very atypical cases
2480 * involving three or four, so the algorithms used here can be
2481 * simple and inefficient.
2482 *
2483 * It must also treat the omission of a previously observed device as
2484 * notification that the device no longer exists.
2485 *
2486 * Note that this function is serialized with hv_eject_device_work(),
2487 * because both are pushed to the ordered workqueue hbus->wq.
2488 */
2489static void pci_devices_present_work(struct work_struct *work)
2490{
2491	u32 child_no;
2492	bool found;
2493	struct hv_pcidev_description *new_desc;
2494	struct hv_pci_dev *hpdev;
2495	struct hv_pcibus_device *hbus;
2496	struct list_head removed;
2497	struct hv_dr_work *dr_wrk;
2498	struct hv_dr_state *dr = NULL;
2499	unsigned long flags;
2500
2501	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2502	hbus = dr_wrk->bus;
2503	kfree(dr_wrk);
2504
2505	INIT_LIST_HEAD(&removed);
2506
2507	/* Pull this off the queue and process it if it was the last one. */
2508	spin_lock_irqsave(&hbus->device_list_lock, flags);
2509	while (!list_empty(&hbus->dr_list)) {
2510		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2511				      list_entry);
2512		list_del(&dr->list_entry);
2513
2514		/* Throw this away if the list still has stuff in it. */
2515		if (!list_empty(&hbus->dr_list)) {
2516			kfree(dr);
2517			continue;
2518		}
2519	}
2520	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2521
2522	if (!dr)
2523		return;
2524
2525	/* First, mark all existing children as reported missing. */
2526	spin_lock_irqsave(&hbus->device_list_lock, flags);
2527	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2528		hpdev->reported_missing = true;
2529	}
2530	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2531
2532	/* Next, add back any reported devices. */
2533	for (child_no = 0; child_no < dr->device_count; child_no++) {
2534		found = false;
2535		new_desc = &dr->func[child_no];
2536
2537		spin_lock_irqsave(&hbus->device_list_lock, flags);
2538		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2539			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2540			    (hpdev->desc.v_id == new_desc->v_id) &&
2541			    (hpdev->desc.d_id == new_desc->d_id) &&
2542			    (hpdev->desc.ser == new_desc->ser)) {
2543				hpdev->reported_missing = false;
2544				found = true;
2545			}
2546		}
2547		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2548
2549		if (!found) {
2550			hpdev = new_pcichild_device(hbus, new_desc);
2551			if (!hpdev)
2552				dev_err(&hbus->hdev->device,
2553					"couldn't record a child device.\n");
2554		}
2555	}
2556
2557	/* Move missing children to a list on the stack. */
2558	spin_lock_irqsave(&hbus->device_list_lock, flags);
2559	do {
2560		found = false;
2561		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2562			if (hpdev->reported_missing) {
2563				found = true;
2564				put_pcichild(hpdev);
2565				list_move_tail(&hpdev->list_entry, &removed);
2566				break;
2567			}
2568		}
2569	} while (found);
2570	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2571
2572	/* Delete everything that should no longer exist. */
2573	while (!list_empty(&removed)) {
2574		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2575					 list_entry);
2576		list_del(&hpdev->list_entry);
2577
2578		if (hpdev->pci_slot)
2579			pci_destroy_slot(hpdev->pci_slot);
2580
2581		put_pcichild(hpdev);
2582	}
2583
2584	switch (hbus->state) {
2585	case hv_pcibus_installed:
2586		/*
2587		 * Tell the core to rescan bus
2588		 * because there may have been changes.
2589		 */
2590		pci_lock_rescan_remove();
2591		pci_scan_child_bus(hbus->bridge->bus);
2592		hv_pci_assign_numa_node(hbus);
2593		hv_pci_assign_slots(hbus);
2594		pci_unlock_rescan_remove();
2595		break;
2596
2597	case hv_pcibus_init:
2598	case hv_pcibus_probed:
2599		survey_child_resources(hbus);
2600		break;
2601
2602	default:
2603		break;
2604	}
2605
2606	kfree(dr);
2607}
2608
2609/**
2610 * hv_pci_start_relations_work() - Queue work to start device discovery
2611 * @hbus:	Root PCI bus, as understood by this driver
2612 * @dr:		The list of children returned from host
2613 *
2614 * Return:  0 on success, -errno on failure
2615 */
2616static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2617				       struct hv_dr_state *dr)
2618{
2619	struct hv_dr_work *dr_wrk;
2620	unsigned long flags;
2621	bool pending_dr;
2622
2623	if (hbus->state == hv_pcibus_removing) {
2624		dev_info(&hbus->hdev->device,
2625			 "PCI VMBus BUS_RELATIONS: ignored\n");
2626		return -ENOENT;
2627	}
2628
2629	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2630	if (!dr_wrk)
2631		return -ENOMEM;
2632
2633	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2634	dr_wrk->bus = hbus;
2635
2636	spin_lock_irqsave(&hbus->device_list_lock, flags);
2637	/*
2638	 * If pending_dr is true, we have already queued a work,
2639	 * which will see the new dr. Otherwise, we need to
2640	 * queue a new work.
2641	 */
2642	pending_dr = !list_empty(&hbus->dr_list);
2643	list_add_tail(&dr->list_entry, &hbus->dr_list);
2644	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2645
2646	if (pending_dr)
2647		kfree(dr_wrk);
2648	else
2649		queue_work(hbus->wq, &dr_wrk->wrk);
2650
2651	return 0;
2652}
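
/*
 * Example of the coalescing above: if two BUS_RELATIONS messages arrive
 * back to back, the first queues a work item and the second merely
 * appends its hv_dr_state to dr_list; the single work item then drops
 * the stale entry and processes only the newest list (see
 * pci_devices_present_work() above).
 */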
2653
2654/**
2655 * hv_pci_devices_present() - Handle list of new children
2656 * @hbus:      Root PCI bus, as understood by this driver
2657 * @relations: Packet from host listing children
2658 *
2659 * Process a new list of devices on the bus. The list of devices is
2660 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
2661 * whenever a new list of devices for this bus appears.
2662 */
2663static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2664				   struct pci_bus_relations *relations)
2665{
2666	struct hv_dr_state *dr;
2667	int i;
2668
2669	dr = kzalloc(struct_size(dr, func, relations->device_count),
2670		     GFP_NOWAIT);
2671	if (!dr)
2672		return;
2673
2674	dr->device_count = relations->device_count;
2675	for (i = 0; i < dr->device_count; i++) {
2676		dr->func[i].v_id = relations->func[i].v_id;
2677		dr->func[i].d_id = relations->func[i].d_id;
2678		dr->func[i].rev = relations->func[i].rev;
2679		dr->func[i].prog_intf = relations->func[i].prog_intf;
2680		dr->func[i].subclass = relations->func[i].subclass;
2681		dr->func[i].base_class = relations->func[i].base_class;
2682		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2683		dr->func[i].win_slot = relations->func[i].win_slot;
2684		dr->func[i].ser = relations->func[i].ser;
2685	}
2686
2687	if (hv_pci_start_relations_work(hbus, dr))
2688		kfree(dr);
2689}
2690
2691/**
2692 * hv_pci_devices_present2() - Handle list of new children
2693 * @hbus:	Root PCI bus, as understood by this driver
2694 * @relations:	Packet from host listing children
2695 *
2696 * This function is the v2 version of hv_pci_devices_present()
2697 */
2698static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2699				    struct pci_bus_relations2 *relations)
2700{
2701	struct hv_dr_state *dr;
2702	int i;
2703
2704	dr = kzalloc(struct_size(dr, func, relations->device_count),
2705		     GFP_NOWAIT);
2706	if (!dr)
2707		return;
2708
2709	dr->device_count = relations->device_count;
2710	for (i = 0; i < dr->device_count; i++) {
2711		dr->func[i].v_id = relations->func[i].v_id;
2712		dr->func[i].d_id = relations->func[i].d_id;
2713		dr->func[i].rev = relations->func[i].rev;
2714		dr->func[i].prog_intf = relations->func[i].prog_intf;
2715		dr->func[i].subclass = relations->func[i].subclass;
2716		dr->func[i].base_class = relations->func[i].base_class;
2717		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2718		dr->func[i].win_slot = relations->func[i].win_slot;
2719		dr->func[i].ser = relations->func[i].ser;
2720		dr->func[i].flags = relations->func[i].flags;
2721		dr->func[i].virtual_numa_node =
2722			relations->func[i].virtual_numa_node;
2723	}
2724
2725	if (hv_pci_start_relations_work(hbus, dr))
2726		kfree(dr);
2727}
2728
2729/**
2730 * hv_eject_device_work() - Asynchronously handles ejection
2731 * @work:	Work struct embedded in internal device struct
2732 *
2733 * This function handles ejecting a device.  Windows will
2734 * attempt to gracefully eject a device, waiting 60 seconds to
2735 * hear back from the guest OS that this completed successfully.
2736 * If this timer expires, the device will be forcibly removed.
2737 */
2738static void hv_eject_device_work(struct work_struct *work)
2739{
2740	struct pci_eject_response *ejct_pkt;
2741	struct hv_pcibus_device *hbus;
2742	struct hv_pci_dev *hpdev;
2743	struct pci_dev *pdev;
2744	unsigned long flags;
2745	int wslot;
2746	struct {
2747		struct pci_packet pkt;
2748		u8 buffer[sizeof(struct pci_eject_response)];
2749	} ctxt;
2750
2751	hpdev = container_of(work, struct hv_pci_dev, wrk);
2752	hbus = hpdev->hbus;
2753
2754	WARN_ON(hpdev->state != hv_pcichild_ejecting);
2755
2756	/*
2757	 * Ejection can come before or after the PCI bus has been set up, so
2758	 * attempt to find it and tear down the bus state, if it exists.  This
2759	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2760	 * because hbus->bridge->bus may not exist yet.
2761	 */
2762	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2763	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2764	if (pdev) {
2765		pci_lock_rescan_remove();
2766		pci_stop_and_remove_bus_device(pdev);
2767		pci_dev_put(pdev);
2768		pci_unlock_rescan_remove();
2769	}
2770
2771	spin_lock_irqsave(&hbus->device_list_lock, flags);
2772	list_del(&hpdev->list_entry);
2773	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2774
2775	if (hpdev->pci_slot)
2776		pci_destroy_slot(hpdev->pci_slot);
2777
2778	memset(&ctxt, 0, sizeof(ctxt));
2779	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2780	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2781	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2782	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2783			 sizeof(*ejct_pkt), 0,
2784			 VM_PKT_DATA_INBAND, 0);
2785
2786	/* For the get_pcichild() in hv_pci_eject_device() */
2787	put_pcichild(hpdev);
2788	/* For the two refs got in new_pcichild_device() */
2789	put_pcichild(hpdev);
2790	put_pcichild(hpdev);
2791	/* hpdev has been freed. Do not use it any more. */
2792}
2793
2794/**
2795 * hv_pci_eject_device() - Handles device ejection
2796 * @hpdev:	Internal device tracking struct
2797 *
2798 * This function is invoked when an ejection packet arrives.  It
2799 * just schedules work so that we don't re-enter the packet
2800 * delivery code handling the ejection.
2801 */
2802static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2803{
2804	struct hv_pcibus_device *hbus = hpdev->hbus;
2805	struct hv_device *hdev = hbus->hdev;
2806
2807	if (hbus->state == hv_pcibus_removing) {
2808		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2809		return;
2810	}
2811
2812	hpdev->state = hv_pcichild_ejecting;
2813	get_pcichild(hpdev);
2814	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2815	queue_work(hbus->wq, &hpdev->wrk);
2816}
2817
2818/**
2819 * hv_pci_onchannelcallback() - Handles incoming packets
2820 * @context:	Internal bus tracking struct
2821 *
2822 * This function is invoked whenever the host sends a packet to
2823 * this channel (which is private to this root PCI bus).
2824 */
2825static void hv_pci_onchannelcallback(void *context)
2826{
2827	const int packet_size = 0x100;
2828	int ret;
2829	struct hv_pcibus_device *hbus = context;
2830	struct vmbus_channel *chan = hbus->hdev->channel;
2831	u32 bytes_recvd;
2832	u64 req_id, req_addr;
2833	struct vmpacket_descriptor *desc;
2834	unsigned char *buffer;
2835	int bufferlen = packet_size;
2836	struct pci_packet *comp_packet;
2837	struct pci_response *response;
2838	struct pci_incoming_message *new_message;
2839	struct pci_bus_relations *bus_rel;
2840	struct pci_bus_relations2 *bus_rel2;
2841	struct pci_dev_inval_block *inval;
2842	struct pci_dev_incoming *dev_message;
2843	struct hv_pci_dev *hpdev;
2844	unsigned long flags;
2845
2846	buffer = kmalloc(bufferlen, GFP_ATOMIC);
2847	if (!buffer)
2848		return;
2849
2850	while (1) {
2851		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
2852					   &bytes_recvd, &req_id);
2853
2854		if (ret == -ENOBUFS) {
2855			kfree(buffer);
2856			/* Handle large packet */
2857			bufferlen = bytes_recvd;
2858			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2859			if (!buffer)
2860				return;
2861			continue;
2862		}
2863
2864		/* Zero length indicates there are no more packets. */
2865		if (ret || !bytes_recvd)
2866			break;
2867
2868		/*
2869		 * All incoming packets must be larger than a response,
2870		 * or they are ignored.
2871		 */
2872		if (bytes_recvd <= sizeof(struct pci_response))
2873			continue;
2874		desc = (struct vmpacket_descriptor *)buffer;
2875
2876		switch (desc->type) {
2877		case VM_PKT_COMP:
2878
2879			lock_requestor(chan, flags);
2880			req_addr = __vmbus_request_addr_match(chan, req_id,
2881							      VMBUS_RQST_ADDR_ANY);
2882			if (req_addr == VMBUS_RQST_ERROR) {
2883				unlock_requestor(chan, flags);
2884				dev_err(&hbus->hdev->device,
2885					"Invalid transaction ID %llx\n",
2886					req_id);
2887				break;
2888			}
2889			comp_packet = (struct pci_packet *)req_addr;
2890			response = (struct pci_response *)buffer;
2891			/*
2892			 * Call ->completion_func() within the critical section to make
2893			 * sure that the packet pointer is still valid during the call:
2894			 * here 'valid' means that there's a task still waiting for the
2895			 * completion, and that the packet data is still on the waiting
2896			 * task's stack.  Cf. hv_compose_msi_msg().
2897			 */
2898			comp_packet->completion_func(comp_packet->compl_ctxt,
2899						     response,
2900						     bytes_recvd);
2901			unlock_requestor(chan, flags);
2902			break;
2903
2904		case VM_PKT_DATA_INBAND:
2905
2906			new_message = (struct pci_incoming_message *)buffer;
2907			switch (new_message->message_type.type) {
2908			case PCI_BUS_RELATIONS:
2909
2910				bus_rel = (struct pci_bus_relations *)buffer;
2911				if (bytes_recvd < sizeof(*bus_rel) ||
2912				    bytes_recvd <
2913					struct_size(bus_rel, func,
2914						    bus_rel->device_count)) {
2915					dev_err(&hbus->hdev->device,
2916						"bus relations too small\n");
2917					break;
2918				}
2919
2920				hv_pci_devices_present(hbus, bus_rel);
2921				break;
2922
2923			case PCI_BUS_RELATIONS2:
2924
2925				bus_rel2 = (struct pci_bus_relations2 *)buffer;
2926				if (bytes_recvd < sizeof(*bus_rel2) ||
2927				    bytes_recvd <
2928					struct_size(bus_rel2, func,
2929						    bus_rel2->device_count)) {
2930					dev_err(&hbus->hdev->device,
2931						"bus relations v2 too small\n");
2932					break;
2933				}
2934
2935				hv_pci_devices_present2(hbus, bus_rel2);
2936				break;
2937
2938			case PCI_EJECT:
2939
2940				dev_message = (struct pci_dev_incoming *)buffer;
2941				if (bytes_recvd < sizeof(*dev_message)) {
2942					dev_err(&hbus->hdev->device,
2943						"eject message too small\n");
2944					break;
2945				}
2946				hpdev = get_pcichild_wslot(hbus,
2947						      dev_message->wslot.slot);
2948				if (hpdev) {
2949					hv_pci_eject_device(hpdev);
2950					put_pcichild(hpdev);
2951				}
2952				break;
2953
2954			case PCI_INVALIDATE_BLOCK:
2955
2956				inval = (struct pci_dev_inval_block *)buffer;
2957				if (bytes_recvd < sizeof(*inval)) {
2958					dev_err(&hbus->hdev->device,
2959						"invalidate message too small\n");
2960					break;
2961				}
2962				hpdev = get_pcichild_wslot(hbus,
2963							   inval->wslot.slot);
2964				if (hpdev) {
2965					if (hpdev->block_invalidate) {
2966						hpdev->block_invalidate(
2967						    hpdev->invalidate_context,
2968						    inval->block_mask);
2969					}
2970					put_pcichild(hpdev);
2971				}
2972				break;
2973
2974			default:
2975				dev_warn(&hbus->hdev->device,
2976					"Unimplemented protocol message %x\n",
2977					new_message->message_type.type);
2978				break;
2979			}
2980			break;
2981
2982		default:
2983			dev_err(&hbus->hdev->device,
2984				"unhandled packet type %d, tid %llx len %d\n",
2985				desc->type, req_id, bytes_recvd);
2986			break;
2987		}
2988	}
2989
2990	kfree(buffer);
2991}
2992
2993/**
2994 * hv_pci_protocol_negotiation() - Set up protocol
2995 * @hdev:		VMBus's tracking struct for this root PCI bus.
2996 * @version:		Array of supported channel protocol versions in
2997 *			the order of probing - highest go first.
2998 * @num_version:	Number of elements in the version array.
2999 *
3000 * This driver is intended to support running on Windows 10
3001 * (server) and later versions. It will not run on earlier
3002 * versions, as they assume that many of the operations which
3003	 * Linux needs accomplished with a spinlock held were done through
3004	 * asynchronous messaging over VMBus.  Windows 10 increases the
3005 * surface area of PCI emulation so that these actions can take
3006 * place by suspending a virtual processor for their duration.
3007 *
3008 * This function negotiates the channel protocol version,
3009 * failing if the host doesn't support the necessary protocol
3010 * level.
3011 */
3012static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3013				       enum pci_protocol_version_t version[],
3014				       int num_version)
3015{
3016	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3017	struct pci_version_request *version_req;
3018	struct hv_pci_compl comp_pkt;
3019	struct pci_packet *pkt;
3020	int ret;
3021	int i;
3022
3023	/*
3024	 * Initiate the handshake with the host and negotiate
3025	 * a version that the host can support. We start with the
3026	 * highest version number and go down if the host cannot
3027	 * support it.
3028	 */
3029	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3030	if (!pkt)
3031		return -ENOMEM;
3032
3033	init_completion(&comp_pkt.host_event);
3034	pkt->completion_func = hv_pci_generic_compl;
3035	pkt->compl_ctxt = &comp_pkt;
3036	version_req = (struct pci_version_request *)&pkt->message;
3037	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3038
3039	for (i = 0; i < num_version; i++) {
3040		version_req->protocol_version = version[i];
3041		ret = vmbus_sendpacket(hdev->channel, version_req,
3042				sizeof(struct pci_version_request),
3043				(unsigned long)pkt, VM_PKT_DATA_INBAND,
3044				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3045		if (!ret)
3046			ret = wait_for_response(hdev, &comp_pkt.host_event);
3047
3048		if (ret) {
3049			dev_err(&hdev->device,
3050				"PCI Pass-through VSP failed to request version: %d",
3051				ret);
3052			goto exit;
3053		}
3054
3055		if (comp_pkt.completion_status >= 0) {
3056			hbus->protocol_version = version[i];
3057			dev_info(&hdev->device,
3058				"PCI VMBus probing: Using version %#x\n",
3059				hbus->protocol_version);
3060			goto exit;
3061		}
3062
3063		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3064			dev_err(&hdev->device,
3065				"PCI Pass-through VSP failed version request: %#x",
3066				comp_pkt.completion_status);
3067			ret = -EPROTO;
3068			goto exit;
3069		}
3070
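		/*
		 * The host rejected this version (REVISION_MISMATCH) and the
		 * completion has already fired; re-arm it before retrying
		 * with the next entry of the version array.
		 */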
3071		reinit_completion(&comp_pkt.host_event);
3072	}
3073
3074	dev_err(&hdev->device,
3075		"PCI pass-through VSP failed to find supported version");
3076	ret = -EPROTO;
3077
3078exit:
3079	kfree(pkt);
3080	return ret;
3081}
3082
3083/**
3084 * hv_pci_free_bridge_windows() - Release memory regions for the
3085 * bus
3086 * @hbus:	Root PCI bus, as understood by this driver
3087 */
3088static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3089{
3090	/*
3091	 * Set the resources back to the way they looked when they
3092	 * were allocated by setting IORESOURCE_BUSY again.
3093	 */
3094
3095	if (hbus->low_mmio_space && hbus->low_mmio_res) {
3096		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3097		vmbus_free_mmio(hbus->low_mmio_res->start,
3098				resource_size(hbus->low_mmio_res));
3099	}
3100
3101	if (hbus->high_mmio_space && hbus->high_mmio_res) {
3102		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3103		vmbus_free_mmio(hbus->high_mmio_res->start,
3104				resource_size(hbus->high_mmio_res));
3105	}
3106}
3107
3108/**
3109 * hv_pci_allocate_bridge_windows() - Allocate memory regions
3110 * for the bus
3111 * @hbus:	Root PCI bus, as understood by this driver
3112 *
3113 * This function calls vmbus_allocate_mmio(), which is itself a
3114 * bit of a compromise.  Ideally, we might change the pnp layer
3115 * in the kernel such that it comprehends either PCI devices
3116 * which are "grandchildren of ACPI," with some intermediate bus
3117 * node (in this case, VMBus) or change it such that it
3118 * understands VMBus.  The pnp layer, however, has been declared
3119 * deprecated, and not subject to change.
3120 *
3121 * The workaround, implemented here, is to ask VMBus to allocate
3122 * MMIO space for this bus.  VMBus itself knows which ranges are
3123 * appropriate by looking at its own ACPI objects.  Then, after
3124 * these ranges are claimed, they're modified to look like they
3125 * would have looked if the ACPI and pnp code had allocated
3126 * bridge windows.  These descriptors have to exist in this form
3127 * in order to satisfy the code which will get invoked when the
3128 * endpoint PCI function driver calls request_mem_region() or
3129 * request_mem_region_exclusive().
3130 *
3131 * Return: 0 on success, -errno on failure
3132 */
3133static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3134{
3135	resource_size_t align;
3136	int ret;
3137
3138	if (hbus->low_mmio_space) {
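		/*
		 * Use the largest power of two not exceeding the window size
		 * as the alignment: 63 - __builtin_clzll(x) is the index of
		 * the highest set bit of x, e.g. x = 0x6000 yields 0x4000.
		 */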
3139		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3140		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3141					  (u64)(u32)0xffffffff,
3142					  hbus->low_mmio_space,
3143					  align, false);
3144		if (ret) {
3145			dev_err(&hbus->hdev->device,
3146				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3147				hbus->low_mmio_space);
3148			return ret;
3149		}
3150
3151		/* Modify this resource to become a bridge window. */
3152		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3153		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3154		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3155	}
3156
3157	if (hbus->high_mmio_space) {
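		/*
		 * Same power-of-two alignment as for the low window; the
		 * search range starts at 0x100000000 so that the window is
		 * placed above the 4 GiB boundary.
		 */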
3158		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3159		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3160					  0x100000000, -1,
3161					  hbus->high_mmio_space, align,
3162					  false);
3163		if (ret) {
3164			dev_err(&hbus->hdev->device,
3165				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3166				hbus->high_mmio_space);
3167			goto release_low_mmio;
3168		}
3169
3170		/* Modify this resource to become a bridge window. */
3171		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3172		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3173		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3174	}
3175
3176	return 0;
3177
3178release_low_mmio:
3179	if (hbus->low_mmio_res) {
3180		vmbus_free_mmio(hbus->low_mmio_res->start,
3181				resource_size(hbus->low_mmio_res));
3182	}
3183
3184	return ret;
3185}
3186
3187/**
3188 * hv_allocate_config_window() - Find MMIO space for PCI Config
3189 * @hbus:	Root PCI bus, as understood by this driver
3190 *
3191 * This function claims memory-mapped I/O space for accessing
3192 * configuration space for the functions on this bus.
3193 *
3194 * Return: 0 on success, -errno on failure
3195 */
3196static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3197{
3198	int ret;
3199
3200	/*
3201	 * Set up a region of MMIO space to use for accessing configuration
3202	 * space.
3203	 */
3204	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3205				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3206	if (ret)
3207		return ret;
3208
3209	/*
3210	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3211	 * resource claims (those which cannot be overlapped) and the ranges
3212	 * which are valid for the children of this bus, which are intended
3213	 * to be overlapped by those children.  Set the flag on this claim
3214	 * meaning that this region can't be overlapped.
3215	 */
3216
3217	hbus->mem_config->flags |= IORESOURCE_BUSY;
3218
3219	return 0;
3220}
3221
3222static void hv_free_config_window(struct hv_pcibus_device *hbus)
3223{
3224	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3225}
3226
3227static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3228
3229/**
3230 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3231 * @hdev:	VMBus's tracking struct for this root PCI bus
3232 *
3233 * Return: 0 on success, -errno on failure
3234 */
3235static int hv_pci_enter_d0(struct hv_device *hdev)
3236{
3237	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3238	struct pci_bus_d0_entry *d0_entry;
3239	struct hv_pci_compl comp_pkt;
3240	struct pci_packet *pkt;
3241	int ret;
3242
3243	/*
3244	 * Tell the host that the bus is ready to use, and has moved into the
3245	 * powered-on state.  This includes telling the host which region
3246	 * of memory-mapped I/O space has been chosen for configuration space
3247	 * access.
3248	 */
3249	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3250	if (!pkt)
3251		return -ENOMEM;
3252
3253	init_completion(&comp_pkt.host_event);
3254	pkt->completion_func = hv_pci_generic_compl;
3255	pkt->compl_ctxt = &comp_pkt;
3256	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3257	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3258	d0_entry->mmio_base = hbus->mem_config->start;
3259
3260	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3261			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3262			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3263	if (!ret)
3264		ret = wait_for_response(hdev, &comp_pkt.host_event);
3265
3266	if (ret)
3267		goto exit;
3268
3269	if (comp_pkt.completion_status < 0) {
3270		dev_err(&hdev->device,
3271			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3272			comp_pkt.completion_status);
3273		ret = -EPROTO;
3274		goto exit;
3275	}
3276
3277	ret = 0;
3278
3279exit:
3280	kfree(pkt);
3281	return ret;
3282}
3283
3284/**
3285 * hv_pci_query_relations() - Ask host to send list of child
3286 * devices
3287 * @hdev:	VMBus's tracking struct for this root PCI bus
3288 *
3289 * Return: 0 on success, -errno on failure
3290 */
3291static int hv_pci_query_relations(struct hv_device *hdev)
3292{
3293	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3294	struct pci_message message;
3295	struct completion comp;
3296	int ret;
3297
3298	/* Ask the host to send along the list of child devices */
3299	init_completion(&comp);
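	/*
	 * Atomically claim the survey slot; a non-NULL old value means a
	 * survey is already outstanding.
	 */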
3300	if (cmpxchg(&hbus->survey_event, NULL, &comp))
3301		return -ENOTEMPTY;
3302
3303	memset(&message, 0, sizeof(message));
3304	message.type = PCI_QUERY_BUS_RELATIONS;
3305
3306	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3307			       0, VM_PKT_DATA_INBAND, 0);
3308	if (!ret)
3309		ret = wait_for_response(hdev, &comp);
3310
3311	return ret;
3312}
3313
3314/**
3315 * hv_send_resources_allocated() - Report local resource choices
3316 * @hdev:	VMBus's tracking struct for this root PCI bus
3317 *
3318 * The host OS is expecting to be sent a request as a message
3319 * which contains all the resources that the device will use.
3320	 * The response contains those same resources, "translated",
3321	 * which is to say, the values which the hardware should use
3322	 * when it delivers an interrupt.  (MMIO resources are
3323 * used in local terms.)  This is nice for Windows, and lines up
3324 * with the FDO/PDO split, which doesn't exist in Linux.  Linux
3325 * is deeply expecting to scan an emulated PCI configuration
3326 * space.  So this message is sent here only to drive the state
3327 * machine on the host forward.
3328 *
3329 * Return: 0 on success, -errno on failure
3330 */
3331static int hv_send_resources_allocated(struct hv_device *hdev)
3332{
3333	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3334	struct pci_resources_assigned *res_assigned;
3335	struct pci_resources_assigned2 *res_assigned2;
3336	struct hv_pci_compl comp_pkt;
3337	struct hv_pci_dev *hpdev;
3338	struct pci_packet *pkt;
3339	size_t size_res;
3340	int wslot;
3341	int ret;
3342
3343	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
3344			? sizeof(*res_assigned) : sizeof(*res_assigned2);
3345
3346	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
3347	if (!pkt)
3348		return -ENOMEM;
3349
3350	ret = 0;
3351
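	/*
	 * Walk every possible slot number; get_pcichild_wslot() returns the
	 * child at that slot, if any, with an extra reference held.
	 */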
3352	for (wslot = 0; wslot < 256; wslot++) {
3353		hpdev = get_pcichild_wslot(hbus, wslot);
3354		if (!hpdev)
3355			continue;
3356
3357		memset(pkt, 0, sizeof(*pkt) + size_res);
3358		init_completion(&comp_pkt.host_event);
3359		pkt->completion_func = hv_pci_generic_compl;
3360		pkt->compl_ctxt = &comp_pkt;
3361
3362		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
3363			res_assigned =
3364				(struct pci_resources_assigned *)&pkt->message;
3365			res_assigned->message_type.type =
3366				PCI_RESOURCES_ASSIGNED;
3367			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
3368		} else {
3369			res_assigned2 =
3370				(struct pci_resources_assigned2 *)&pkt->message;
3371			res_assigned2->message_type.type =
3372				PCI_RESOURCES_ASSIGNED2;
3373			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
3374		}
3375		put_pcichild(hpdev);
3376
3377		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
3378				size_res, (unsigned long)pkt,
3379				VM_PKT_DATA_INBAND,
3380				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3381		if (!ret)
3382			ret = wait_for_response(hdev, &comp_pkt.host_event);
3383		if (ret)
3384			break;
3385
3386		if (comp_pkt.completion_status < 0) {
3387			ret = -EPROTO;
3388			dev_err(&hdev->device,
3389				"resource allocated returned 0x%x",
3390				comp_pkt.completion_status);
3391			break;
3392		}
3393
3394		hbus->wslot_res_allocated = wslot;
3395	}
3396
3397	kfree(pkt);
3398	return ret;
3399}
3400
3401/**
3402 * hv_send_resources_released() - Report local resources
3403 * released
3404 * @hdev:	VMBus's tracking struct for this root PCI bus
3405 *
3406 * Return: 0 on success, -errno on failure
3407 */
3408static int hv_send_resources_released(struct hv_device *hdev)
3409{
3410	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3411	struct pci_child_message pkt;
3412	struct hv_pci_dev *hpdev;
3413	int wslot;
3414	int ret;
3415
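	/*
	 * Release in the reverse order of allocation; wslot_res_allocated is
	 * updated as we go so it always names the highest slot still held.
	 */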
3416	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
3417		hpdev = get_pcichild_wslot(hbus, wslot);
3418		if (!hpdev)
3419			continue;
3420
3421		memset(&pkt, 0, sizeof(pkt));
3422		pkt.message_type.type = PCI_RESOURCES_RELEASED;
3423		pkt.wslot.slot = hpdev->desc.win_slot.slot;
3424
3425		put_pcichild(hpdev);
3426
3427		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
3428				       VM_PKT_DATA_INBAND, 0);
3429		if (ret)
3430			return ret;
3431
3432		hbus->wslot_res_allocated = wslot - 1;
3433	}
3434
3435	hbus->wslot_res_allocated = -1;
3436
3437	return 0;
3438}
3439
3440#define HVPCI_DOM_MAP_SIZE (64 * 1024)
3441static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
3442
3443/*
3444 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
3445 * as invalid for passthrough PCI devices of this driver.
3446 */
3447#define HVPCI_DOM_INVALID 0
3448
3449/**
3450 * hv_get_dom_num() - Get a valid PCI domain number
3451	 * Check if the requested PCI domain number is in use, and return
3452	 * another, unused number if it is.
3453 *
3454 * @dom: Requested domain number
3455 *
3456	 * Return: domain number on success, HVPCI_DOM_INVALID on failure
3457 */
3458static u16 hv_get_dom_num(u16 dom)
3459{
3460	unsigned int i;
3461
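	/*
	 * test_and_set_bit() is atomic, so two probes racing for the same
	 * domain number cannot both claim it.
	 */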
3462	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
3463		return dom;
3464
3465	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
3466		if (test_and_set_bit(i, hvpci_dom_map) == 0)
3467			return i;
3468	}
3469
3470	return HVPCI_DOM_INVALID;
3471}
3472
3473/**
3474 * hv_put_dom_num() - Mark the PCI domain number as free
3475 * @dom: Domain number to be freed
3476 */
3477static void hv_put_dom_num(u16 dom)
3478{
3479	clear_bit(dom, hvpci_dom_map);
3480}
3481
3482/**
3483 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3484 * @hdev:	VMBus's tracking struct for this root PCI bus
3485 * @dev_id:	Identifies the device itself
3486 *
3487 * Return: 0 on success, -errno on failure
3488 */
3489static int hv_pci_probe(struct hv_device *hdev,
3490			const struct hv_vmbus_device_id *dev_id)
3491{
3492	struct pci_host_bridge *bridge;
3493	struct hv_pcibus_device *hbus;
3494	u16 dom_req, dom;
3495	char *name;
3496	bool enter_d0_retry = true;
3497	int ret;
3498
3499	/*
3500	 * hv_pcibus_device contains the hypercall arguments for retargeting in
3501	 * hv_irq_unmask(). Those must not cross a page boundary.
3502	 */
3503	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
3504
3505	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
3506	if (!bridge)
3507		return -ENOMEM;
3508
3509	/*
3510	 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
3511	 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
3512	 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
3513	 * alignment of hbus is important because hbus's field
3514	 * retarget_msi_interrupt_params must not cross a 4KB page boundary.
3515	 *
3516	 * Here we prefer kzalloc to get_zeroed_page(), because a buffer
3517	 * allocated by the latter is not tracked and scanned by kmemleak, and
3518	 * hence kmemleak reports the pointer contained in the hbus buffer
3519	 * (i.e. the hpdev struct, which is created in new_pcichild_device() and
3520	 * is tracked by hbus->children) as a memory leak (a false positive).
3521	 *
3522	 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
3523	 * used to allocate the hbus buffer and we can avoid the kmemleak false
3524	 * positive by using kmemleak_alloc() and kmemleak_free() to ask
3525	 * kmemleak to track and scan the hbus buffer.
3526	 */
3527	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
3528	if (!hbus)
3529		return -ENOMEM;
3530
3531	hbus->bridge = bridge;
3532	hbus->state = hv_pcibus_init;
3533	hbus->wslot_res_allocated = -1;
3534
3535	/*
3536	 * The PCI bus "domain" is what is called "segment" in ACPI and other
3537	 * specs. Pull it from the instance ID, to get something usually
3538	 * unique. In rare cases of collision, we will find another number
3539	 * not in use.
3540	 *
3541	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3542	 * together with this guest driver can guarantee that (1) The only
3543	 * domain used by Gen1 VMs for something that looks like a physical
3544	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3545	 * (2) There will be no overlap between domains (after fixing possible
3546	 * collisions) in the same VM.
3547	 */
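	/* Bytes 4 and 5 of the instance GUID (b[5] high) form the requested domain. */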
3548	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
3549	dom = hv_get_dom_num(dom_req);
3550
3551	if (dom == HVPCI_DOM_INVALID) {
3552		dev_err(&hdev->device,
3553			"Unable to use dom# 0x%x or other numbers", dom_req);
3554		ret = -EINVAL;
3555		goto free_bus;
3556	}
3557
3558	if (dom != dom_req)
3559		dev_info(&hdev->device,
3560			 "PCI dom# 0x%x has collision, using 0x%x",
3561			 dom_req, dom);
3562
3563	hbus->bridge->domain_nr = dom;
3564#ifdef CONFIG_X86
3565	hbus->sysdata.domain = dom;
3566#elif defined(CONFIG_ARM64)
3567	/*
3568	 * Set the PCI bus parent to be the corresponding VMbus
3569	 * device. Then the VMbus device will be assigned as the
3570	 * ACPI companion in pcibios_root_bridge_prepare() and
3571	 * pci_dma_configure() will propagate device coherence
3572	 * information to devices created on the bus.
3573	 */
3574	hbus->sysdata.parent = hdev->device.parent;
3575#endif
3576
3577	hbus->hdev = hdev;
3578	INIT_LIST_HEAD(&hbus->children);
3579	INIT_LIST_HEAD(&hbus->dr_list);
3580	spin_lock_init(&hbus->config_lock);
3581	spin_lock_init(&hbus->device_list_lock);
3582	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
3583	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
3584					   hbus->bridge->domain_nr);
3585	if (!hbus->wq) {
3586		ret = -ENOMEM;
3587		goto free_dom;
3588	}
3589
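	/*
	 * Set up the VMbus requestor so that completion packets can be
	 * matched back to their requests in hv_pci_onchannelcallback().
	 */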
3590	hdev->channel->next_request_id_callback = vmbus_next_request_id;
3591	hdev->channel->request_addr_callback = vmbus_request_addr;
3592	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
3593
3594	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3595			 hv_pci_onchannelcallback, hbus);
3596	if (ret)
3597		goto destroy_wq;
3598
3599	hv_set_drvdata(hdev, hbus);
3600
3601	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
3602					  ARRAY_SIZE(pci_protocol_versions));
3603	if (ret)
3604		goto close;
3605
3606	ret = hv_allocate_config_window(hbus);
3607	if (ret)
3608		goto close;
3609
3610	hbus->cfg_addr = ioremap(hbus->mem_config->start,
3611				 PCI_CONFIG_MMIO_LENGTH);
3612	if (!hbus->cfg_addr) {
3613		dev_err(&hdev->device,
3614			"Unable to map a virtual address for config space\n");
3615		ret = -ENOMEM;
3616		goto free_config;
3617	}
3618
3619	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
3620	if (!name) {
3621		ret = -ENOMEM;
3622		goto unmap;
3623	}
3624
3625	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
3626	kfree(name);
3627	if (!hbus->fwnode) {
3628		ret = -ENOMEM;
3629		goto unmap;
3630	}
3631
3632	ret = hv_pcie_init_irq_domain(hbus);
3633	if (ret)
3634		goto free_fwnode;
3635
3636retry:
3637	ret = hv_pci_query_relations(hdev);
3638	if (ret)
3639		goto free_irq_domain;
3640
3641	ret = hv_pci_enter_d0(hdev);
3642	/*
3643	 * In certain cases (kdump) the PCI device of interest was
3644	 * not cleanly shut down and its resources are still held on the
3645	 * host side, so the host could return an invalid device status.
3646	 * We need to explicitly request that the host release the
3647	 * resources and then try to enter D0 again.
3648	 * Since the hv_pci_bus_exit() call releases structures
3649	 * of all its child devices, we need to start the retry from
3650	 * the hv_pci_query_relations() call, requesting the host to send
3651	 * the synchronous child device relations message before this
3652	 * information is needed in the hv_send_resources_allocated()
3653	 * call later.
3654	 */
3655	if (ret == -EPROTO && enter_d0_retry) {
3656		enter_d0_retry = false;
3657
3658		dev_err(&hdev->device, "Retrying D0 Entry\n");
3659
3660		/*
3661	 * hv_pci_bus_exit() calls hv_send_resources_released()
3662		 * to free up resources of its child devices.
3663		 * In the kdump kernel we need to set the
3664		 * wslot_res_allocated to 255 so it scans all child
3665		 * devices to release resources allocated in the
3666		 * normal kernel before panic happened.
3667		 */
3668		hbus->wslot_res_allocated = 255;
3669		ret = hv_pci_bus_exit(hdev, true);
3670
3671		if (ret == 0)
3672			goto retry;
3673
3674		dev_err(&hdev->device,
3675			"Retrying D0 failed with ret %d\n", ret);
3676	}
3677	if (ret)
3678		goto free_irq_domain;
3679
3680	ret = hv_pci_allocate_bridge_windows(hbus);
3681	if (ret)
3682		goto exit_d0;
3683
3684	ret = hv_send_resources_allocated(hdev);
3685	if (ret)
3686		goto free_windows;
3687
3688	prepopulate_bars(hbus);
3689
3690	hbus->state = hv_pcibus_probed;
3691
3692	ret = create_root_hv_pci_bus(hbus);
3693	if (ret)
3694		goto free_windows;
3695
3696	return 0;
3697
3698free_windows:
3699	hv_pci_free_bridge_windows(hbus);
3700exit_d0:
3701	(void) hv_pci_bus_exit(hdev, true);
3702free_irq_domain:
3703	irq_domain_remove(hbus->irq_domain);
3704free_fwnode:
3705	irq_domain_free_fwnode(hbus->fwnode);
3706unmap:
3707	iounmap(hbus->cfg_addr);
3708free_config:
3709	hv_free_config_window(hbus);
3710close:
3711	vmbus_close(hdev->channel);
3712destroy_wq:
3713	destroy_workqueue(hbus->wq);
3714free_dom:
3715	hv_put_dom_num(hbus->bridge->domain_nr);
3716free_bus:
3717	kfree(hbus);
3718	return ret;
3719}
3720
3721static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
3722{
3723	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3724	struct vmbus_channel *chan = hdev->channel;
3725	struct {
3726		struct pci_packet teardown_packet;
3727		u8 buffer[sizeof(struct pci_message)];
3728	} pkt;
3729	struct hv_pci_compl comp_pkt;
3730	struct hv_pci_dev *hpdev, *tmp;
3731	unsigned long flags;
3732	u64 trans_id;
3733	int ret;
3734
3735	/*
3736	 * After the host sends the RESCIND_CHANNEL message, it doesn't
3737	 * access the per-channel ringbuffer any longer.
3738	 */
3739	if (chan->rescind)
3740		return 0;
3741
3742	if (!keep_devs) {
3743		struct list_head removed;
3744
3745		/* Move all present children to a list on the stack */
3746		INIT_LIST_HEAD(&removed);
3747		spin_lock_irqsave(&hbus->device_list_lock, flags);
3748		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
3749			list_move_tail(&hpdev->list_entry, &removed);
3750		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
3751
3752		/* Remove all children in the list */
3753		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
3754			list_del(&hpdev->list_entry);
3755			if (hpdev->pci_slot)
3756				pci_destroy_slot(hpdev->pci_slot);
3757			/* For the two refs taken in new_pcichild_device() */
3758			put_pcichild(hpdev);
3759			put_pcichild(hpdev);
3760		}
3761	}
3762
3763	ret = hv_send_resources_released(hdev);
3764	if (ret) {
3765		dev_err(&hdev->device,
3766			"Couldn't send resources released packet(s)\n");
3767		return ret;
3768	}
3769
3770	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
3771	init_completion(&comp_pkt.host_event);
3772	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
3773	pkt.teardown_packet.compl_ctxt = &comp_pkt;
3774	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
3775
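	/*
	 * Use the _getid variant so that, on timeout, the transaction ID can
	 * be unmapped from the on-stack packet below.
	 */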
3776	ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
3777				     sizeof(struct pci_message),
3778				     (unsigned long)&pkt.teardown_packet,
3779				     &trans_id, VM_PKT_DATA_INBAND,
3780				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3781	if (ret)
3782		return ret;
3783
3784	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
3785		/*
3786		 * The completion packet on the stack becomes invalid after
3787		 * 'return'; remove the ID from the VMbus requestor if the
3788		 * identifier is still mapped to/associated with the packet.
3789		 *
3790		 * Cf. hv_pci_onchannelcallback().
3791		 */
3792		vmbus_request_addr_match(chan, trans_id,
3793					 (unsigned long)&pkt.teardown_packet);
3794		return -ETIMEDOUT;
3795	}
3796
3797	return 0;
3798}
3799
3800/**
3801 * hv_pci_remove() - Remove routine for this VMBus channel
3802 * @hdev:	VMBus's tracking struct for this root PCI bus
3803 *
3804 * Return: 0 on success, -errno on failure
3805 */
3806static int hv_pci_remove(struct hv_device *hdev)
3807{
3808	struct hv_pcibus_device *hbus;
3809	int ret;
3810
3811	hbus = hv_get_drvdata(hdev);
3812	if (hbus->state == hv_pcibus_installed) {
3813		tasklet_disable(&hdev->channel->callback_event);
3814		hbus->state = hv_pcibus_removing;
3815		tasklet_enable(&hdev->channel->callback_event);
3816		destroy_workqueue(hbus->wq);
3817		hbus->wq = NULL;
3818		/*
3819		 * At this point, no work is running or can be scheduled
3820		 * on hbus->wq. We can't race with hv_pci_devices_present()
3821		 * or hv_pci_eject_device(), so it's safe to proceed.
3822		 */
3823
3824		/* Remove the bus from PCI's point of view. */
3825		pci_lock_rescan_remove();
3826		pci_stop_root_bus(hbus->bridge->bus);
3827		hv_pci_remove_slots(hbus);
3828		pci_remove_root_bus(hbus->bridge->bus);
3829		pci_unlock_rescan_remove();
3830	}
3831
3832	ret = hv_pci_bus_exit(hdev, false);
3833
3834	vmbus_close(hdev->channel);
3835
3836	iounmap(hbus->cfg_addr);
3837	hv_free_config_window(hbus);
3838	hv_pci_free_bridge_windows(hbus);
3839	irq_domain_remove(hbus->irq_domain);
3840	irq_domain_free_fwnode(hbus->fwnode);
3841
3842	hv_put_dom_num(hbus->bridge->domain_nr);
3843
3844	kfree(hbus);
3845	return ret;
3846}
3847
3848static int hv_pci_suspend(struct hv_device *hdev)
3849{
3850	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3851	enum hv_pcibus_state old_state;
3852	int ret;
3853
3854	/*
3855	 * hv_pci_suspend() must make sure there are no pending work items
3856	 * before calling vmbus_close(), since it runs in a process context
3857	 * as a callback in dpm_suspend().  When it starts to run, the channel
3858	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
3859	 * context, can still be running concurrently and scheduling new work
3860	 * items onto hbus->wq in hv_pci_devices_present() and
3861	 * hv_pci_eject_device(), and the work item handlers can access the
3862	 * vmbus channel, which hv_pci_suspend() may be closing concurrently, e.g.
3863	 * the work item handler pci_devices_present_work() ->
3864	 * new_pcichild_device() writes to the vmbus channel.
3865	 *
3866	 * To eliminate the race, hv_pci_suspend() disables the channel
3867	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
3868	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
3869	 * it knows that no new work item can be scheduled, and then it flushes
3870	 * hbus->wq and safely closes the vmbus channel.
3871	 */
3872	tasklet_disable(&hdev->channel->callback_event);
3873
3874	/* Change the hbus state to prevent new work items. */
3875	old_state = hbus->state;
3876	if (hbus->state == hv_pcibus_installed)
3877		hbus->state = hv_pcibus_removing;
3878
3879	tasklet_enable(&hdev->channel->callback_event);
3880
3881	if (old_state != hv_pcibus_installed)
3882		return -EINVAL;
3883
3884	flush_workqueue(hbus->wq);
3885
3886	ret = hv_pci_bus_exit(hdev, true);
3887	if (ret)
3888		return ret;
3889
3890	vmbus_close(hdev->channel);
3891
3892	return 0;
3893}
3894
3895static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
3896{
3897	struct irq_data *irq_data;
3898	struct msi_desc *entry;
3899	int ret = 0;
3900
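	/*
	 * Walk only the descriptors that have an interrupt associated and
	 * recompose each message, so that the hypervisor recreates the
	 * remapping state (see the comment before hv_pci_restore_msi_state()).
	 */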
3901	msi_lock_descs(&pdev->dev);
3902	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
3903		irq_data = irq_get_irq_data(entry->irq);
3904		if (WARN_ON_ONCE(!irq_data)) {
3905			ret = -EINVAL;
3906			break;
3907		}
3908
3909		hv_compose_msi_msg(irq_data, &entry->msg);
3910	}
3911	msi_unlock_descs(&pdev->dev);
3912
3913	return ret;
3914}
3915
3916/*
3917 * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
3918 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
3919 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
3920 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
3921 * Table entries.
3922 */
3923static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
3924{
3925	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
3926}
3927
3928static int hv_pci_resume(struct hv_device *hdev)
3929{
3930	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3931	enum pci_protocol_version_t version[1];
3932	int ret;
3933
3934	hbus->state = hv_pcibus_init;
3935
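	/* The channel was closed in hv_pci_suspend(); set up the requestor again. */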
3936	hdev->channel->next_request_id_callback = vmbus_next_request_id;
3937	hdev->channel->request_addr_callback = vmbus_request_addr;
3938	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
3939
3940	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3941			 hv_pci_onchannelcallback, hbus);
3942	if (ret)
3943		return ret;
3944
3945	/* Only use the version that was in use before hibernation. */
3946	version[0] = hbus->protocol_version;
3947	ret = hv_pci_protocol_negotiation(hdev, version, 1);
3948	if (ret)
3949		goto out;
3950
3951	ret = hv_pci_query_relations(hdev);
3952	if (ret)
3953		goto out;
3954
3955	ret = hv_pci_enter_d0(hdev);
3956	if (ret)
3957		goto out;
3958
3959	ret = hv_send_resources_allocated(hdev);
3960	if (ret)
3961		goto out;
3962
3963	prepopulate_bars(hbus);
3964
3965	hv_pci_restore_msi_state(hbus);
3966
3967	hbus->state = hv_pcibus_installed;
3968	return 0;
3969out:
3970	vmbus_close(hdev->channel);
3971	return ret;
3972}
3973
3974static const struct hv_vmbus_device_id hv_pci_id_table[] = {
3975	/* PCI Pass-through Class ID */
3976	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
3977	{ HV_PCIE_GUID, },
3978	{ },
3979};
3980
3981MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
3982
3983static struct hv_driver hv_pci_drv = {
3984	.name		= "hv_pci",
3985	.id_table	= hv_pci_id_table,
3986	.probe		= hv_pci_probe,
3987	.remove		= hv_pci_remove,
3988	.suspend	= hv_pci_suspend,
3989	.resume		= hv_pci_resume,
3990};
3991
3992static void __exit exit_hv_pci_drv(void)
3993{
3994	vmbus_driver_unregister(&hv_pci_drv);
3995
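	/* Unhook the PCI block r/w interface installed in init_hv_pci_drv(). */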
3996	hvpci_block_ops.read_block = NULL;
3997	hvpci_block_ops.write_block = NULL;
3998	hvpci_block_ops.reg_blk_invalidate = NULL;
3999}
4000
4001static int __init init_hv_pci_drv(void)
4002{
4003	int ret;
4004
4005	if (!hv_is_hyperv_initialized())
4006		return -ENODEV;
4007
4008	ret = hv_pci_irqchip_init();
4009	if (ret)
4010		return ret;
4011
4012	/* Set the invalid domain number's bit, so it will not be used */
4013	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
4014
4015	/* Initialize PCI block r/w interface */
4016	hvpci_block_ops.read_block = hv_read_config_block;
4017	hvpci_block_ops.write_block = hv_write_config_block;
4018	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
4019
4020	return vmbus_driver_register(&hv_pci_drv);
4021}
4022
4023module_init(init_hv_pci_drv);
4024module_exit(exit_hv_pci_drv);
4025
4026MODULE_DESCRIPTION("Hyper-V PCI");
4027MODULE_LICENSE("GPL v2");