1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) Microsoft Corporation.
4  *
5  * Author:
6  *   Jake Oshins <jakeo@microsoft.com>
7  *
8  * This driver acts as a paravirtual front-end for PCI Express root buses.
9  * When a PCI Express function (either an entire device or an SR-IOV
10  * Virtual Function) is being passed through to the VM, this driver exposes
11  * a new bus to the guest VM.  This is modeled as a root PCI bus because
12  * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
13  * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
14  * until a device as been exposed using this driver.
15  *
16  * Each root PCI bus has its own PCI domain, which is called "Segment" in
17  * the PCI Firmware Specifications.  Thus while each device passed through
18  * to the VM using this front-end will appear at "device 0", the domain will
19  * be unique.  Typically, each bus will have one PCI function on it, though
20  * this driver does support more than one.
21  *
22  * In order to map the interrupts from the device through to the guest VM,
23  * this driver also implements an IRQ Domain, which handles interrupts (either
24  * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
25  * set up, torn down, or reaffined, this driver communicates with the
26  * underlying hypervisor to adjust the mappings in the I/O MMU so that each
27  * interrupt will be delivered to the correct virtual processor at the right
28  * vector.  This driver does not support level-triggered (line-based)
29  * interrupts, and will report that the Interrupt Line register in the
30  * function's configuration space is zero.
31  *
32  * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
33  * facilities.  For instance, the configuration space of a function exposed
34  * by Hyper-V is mapped into a single page of memory space, and the
35  * read and write handlers for config space must be aware of this mechanism.
36  * Similarly, device setup and teardown involves messages sent to and from
37  * the PCI back-end driver in Hyper-V.
38  */
39 
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/pci-ecam.h>
44 #include <linux/delay.h>
45 #include <linux/semaphore.h>
46 #include <linux/irq.h>
47 #include <linux/msi.h>
48 #include <linux/hyperv.h>
49 #include <linux/refcount.h>
50 #include <linux/irqdomain.h>
51 #include <linux/acpi.h>
52 #include <asm/mshyperv.h>
53 
54 /*
55  * Protocol versions. The low word is the minor version, the high word the
56  * major version.
57  */
58 
59 #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
60 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
61 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
62 
63 enum pci_protocol_version_t {
64 	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
65 	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
66 	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
67 	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
68 };
69 
70 #define CPU_AFFINITY_ALL	-1ULL
71 
72 /*
73  * Supported protocol versions in the order of probing - highest go
74  * first.
75  */
76 static enum pci_protocol_version_t pci_protocol_versions[] = {
77 	PCI_PROTOCOL_VERSION_1_4,
78 	PCI_PROTOCOL_VERSION_1_3,
79 	PCI_PROTOCOL_VERSION_1_2,
80 	PCI_PROTOCOL_VERSION_1_1,
81 };
82 
83 #define PCI_CONFIG_MMIO_LENGTH	0x2000
84 #define CFG_PAGE_OFFSET 0x1000
85 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
86 
87 #define MAX_SUPPORTED_MSI_MESSAGES 0x400
88 
89 #define STATUS_REVISION_MISMATCH 0xC0000059
90 
91 /* space for 32bit serial number as string */
92 #define SLOT_NAME_SIZE 11
93 
94 /*
95  * Message Types
96  */
97 
98 enum pci_message_type {
99 	/*
100 	 * Version 1.1
101 	 */
102 	PCI_MESSAGE_BASE                = 0x42490000,
103 	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
104 	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
105 	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
106 	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
107 	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
108 	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
109 	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
110 	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
111 	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
112 	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
113 	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
114 	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
115 	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
116 	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
117 	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
118 	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
119 	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
120 	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
121 	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
122 	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
123 	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
124 	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
125 	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
126 	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
127 	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
128 	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
129 	PCI_MESSAGE_MAXIMUM
130 };
131 
132 /*
133  * Structures defining the virtual PCI Express protocol.
134  */
135 
136 union pci_version {
137 	struct {
138 		u16 minor_version;
139 		u16 major_version;
140 	} parts;
141 	u32 version;
142 } __packed;
143 
144 /*
145  * Function numbers are 8-bits wide on Express, as interpreted through ARI,
146  * which is all this driver does.  This representation is the one used in
147  * Windows, which is what is expected when sending this back and forth with
148  * the Hyper-V parent partition.
149  */
150 union win_slot_encoding {
151 	struct {
152 		u32	dev:5;
153 		u32	func:3;
154 		u32	reserved:24;
155 	} bits;
156 	u32 slot;
157 } __packed;
158 
159 /*
160  * Pretty much as defined in the PCI Specifications.
161  */
162 struct pci_function_description {
163 	u16	v_id;	/* vendor ID */
164 	u16	d_id;	/* device ID */
165 	u8	rev;
166 	u8	prog_intf;
167 	u8	subclass;
168 	u8	base_class;
169 	u32	subsystem_id;
170 	union win_slot_encoding win_slot;
171 	u32	ser;	/* serial number */
172 } __packed;
173 
174 enum pci_device_description_flags {
175 	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
176 	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
177 };
178 
179 struct pci_function_description2 {
180 	u16	v_id;	/* vendor ID */
181 	u16	d_id;	/* device ID */
182 	u8	rev;
183 	u8	prog_intf;
184 	u8	subclass;
185 	u8	base_class;
186 	u32	subsystem_id;
187 	union	win_slot_encoding win_slot;
188 	u32	ser;	/* serial number */
189 	u32	flags;
190 	u16	virtual_numa_node;
191 	u16	reserved;
192 } __packed;
193 
194 /**
195  * struct hv_msi_desc
196  * @vector:		IDT entry
197  * @delivery_mode:	As defined in Intel's Programmer's
198  *			Reference Manual, Volume 3, Chapter 8.
199  * @vector_count:	Number of contiguous entries in the
200  *			Interrupt Descriptor Table that are
201  *			occupied by this Message-Signaled
202  *			Interrupt. For "MSI", as first defined
203  *			in PCI 2.2, this can be between 1 and
204  *			32. For "MSI-X," as first defined in PCI
205  *			3.0, this must be 1, as each MSI-X table
206  *			entry would have its own descriptor.
207  * @reserved:		Empty space
208  * @cpu_mask:		All the target virtual processors.
209  */
210 struct hv_msi_desc {
211 	u8	vector;
212 	u8	delivery_mode;
213 	u16	vector_count;
214 	u32	reserved;
215 	u64	cpu_mask;
216 } __packed;
217 
218 /**
219  * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
220  * @vector:		IDT entry
221  * @delivery_mode:	As defined in Intel's Programmer's
222  *			Reference Manual, Volume 3, Chapter 8.
223  * @vector_count:	Number of contiguous entries in the
224  *			Interrupt Descriptor Table that are
225  *			occupied by this Message-Signaled
226  *			Interrupt. For "MSI", as first defined
227  *			in PCI 2.2, this can be between 1 and
228  *			32. For "MSI-X," as first defined in PCI
229  *			3.0, this must be 1, as each MSI-X table
230  *			entry would have its own descriptor.
231  * @processor_count:	number of bits enabled in array.
232  * @processor_array:	All the target virtual processors.
233  */
234 struct hv_msi_desc2 {
235 	u8	vector;
236 	u8	delivery_mode;
237 	u16	vector_count;
238 	u16	processor_count;
239 	u16	processor_array[32];
240 } __packed;
241 
242 /*
243  * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
244  *	Everything is the same as in 'hv_msi_desc2' except that the size of the
245  *	'vector' field is larger to support bigger vector values. For ex: LPI
246  *	vectors on ARM.
247  */
248 struct hv_msi_desc3 {
249 	u32	vector;
250 	u8	delivery_mode;
251 	u8	reserved;
252 	u16	vector_count;
253 	u16	processor_count;
254 	u16	processor_array[32];
255 } __packed;
256 
257 /**
258  * struct tran_int_desc
259  * @reserved:		unused, padding
260  * @vector_count:	same as in hv_msi_desc
261  * @data:		This is the "data payload" value that is
262  *			written by the device when it generates
263  *			a message-signaled interrupt, either MSI
264  *			or MSI-X.
265  * @address:		This is the address to which the data
266  *			payload is written on interrupt
267  *			generation.
268  */
269 struct tran_int_desc {
270 	u16	reserved;
271 	u16	vector_count;
272 	u32	data;
273 	u64	address;
274 } __packed;
275 
276 /*
277  * A generic message format for virtual PCI.
278  * Specific message formats are defined later in the file.
279  */
280 
281 struct pci_message {
282 	u32 type;
283 } __packed;
284 
285 struct pci_child_message {
286 	struct pci_message message_type;
287 	union win_slot_encoding wslot;
288 } __packed;
289 
290 struct pci_incoming_message {
291 	struct vmpacket_descriptor hdr;
292 	struct pci_message message_type;
293 } __packed;
294 
295 struct pci_response {
296 	struct vmpacket_descriptor hdr;
297 	s32 status;			/* negative values are failures */
298 } __packed;
299 
300 struct pci_packet {
301 	void (*completion_func)(void *context, struct pci_response *resp,
302 				int resp_packet_size);
303 	void *compl_ctxt;
304 
305 	struct pci_message message[];
306 };
307 
308 /*
309  * Specific message types supporting the PCI protocol.
310  */
311 
312 /*
313  * Version negotiation message. Sent from the guest to the host.
314  * The guest is free to try different versions until the host
315  * accepts the version.
316  *
317  * pci_version: The protocol version requested.
318  * is_last_attempt: If TRUE, this is the last version guest will request.
319  * reservedz: Reserved field, set to zero.
320  */
321 
322 struct pci_version_request {
323 	struct pci_message message_type;
324 	u32 protocol_version;
325 } __packed;
326 
327 /*
328  * Bus D0 Entry.  This is sent from the guest to the host when the virtual
329  * bus (PCI Express port) is ready for action.
330  */
331 
332 struct pci_bus_d0_entry {
333 	struct pci_message message_type;
334 	u32 reserved;
335 	u64 mmio_base;
336 } __packed;
337 
338 struct pci_bus_relations {
339 	struct pci_incoming_message incoming;
340 	u32 device_count;
341 	struct pci_function_description func[];
342 } __packed;
343 
344 struct pci_bus_relations2 {
345 	struct pci_incoming_message incoming;
346 	u32 device_count;
347 	struct pci_function_description2 func[];
348 } __packed;
349 
350 struct pci_q_res_req_response {
351 	struct vmpacket_descriptor hdr;
352 	s32 status;			/* negative values are failures */
353 	u32 probed_bar[PCI_STD_NUM_BARS];
354 } __packed;
355 
356 struct pci_set_power {
357 	struct pci_message message_type;
358 	union win_slot_encoding wslot;
359 	u32 power_state;		/* In Windows terms */
360 	u32 reserved;
361 } __packed;
362 
363 struct pci_set_power_response {
364 	struct vmpacket_descriptor hdr;
365 	s32 status;			/* negative values are failures */
366 	union win_slot_encoding wslot;
367 	u32 resultant_state;		/* In Windows terms */
368 	u32 reserved;
369 } __packed;
370 
371 struct pci_resources_assigned {
372 	struct pci_message message_type;
373 	union win_slot_encoding wslot;
374 	u8 memory_range[0x14][6];	/* not used here */
375 	u32 msi_descriptors;
376 	u32 reserved[4];
377 } __packed;
378 
379 struct pci_resources_assigned2 {
380 	struct pci_message message_type;
381 	union win_slot_encoding wslot;
382 	u8 memory_range[0x14][6];	/* not used here */
383 	u32 msi_descriptor_count;
384 	u8 reserved[70];
385 } __packed;
386 
387 struct pci_create_interrupt {
388 	struct pci_message message_type;
389 	union win_slot_encoding wslot;
390 	struct hv_msi_desc int_desc;
391 } __packed;
392 
393 struct pci_create_int_response {
394 	struct pci_response response;
395 	u32 reserved;
396 	struct tran_int_desc int_desc;
397 } __packed;
398 
399 struct pci_create_interrupt2 {
400 	struct pci_message message_type;
401 	union win_slot_encoding wslot;
402 	struct hv_msi_desc2 int_desc;
403 } __packed;
404 
405 struct pci_create_interrupt3 {
406 	struct pci_message message_type;
407 	union win_slot_encoding wslot;
408 	struct hv_msi_desc3 int_desc;
409 } __packed;
410 
411 struct pci_delete_interrupt {
412 	struct pci_message message_type;
413 	union win_slot_encoding wslot;
414 	struct tran_int_desc int_desc;
415 } __packed;
416 
417 /*
418  * Note: the VM must pass a valid block id, wslot and bytes_requested.
419  */
420 struct pci_read_block {
421 	struct pci_message message_type;
422 	u32 block_id;
423 	union win_slot_encoding wslot;
424 	u32 bytes_requested;
425 } __packed;
426 
427 struct pci_read_block_response {
428 	struct vmpacket_descriptor hdr;
429 	u32 status;
430 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
431 } __packed;
432 
433 /*
434  * Note: the VM must pass a valid block id, wslot and byte_count.
435  */
436 struct pci_write_block {
437 	struct pci_message message_type;
438 	u32 block_id;
439 	union win_slot_encoding wslot;
440 	u32 byte_count;
441 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
442 } __packed;
443 
444 struct pci_dev_inval_block {
445 	struct pci_incoming_message incoming;
446 	union win_slot_encoding wslot;
447 	u64 block_mask;
448 } __packed;
449 
450 struct pci_dev_incoming {
451 	struct pci_incoming_message incoming;
452 	union win_slot_encoding wslot;
453 } __packed;
454 
455 struct pci_eject_response {
456 	struct pci_message message_type;
457 	union win_slot_encoding wslot;
458 	u32 status;
459 } __packed;
460 
461 static int pci_ring_size = (4 * PAGE_SIZE);
462 
463 /*
464  * Driver specific state.
465  */
466 
467 enum hv_pcibus_state {
468 	hv_pcibus_init = 0,
469 	hv_pcibus_probed,
470 	hv_pcibus_installed,
471 	hv_pcibus_removing,
472 	hv_pcibus_maximum
473 };
474 
475 struct hv_pcibus_device {
476 #ifdef CONFIG_X86
477 	struct pci_sysdata sysdata;
478 #elif defined(CONFIG_ARM64)
479 	struct pci_config_window sysdata;
480 #endif
481 	struct pci_host_bridge *bridge;
482 	struct fwnode_handle *fwnode;
483 	/* Protocol version negotiated with the host */
484 	enum pci_protocol_version_t protocol_version;
485 	enum hv_pcibus_state state;
486 	struct hv_device *hdev;
487 	resource_size_t low_mmio_space;
488 	resource_size_t high_mmio_space;
489 	struct resource *mem_config;
490 	struct resource *low_mmio_res;
491 	struct resource *high_mmio_res;
492 	struct completion *survey_event;
493 	struct pci_bus *pci_bus;
494 	spinlock_t config_lock;	/* Avoid two threads writing index page */
495 	spinlock_t device_list_lock;	/* Protect lists below */
496 	void __iomem *cfg_addr;
497 
498 	struct list_head children;
499 	struct list_head dr_list;
500 
501 	struct msi_domain_info msi_info;
502 	struct irq_domain *irq_domain;
503 
504 	spinlock_t retarget_msi_interrupt_lock;
505 
506 	struct workqueue_struct *wq;
507 
508 	/* Highest slot of child device with resources allocated */
509 	int wslot_res_allocated;
510 
511 	/* hypercall arg, must not cross page boundary */
512 	struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
513 
514 	/*
515 	 * Don't put anything here: retarget_msi_interrupt_params must be last
516 	 */
517 };
518 
519 /*
520  * Tracks "Device Relations" messages from the host, which must be both
521  * processed in order and deferred so that they don't run in the context
522  * of the incoming packet callback.
523  */
524 struct hv_dr_work {
525 	struct work_struct wrk;
526 	struct hv_pcibus_device *bus;
527 };
528 
529 struct hv_pcidev_description {
530 	u16	v_id;	/* vendor ID */
531 	u16	d_id;	/* device ID */
532 	u8	rev;
533 	u8	prog_intf;
534 	u8	subclass;
535 	u8	base_class;
536 	u32	subsystem_id;
537 	union	win_slot_encoding win_slot;
538 	u32	ser;	/* serial number */
539 	u32	flags;
540 	u16	virtual_numa_node;
541 };
542 
543 struct hv_dr_state {
544 	struct list_head list_entry;
545 	u32 device_count;
546 	struct hv_pcidev_description func[];
547 };
548 
549 enum hv_pcichild_state {
550 	hv_pcichild_init = 0,
551 	hv_pcichild_requirements,
552 	hv_pcichild_resourced,
553 	hv_pcichild_ejecting,
554 	hv_pcichild_maximum
555 };
556 
557 struct hv_pci_dev {
558 	/* List protected by pci_rescan_remove_lock */
559 	struct list_head list_entry;
560 	refcount_t refs;
561 	enum hv_pcichild_state state;
562 	struct pci_slot *pci_slot;
563 	struct hv_pcidev_description desc;
564 	bool reported_missing;
565 	struct hv_pcibus_device *hbus;
566 	struct work_struct wrk;
567 
568 	void (*block_invalidate)(void *context, u64 block_mask);
569 	void *invalidate_context;
570 
571 	/*
572 	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
573 	 * read it back, for each of the BAR offsets within config space.
574 	 */
575 	u32 probed_bar[PCI_STD_NUM_BARS];
576 };
577 
578 struct hv_pci_compl {
579 	struct completion host_event;
580 	s32 completion_status;
581 };
582 
583 static void hv_pci_onchannelcallback(void *context);
584 
585 #ifdef CONFIG_X86
586 #define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
587 #define FLOW_HANDLER	handle_edge_irq
588 #define FLOW_NAME	"edge"
589 
590 static int hv_pci_irqchip_init(void)
591 {
592 	return 0;
593 }
594 
595 static struct irq_domain *hv_pci_get_root_domain(void)
596 {
597 	return x86_vector_domain;
598 }
599 
600 static unsigned int hv_msi_get_int_vector(struct irq_data *data)
601 {
602 	struct irq_cfg *cfg = irqd_cfg(data);
603 
604 	return cfg->vector;
605 }
606 
607 static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
608 				       struct msi_desc *msi_desc)
609 {
610 	msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
611 	msi_entry->data.as_uint32 = msi_desc->msg.data;
612 }
613 
614 static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
615 			  int nvec, msi_alloc_info_t *info)
616 {
617 	return pci_msi_prepare(domain, dev, nvec, info);
618 }
619 #elif defined(CONFIG_ARM64)
620 /*
621  * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
622  * of room at the start to allow for SPIs to be specified through ACPI and
623  * starting with a power of two to satisfy power of 2 multi-MSI requirement.
624  */
625 #define HV_PCI_MSI_SPI_START	64
626 #define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
627 #define DELIVERY_MODE		0
628 #define FLOW_HANDLER		NULL
629 #define FLOW_NAME		NULL
630 #define hv_msi_prepare		NULL
631 
632 struct hv_pci_chip_data {
633 	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
634 	struct mutex	map_lock;
635 };
636 
637 /* Hyper-V vPCI MSI GIC IRQ domain */
638 static struct irq_domain *hv_msi_gic_irq_domain;
639 
640 /* Hyper-V PCI MSI IRQ chip */
641 static struct irq_chip hv_arm64_msi_irq_chip = {
642 	.name = "MSI",
643 	.irq_set_affinity = irq_chip_set_affinity_parent,
644 	.irq_eoi = irq_chip_eoi_parent,
645 	.irq_mask = irq_chip_mask_parent,
646 	.irq_unmask = irq_chip_unmask_parent
647 };
648 
649 static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
650 {
651 	return irqd->parent_data->hwirq;
652 }
653 
654 static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
655 				       struct msi_desc *msi_desc)
656 {
657 	msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
658 			      msi_desc->msg.address_lo;
659 	msi_entry->data = msi_desc->msg.data;
660 }
661 
662 /*
663  * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
664  *			the bitmap.
665  * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
666  *			the parent domain.
667  */
668 static void hv_pci_vec_irq_free(struct irq_domain *domain,
669 				unsigned int virq,
670 				unsigned int nr_bm_irqs,
671 				unsigned int nr_dom_irqs)
672 {
673 	struct hv_pci_chip_data *chip_data = domain->host_data;
674 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
675 	int first = d->hwirq - HV_PCI_MSI_SPI_START;
676 	int i;
677 
678 	mutex_lock(&chip_data->map_lock);
679 	bitmap_release_region(chip_data->spi_map,
680 			      first,
681 			      get_count_order(nr_bm_irqs));
682 	mutex_unlock(&chip_data->map_lock);
683 	for (i = 0; i < nr_dom_irqs; i++) {
684 		if (i)
685 			d = irq_domain_get_irq_data(domain, virq + i);
686 		irq_domain_reset_irq_data(d);
687 	}
688 
689 	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
690 }
691 
692 static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
693 				       unsigned int virq,
694 				       unsigned int nr_irqs)
695 {
696 	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
697 }
698 
699 static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
700 				       unsigned int nr_irqs,
701 				       irq_hw_number_t *hwirq)
702 {
703 	struct hv_pci_chip_data *chip_data = domain->host_data;
704 	int index;
705 
706 	/* Find and allocate region from the SPI bitmap */
707 	mutex_lock(&chip_data->map_lock);
708 	index = bitmap_find_free_region(chip_data->spi_map,
709 					HV_PCI_MSI_SPI_NR,
710 					get_count_order(nr_irqs));
711 	mutex_unlock(&chip_data->map_lock);
712 	if (index < 0)
713 		return -ENOSPC;
714 
715 	*hwirq = index + HV_PCI_MSI_SPI_START;
716 
717 	return 0;
718 }
719 
720 static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
721 					   unsigned int virq,
722 					   irq_hw_number_t hwirq)
723 {
724 	struct irq_fwspec fwspec;
725 	struct irq_data *d;
726 	int ret;
727 
728 	fwspec.fwnode = domain->parent->fwnode;
729 	fwspec.param_count = 2;
730 	fwspec.param[0] = hwirq;
731 	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
732 
733 	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
734 	if (ret)
735 		return ret;
736 
737 	/*
738 	 * Since the interrupt specifier is not coming from ACPI or DT, the
739 	 * trigger type will need to be set explicitly. Otherwise, it will be
740 	 * set to whatever is in the GIC configuration.
741 	 */
742 	d = irq_domain_get_irq_data(domain->parent, virq);
743 
744 	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
745 }
746 
747 static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
748 				       unsigned int virq, unsigned int nr_irqs,
749 				       void *args)
750 {
751 	irq_hw_number_t hwirq;
752 	unsigned int i;
753 	int ret;
754 
755 	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
756 	if (ret)
757 		return ret;
758 
759 	for (i = 0; i < nr_irqs; i++) {
760 		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
761 						      hwirq + i);
762 		if (ret) {
763 			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
764 			return ret;
765 		}
766 
767 		irq_domain_set_hwirq_and_chip(domain, virq + i,
768 					      hwirq + i,
769 					      &hv_arm64_msi_irq_chip,
770 					      domain->host_data);
771 		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
772 	}
773 
774 	return 0;
775 }
776 
777 /*
778  * Pick the first cpu as the irq affinity that can be temporarily used for
779  * composing MSI from the hypervisor. GIC will eventually set the right
780  * affinity for the irq and the 'unmask' will retarget the interrupt to that
781  * cpu.
782  */
783 static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
784 					  struct irq_data *irqd, bool reserve)
785 {
786 	int cpu = cpumask_first(cpu_present_mask);
787 
788 	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
789 
790 	return 0;
791 }
792 
793 static const struct irq_domain_ops hv_pci_domain_ops = {
794 	.alloc	= hv_pci_vec_irq_domain_alloc,
795 	.free	= hv_pci_vec_irq_domain_free,
796 	.activate = hv_pci_vec_irq_domain_activate,
797 };
798 
799 static int hv_pci_irqchip_init(void)
800 {
801 	static struct hv_pci_chip_data *chip_data;
802 	struct fwnode_handle *fn = NULL;
803 	int ret = -ENOMEM;
804 
805 	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
806 	if (!chip_data)
807 		return ret;
808 
809 	mutex_init(&chip_data->map_lock);
810 	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
811 	if (!fn)
812 		goto free_chip;
813 
814 	/*
815 	 * IRQ domain once enabled, should not be removed since there is no
816 	 * way to ensure that all the corresponding devices are also gone and
817 	 * no interrupts will be generated.
818 	 */
819 	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
820 							  fn, &hv_pci_domain_ops,
821 							  chip_data);
822 
823 	if (!hv_msi_gic_irq_domain) {
824 		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
825 		goto free_chip;
826 	}
827 
828 	return 0;
829 
830 free_chip:
831 	kfree(chip_data);
832 	if (fn)
833 		irq_domain_free_fwnode(fn);
834 
835 	return ret;
836 }
837 
838 static struct irq_domain *hv_pci_get_root_domain(void)
839 {
840 	return hv_msi_gic_irq_domain;
841 }
842 #endif /* CONFIG_ARM64 */
843 
844 /**
845  * hv_pci_generic_compl() - Invoked for a completion packet
846  * @context:		Set up by the sender of the packet.
847  * @resp:		The response packet
848  * @resp_packet_size:	Size in bytes of the packet
849  *
850  * This function is used to trigger an event and report status
851  * for any message for which the completion packet contains a
852  * status and nothing else.
853  */
854 static void hv_pci_generic_compl(void *context, struct pci_response *resp,
855 				 int resp_packet_size)
856 {
857 	struct hv_pci_compl *comp_pkt = context;
858 
859 	if (resp_packet_size >= offsetofend(struct pci_response, status))
860 		comp_pkt->completion_status = resp->status;
861 	else
862 		comp_pkt->completion_status = -1;
863 
864 	complete(&comp_pkt->host_event);
865 }
866 
867 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
868 						u32 wslot);
869 
870 static void get_pcichild(struct hv_pci_dev *hpdev)
871 {
872 	refcount_inc(&hpdev->refs);
873 }
874 
875 static void put_pcichild(struct hv_pci_dev *hpdev)
876 {
877 	if (refcount_dec_and_test(&hpdev->refs))
878 		kfree(hpdev);
879 }
880 
881 /*
882  * There is no good way to get notified from vmbus_onoffer_rescind(),
883  * so let's use polling here, since this is not a hot path.
884  */
885 static int wait_for_response(struct hv_device *hdev,
886 			     struct completion *comp)
887 {
888 	while (true) {
889 		if (hdev->channel->rescind) {
890 			dev_warn_once(&hdev->device, "The device is gone.\n");
891 			return -ENODEV;
892 		}
893 
894 		if (wait_for_completion_timeout(comp, HZ / 10))
895 			break;
896 	}
897 
898 	return 0;
899 }
900 
901 /**
902  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
903  * @devfn:	The Linux representation of PCI slot
904  *
905  * Windows uses a slightly different representation of PCI slot.
906  *
907  * Return: The Windows representation
908  */
909 static u32 devfn_to_wslot(int devfn)
910 {
911 	union win_slot_encoding wslot;
912 
913 	wslot.slot = 0;
914 	wslot.bits.dev = PCI_SLOT(devfn);
915 	wslot.bits.func = PCI_FUNC(devfn);
916 
917 	return wslot.slot;
918 }
919 
920 /**
921  * wslot_to_devfn() - Convert from Windows PCI slot to Linux
922  * @wslot:	The Windows representation of PCI slot
923  *
924  * Windows uses a slightly different representation of PCI slot.
925  *
926  * Return: The Linux representation
927  */
928 static int wslot_to_devfn(u32 wslot)
929 {
930 	union win_slot_encoding slot_no;
931 
932 	slot_no.slot = wslot;
933 	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
934 }
935 
936 /*
937  * PCI Configuration Space for these root PCI buses is implemented as a pair
938  * of pages in memory-mapped I/O space.  Writing to the first page chooses
939  * the PCI function being written or read.  Once the first page has been
940  * written to, the following page maps in the entire configuration space of
941  * the function.
942  */
943 
944 /**
945  * _hv_pcifront_read_config() - Internal PCI config read
946  * @hpdev:	The PCI driver's representation of the device
947  * @where:	Offset within config space
948  * @size:	Size of the transfer
949  * @val:	Pointer to the buffer receiving the data
950  */
951 static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
952 				     int size, u32 *val)
953 {
954 	unsigned long flags;
955 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
956 
957 	/*
958 	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
959 	 */
960 	if (where + size <= PCI_COMMAND) {
961 		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
962 	} else if (where >= PCI_CLASS_REVISION && where + size <=
963 		   PCI_CACHE_LINE_SIZE) {
964 		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
965 		       PCI_CLASS_REVISION, size);
966 	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
967 		   PCI_ROM_ADDRESS) {
968 		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
969 		       PCI_SUBSYSTEM_VENDOR_ID, size);
970 	} else if (where >= PCI_ROM_ADDRESS && where + size <=
971 		   PCI_CAPABILITY_LIST) {
972 		/* ROM BARs are unimplemented */
973 		*val = 0;
974 	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
975 		   PCI_INTERRUPT_PIN) {
976 		/*
977 		 * Interrupt Line and Interrupt PIN are hard-wired to zero
978 		 * because this front-end only supports message-signaled
979 		 * interrupts.
980 		 */
981 		*val = 0;
982 	} else if (where + size <= CFG_PAGE_SIZE) {
983 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
984 		/* Choose the function to be read. (See comment above) */
985 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
986 		/* Make sure the function was chosen before we start reading. */
987 		mb();
988 		/* Read from that function's config space. */
989 		switch (size) {
990 		case 1:
991 			*val = readb(addr);
992 			break;
993 		case 2:
994 			*val = readw(addr);
995 			break;
996 		default:
997 			*val = readl(addr);
998 			break;
999 		}
1000 		/*
1001 		 * Make sure the read was done before we release the spinlock
1002 		 * allowing consecutive reads/writes.
1003 		 */
1004 		mb();
1005 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1006 	} else {
1007 		dev_err(&hpdev->hbus->hdev->device,
1008 			"Attempt to read beyond a function's config space.\n");
1009 	}
1010 }
1011 
1012 static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
1013 {
1014 	u16 ret;
1015 	unsigned long flags;
1016 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
1017 			     PCI_VENDOR_ID;
1018 
1019 	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1020 
1021 	/* Choose the function to be read. (See comment above) */
1022 	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1023 	/* Make sure the function was chosen before we start reading. */
1024 	mb();
1025 	/* Read from that function's config space. */
1026 	ret = readw(addr);
1027 	/*
1028 	 * mb() is not required here, because the spin_unlock_irqrestore()
1029 	 * is a barrier.
1030 	 */
1031 
1032 	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1033 
1034 	return ret;
1035 }
1036 
1037 /**
1038  * _hv_pcifront_write_config() - Internal PCI config write
1039  * @hpdev:	The PCI driver's representation of the device
1040  * @where:	Offset within config space
1041  * @size:	Size of the transfer
1042  * @val:	The data being transferred
1043  */
1044 static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
1045 				      int size, u32 val)
1046 {
1047 	unsigned long flags;
1048 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
1049 
1050 	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
1051 	    where + size <= PCI_CAPABILITY_LIST) {
1052 		/* SSIDs and ROM BARs are read-only */
1053 	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
1054 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1055 		/* Choose the function to be written. (See comment above) */
1056 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1057 		/* Make sure the function was chosen before we start writing. */
1058 		wmb();
1059 		/* Write to that function's config space. */
1060 		switch (size) {
1061 		case 1:
1062 			writeb(val, addr);
1063 			break;
1064 		case 2:
1065 			writew(val, addr);
1066 			break;
1067 		default:
1068 			writel(val, addr);
1069 			break;
1070 		}
1071 		/*
1072 		 * Make sure the write was done before we release the spinlock
1073 		 * allowing consecutive reads/writes.
1074 		 */
1075 		mb();
1076 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1077 	} else {
1078 		dev_err(&hpdev->hbus->hdev->device,
1079 			"Attempt to write beyond a function's config space.\n");
1080 	}
1081 }
1082 
1083 /**
1084  * hv_pcifront_read_config() - Read configuration space
1085  * @bus: PCI Bus structure
1086  * @devfn: Device/function
1087  * @where: Offset from base
1088  * @size: Byte/word/dword
1089  * @val: Value to be read
1090  *
1091  * Return: PCIBIOS_SUCCESSFUL on success
1092  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1093  */
1094 static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
1095 				   int where, int size, u32 *val)
1096 {
1097 	struct hv_pcibus_device *hbus =
1098 		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1099 	struct hv_pci_dev *hpdev;
1100 
1101 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1102 	if (!hpdev)
1103 		return PCIBIOS_DEVICE_NOT_FOUND;
1104 
1105 	_hv_pcifront_read_config(hpdev, where, size, val);
1106 
1107 	put_pcichild(hpdev);
1108 	return PCIBIOS_SUCCESSFUL;
1109 }
1110 
1111 /**
1112  * hv_pcifront_write_config() - Write configuration space
1113  * @bus: PCI Bus structure
1114  * @devfn: Device/function
1115  * @where: Offset from base
1116  * @size: Byte/word/dword
1117  * @val: Value to be written to device
1118  *
1119  * Return: PCIBIOS_SUCCESSFUL on success
1120  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1121  */
1122 static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
1123 				    int where, int size, u32 val)
1124 {
1125 	struct hv_pcibus_device *hbus =
1126 	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1127 	struct hv_pci_dev *hpdev;
1128 
1129 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1130 	if (!hpdev)
1131 		return PCIBIOS_DEVICE_NOT_FOUND;
1132 
1133 	_hv_pcifront_write_config(hpdev, where, size, val);
1134 
1135 	put_pcichild(hpdev);
1136 	return PCIBIOS_SUCCESSFUL;
1137 }
1138 
1139 /* PCIe operations */
1140 static struct pci_ops hv_pcifront_ops = {
1141 	.read  = hv_pcifront_read_config,
1142 	.write = hv_pcifront_write_config,
1143 };
1144 
1145 /*
1146  * Paravirtual backchannel
1147  *
1148  * Hyper-V SR-IOV provides a backchannel mechanism in software for
1149  * communication between a VF driver and a PF driver.  These
1150  * "configuration blocks" are similar in concept to PCI configuration space,
1151  * but instead of doing reads and writes in 32-bit chunks through a very slow
1152  * path, packets of up to 128 bytes can be sent or received asynchronously.
1153  *
1154  * Nearly every SR-IOV device contains just such a communications channel in
1155  * hardware, so using this one in software is usually optional.  Using the
1156  * software channel, however, allows driver implementers to leverage software
1157  * tools that fuzz the communications channel looking for vulnerabilities.
1158  *
1159  * The usage model for these packets puts the responsibility for reading or
1160  * writing on the VF driver.  The VF driver sends a read or a write packet,
1161  * indicating which "block" is being referred to by number.
1162  *
1163  * If the PF driver wishes to initiate communication, it can "invalidate" one or
1164  * more of the first 64 blocks.  This invalidation is delivered via a callback
1165  * supplied by the VF driver by this driver.
1166  *
1167  * No protocol is implied, except that supplied by the PF and VF drivers.
1168  */
1169 
1170 struct hv_read_config_compl {
1171 	struct hv_pci_compl comp_pkt;
1172 	void *buf;
1173 	unsigned int len;
1174 	unsigned int bytes_returned;
1175 };
1176 
1177 /**
1178  * hv_pci_read_config_compl() - Invoked when a response packet
1179  * for a read config block operation arrives.
1180  * @context:		Identifies the read config operation
1181  * @resp:		The response packet itself
1182  * @resp_packet_size:	Size in bytes of the response packet
1183  */
1184 static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
1185 				     int resp_packet_size)
1186 {
1187 	struct hv_read_config_compl *comp = context;
1188 	struct pci_read_block_response *read_resp =
1189 		(struct pci_read_block_response *)resp;
1190 	unsigned int data_len, hdr_len;
1191 
1192 	hdr_len = offsetof(struct pci_read_block_response, bytes);
1193 	if (resp_packet_size < hdr_len) {
1194 		comp->comp_pkt.completion_status = -1;
1195 		goto out;
1196 	}
1197 
1198 	data_len = resp_packet_size - hdr_len;
1199 	if (data_len > 0 && read_resp->status == 0) {
1200 		comp->bytes_returned = min(comp->len, data_len);
1201 		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
1202 	} else {
1203 		comp->bytes_returned = 0;
1204 	}
1205 
1206 	comp->comp_pkt.completion_status = read_resp->status;
1207 out:
1208 	complete(&comp->comp_pkt.host_event);
1209 }
1210 
1211 /**
1212  * hv_read_config_block() - Sends a read config block request to
1213  * the back-end driver running in the Hyper-V parent partition.
1214  * @pdev:		The PCI driver's representation for this device.
1215  * @buf:		Buffer into which the config block will be copied.
1216  * @len:		Size in bytes of buf.
1217  * @block_id:		Identifies the config block which has been requested.
1218  * @bytes_returned:	Size which came back from the back-end driver.
1219  *
1220  * Return: 0 on success, -errno on failure
1221  */
1222 static int hv_read_config_block(struct pci_dev *pdev, void *buf,
1223 				unsigned int len, unsigned int block_id,
1224 				unsigned int *bytes_returned)
1225 {
1226 	struct hv_pcibus_device *hbus =
1227 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1228 			     sysdata);
1229 	struct {
1230 		struct pci_packet pkt;
1231 		char buf[sizeof(struct pci_read_block)];
1232 	} pkt;
1233 	struct hv_read_config_compl comp_pkt;
1234 	struct pci_read_block *read_blk;
1235 	int ret;
1236 
1237 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1238 		return -EINVAL;
1239 
1240 	init_completion(&comp_pkt.comp_pkt.host_event);
1241 	comp_pkt.buf = buf;
1242 	comp_pkt.len = len;
1243 
1244 	memset(&pkt, 0, sizeof(pkt));
1245 	pkt.pkt.completion_func = hv_pci_read_config_compl;
1246 	pkt.pkt.compl_ctxt = &comp_pkt;
1247 	read_blk = (struct pci_read_block *)&pkt.pkt.message;
1248 	read_blk->message_type.type = PCI_READ_BLOCK;
1249 	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1250 	read_blk->block_id = block_id;
1251 	read_blk->bytes_requested = len;
1252 
1253 	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
1254 			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
1255 			       VM_PKT_DATA_INBAND,
1256 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1257 	if (ret)
1258 		return ret;
1259 
1260 	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
1261 	if (ret)
1262 		return ret;
1263 
1264 	if (comp_pkt.comp_pkt.completion_status != 0 ||
1265 	    comp_pkt.bytes_returned == 0) {
1266 		dev_err(&hbus->hdev->device,
1267 			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
1268 			comp_pkt.comp_pkt.completion_status,
1269 			comp_pkt.bytes_returned);
1270 		return -EIO;
1271 	}
1272 
1273 	*bytes_returned = comp_pkt.bytes_returned;
1274 	return 0;
1275 }
1276 
1277 /**
1278  * hv_pci_write_config_compl() - Invoked when a response packet for a write
1279  * config block operation arrives.
1280  * @context:		Identifies the write config operation
1281  * @resp:		The response packet itself
1282  * @resp_packet_size:	Size in bytes of the response packet
1283  */
1284 static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
1285 				      int resp_packet_size)
1286 {
1287 	struct hv_pci_compl *comp_pkt = context;
1288 
1289 	comp_pkt->completion_status = resp->status;
1290 	complete(&comp_pkt->host_event);
1291 }
1292 
1293 /**
1294  * hv_write_config_block() - Sends a write config block request to the
1295  * back-end driver running in the Hyper-V parent partition.
1296  * @pdev:		The PCI driver's representation for this device.
1297  * @buf:		Buffer from which the config block will	be copied.
1298  * @len:		Size in bytes of buf.
1299  * @block_id:		Identifies the config block which is being written.
1300  *
1301  * Return: 0 on success, -errno on failure
1302  */
1303 static int hv_write_config_block(struct pci_dev *pdev, void *buf,
1304 				unsigned int len, unsigned int block_id)
1305 {
1306 	struct hv_pcibus_device *hbus =
1307 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1308 			     sysdata);
1309 	struct {
1310 		struct pci_packet pkt;
1311 		char buf[sizeof(struct pci_write_block)];
1312 		u32 reserved;
1313 	} pkt;
1314 	struct hv_pci_compl comp_pkt;
1315 	struct pci_write_block *write_blk;
1316 	u32 pkt_size;
1317 	int ret;
1318 
1319 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1320 		return -EINVAL;
1321 
1322 	init_completion(&comp_pkt.host_event);
1323 
1324 	memset(&pkt, 0, sizeof(pkt));
1325 	pkt.pkt.completion_func = hv_pci_write_config_compl;
1326 	pkt.pkt.compl_ctxt = &comp_pkt;
1327 	write_blk = (struct pci_write_block *)&pkt.pkt.message;
1328 	write_blk->message_type.type = PCI_WRITE_BLOCK;
1329 	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1330 	write_blk->block_id = block_id;
1331 	write_blk->byte_count = len;
1332 	memcpy(write_blk->bytes, buf, len);
1333 	pkt_size = offsetof(struct pci_write_block, bytes) + len;
1334 	/*
1335 	 * This quirk is required on some hosts shipped around 2018, because
1336 	 * these hosts don't check the pkt_size correctly (new hosts have been
1337 	 * fixed since early 2019). The quirk is also safe on very old hosts
1338 	 * and new hosts, because, on them, what really matters is the length
1339 	 * specified in write_blk->byte_count.
1340 	 */
1341 	pkt_size += sizeof(pkt.reserved);
1342 
1343 	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
1344 			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
1345 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1346 	if (ret)
1347 		return ret;
1348 
1349 	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
1350 	if (ret)
1351 		return ret;
1352 
1353 	if (comp_pkt.completion_status != 0) {
1354 		dev_err(&hbus->hdev->device,
1355 			"Write Config Block failed: 0x%x\n",
1356 			comp_pkt.completion_status);
1357 		return -EIO;
1358 	}
1359 
1360 	return 0;
1361 }
1362 
1363 /**
1364  * hv_register_block_invalidate() - Invoked when a config block invalidation
1365  * arrives from the back-end driver.
1366  * @pdev:		The PCI driver's representation for this device.
1367  * @context:		Identifies the device.
1368  * @block_invalidate:	Identifies all of the blocks being invalidated.
1369  *
1370  * Return: 0 on success, -errno on failure
1371  */
1372 static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1373 					void (*block_invalidate)(void *context,
1374 								 u64 block_mask))
1375 {
1376 	struct hv_pcibus_device *hbus =
1377 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1378 			     sysdata);
1379 	struct hv_pci_dev *hpdev;
1380 
1381 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1382 	if (!hpdev)
1383 		return -ENODEV;
1384 
1385 	hpdev->block_invalidate = block_invalidate;
1386 	hpdev->invalidate_context = context;
1387 
1388 	put_pcichild(hpdev);
1389 	return 0;
1390 
1391 }
1392 
1393 /* Interrupt management hooks */
1394 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1395 			     struct tran_int_desc *int_desc)
1396 {
1397 	struct pci_delete_interrupt *int_pkt;
1398 	struct {
1399 		struct pci_packet pkt;
1400 		u8 buffer[sizeof(struct pci_delete_interrupt)];
1401 	} ctxt;
1402 
1403 	memset(&ctxt, 0, sizeof(ctxt));
1404 	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
1405 	int_pkt->message_type.type =
1406 		PCI_DELETE_INTERRUPT_MESSAGE;
1407 	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1408 	int_pkt->int_desc = *int_desc;
1409 	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1410 			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
1411 	kfree(int_desc);
1412 }
1413 
1414 /**
1415  * hv_msi_free() - Free the MSI.
1416  * @domain:	The interrupt domain pointer
1417  * @info:	Extra MSI-related context
1418  * @irq:	Identifies the IRQ.
1419  *
1420  * The Hyper-V parent partition and hypervisor are tracking the
1421  * messages that are in use, keeping the interrupt redirection
1422  * table up to date.  This callback sends a message that frees
1423  * the IRT entry and related tracking nonsense.
1424  */
1425 static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1426 			unsigned int irq)
1427 {
1428 	struct hv_pcibus_device *hbus;
1429 	struct hv_pci_dev *hpdev;
1430 	struct pci_dev *pdev;
1431 	struct tran_int_desc *int_desc;
1432 	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1433 	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1434 
1435 	pdev = msi_desc_to_pci_dev(msi);
1436 	hbus = info->data;
1437 	int_desc = irq_data_get_irq_chip_data(irq_data);
1438 	if (!int_desc)
1439 		return;
1440 
1441 	irq_data->chip_data = NULL;
1442 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1443 	if (!hpdev) {
1444 		kfree(int_desc);
1445 		return;
1446 	}
1447 
1448 	hv_int_desc_free(hpdev, int_desc);
1449 	put_pcichild(hpdev);
1450 }
1451 
1452 static void hv_irq_mask(struct irq_data *data)
1453 {
1454 	pci_msi_mask_irq(data);
1455 	if (data->parent_data->chip->irq_mask)
1456 		irq_chip_mask_parent(data);
1457 }
1458 
1459 /**
1460  * hv_irq_unmask() - "Unmask" the IRQ by setting its current
1461  * affinity.
1462  * @data:	Describes the IRQ
1463  *
1464  * Build new a destination for the MSI and make a hypercall to
1465  * update the Interrupt Redirection Table. "Device Logical ID"
1466  * is built out of this PCI bus's instance GUID and the function
1467  * number of the device.
1468  */
1469 static void hv_irq_unmask(struct irq_data *data)
1470 {
1471 	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
1472 	struct hv_retarget_device_interrupt *params;
1473 	struct hv_pcibus_device *hbus;
1474 	struct cpumask *dest;
1475 	cpumask_var_t tmp;
1476 	struct pci_bus *pbus;
1477 	struct pci_dev *pdev;
1478 	unsigned long flags;
1479 	u32 var_size = 0;
1480 	int cpu, nr_bank;
1481 	u64 res;
1482 
1483 	dest = irq_data_get_effective_affinity_mask(data);
1484 	pdev = msi_desc_to_pci_dev(msi_desc);
1485 	pbus = pdev->bus;
1486 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1487 
1488 	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
1489 
1490 	params = &hbus->retarget_msi_interrupt_params;
1491 	memset(params, 0, sizeof(*params));
1492 	params->partition_id = HV_PARTITION_ID_SELF;
1493 	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
1494 	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
1495 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
1496 			   (hbus->hdev->dev_instance.b[4] << 16) |
1497 			   (hbus->hdev->dev_instance.b[7] << 8) |
1498 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
1499 			   PCI_FUNC(pdev->devfn);
1500 	params->int_target.vector = hv_msi_get_int_vector(data);
1501 
1502 	/*
1503 	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
1504 	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
1505 	 * spurious interrupt storm. Not doing so does not seem to have a
1506 	 * negative effect (yet?).
1507 	 */
1508 
1509 	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
1510 		/*
1511 		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
1512 		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
1513 		 * with >64 VP support.
1514 		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
1515 		 * is not sufficient for this hypercall.
1516 		 */
1517 		params->int_target.flags |=
1518 			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
1519 
1520 		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
1521 			res = 1;
1522 			goto exit_unlock;
1523 		}
1524 
1525 		cpumask_and(tmp, dest, cpu_online_mask);
1526 		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
1527 		free_cpumask_var(tmp);
1528 
1529 		if (nr_bank <= 0) {
1530 			res = 1;
1531 			goto exit_unlock;
1532 		}
1533 
1534 		/*
1535 		 * var-sized hypercall, var-size starts after vp_mask (thus
1536 		 * vp_set.format does not count, but vp_set.valid_bank_mask
1537 		 * does).
1538 		 */
1539 		var_size = 1 + nr_bank;
1540 	} else {
1541 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
1542 			params->int_target.vp_mask |=
1543 				(1ULL << hv_cpu_number_to_vp_number(cpu));
1544 		}
1545 	}
1546 
1547 	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
1548 			      params, NULL);
1549 
1550 exit_unlock:
1551 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
1552 
1553 	/*
1554 	 * During hibernation, when a CPU is offlined, the kernel tries
1555 	 * to move the interrupt to the remaining CPUs that haven't
1556 	 * been offlined yet. In this case, the below hv_do_hypercall()
1557 	 * always fails since the vmbus channel has been closed:
1558 	 * refer to cpu_disable_common() -> fixup_irqs() ->
1559 	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
1560 	 *
1561 	 * Suppress the error message for hibernation because the failure
1562 	 * during hibernation does not matter (at this time all the devices
1563 	 * have been frozen). Note: the correct affinity info is still updated
1564 	 * into the irqdata data structure in migrate_one_irq() ->
1565 	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
1566 	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
1567 	 * the interrupt with the correct affinity.
1568 	 */
1569 	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
1570 		dev_err(&hbus->hdev->device,
1571 			"%s() failed: %#llx", __func__, res);
1572 
1573 	if (data->parent_data->chip->irq_unmask)
1574 		irq_chip_unmask_parent(data);
1575 	pci_msi_unmask_irq(data);
1576 }
1577 
1578 struct compose_comp_ctxt {
1579 	struct hv_pci_compl comp_pkt;
1580 	struct tran_int_desc int_desc;
1581 };
1582 
1583 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1584 				 int resp_packet_size)
1585 {
1586 	struct compose_comp_ctxt *comp_pkt = context;
1587 	struct pci_create_int_response *int_resp =
1588 		(struct pci_create_int_response *)resp;
1589 
1590 	comp_pkt->comp_pkt.completion_status = resp->status;
1591 	comp_pkt->int_desc = int_resp->int_desc;
1592 	complete(&comp_pkt->comp_pkt.host_event);
1593 }
1594 
1595 static u32 hv_compose_msi_req_v1(
1596 	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
1597 	u32 slot, u8 vector)
1598 {
1599 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1600 	int_pkt->wslot.slot = slot;
1601 	int_pkt->int_desc.vector = vector;
1602 	int_pkt->int_desc.vector_count = 1;
1603 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1604 
1605 	/*
1606 	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1607 	 * hv_irq_unmask().
1608 	 */
1609 	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1610 
1611 	return sizeof(*int_pkt);
1612 }
1613 
1614 /*
1615  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1616  * by subsequent retarget in hv_irq_unmask().
1617  */
1618 static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
1619 {
1620 	return cpumask_first_and(affinity, cpu_online_mask);
1621 }
1622 
1623 static u32 hv_compose_msi_req_v2(
1624 	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
1625 	u32 slot, u8 vector)
1626 {
1627 	int cpu;
1628 
1629 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1630 	int_pkt->wslot.slot = slot;
1631 	int_pkt->int_desc.vector = vector;
1632 	int_pkt->int_desc.vector_count = 1;
1633 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1634 	cpu = hv_compose_msi_req_get_cpu(affinity);
1635 	int_pkt->int_desc.processor_array[0] =
1636 		hv_cpu_number_to_vp_number(cpu);
1637 	int_pkt->int_desc.processor_count = 1;
1638 
1639 	return sizeof(*int_pkt);
1640 }
1641 
1642 static u32 hv_compose_msi_req_v3(
1643 	struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
1644 	u32 slot, u32 vector)
1645 {
1646 	int cpu;
1647 
1648 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1649 	int_pkt->wslot.slot = slot;
1650 	int_pkt->int_desc.vector = vector;
1651 	int_pkt->int_desc.reserved = 0;
1652 	int_pkt->int_desc.vector_count = 1;
1653 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1654 	cpu = hv_compose_msi_req_get_cpu(affinity);
1655 	int_pkt->int_desc.processor_array[0] =
1656 		hv_cpu_number_to_vp_number(cpu);
1657 	int_pkt->int_desc.processor_count = 1;
1658 
1659 	return sizeof(*int_pkt);
1660 }
1661 
1662 /**
1663  * hv_compose_msi_msg() - Supplies a valid MSI address/data
1664  * @data:	Everything about this MSI
1665  * @msg:	Buffer that is filled in by this function
1666  *
1667  * This function unpacks the IRQ looking for target CPU set, IDT
1668  * vector and mode and sends a message to the parent partition
1669  * asking for a mapping for that tuple in this partition.  The
1670  * response supplies a data value and address to which that data
1671  * should be written to trigger that interrupt.
1672  */
1673 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1674 {
1675 	struct hv_pcibus_device *hbus;
1676 	struct vmbus_channel *channel;
1677 	struct hv_pci_dev *hpdev;
1678 	struct pci_bus *pbus;
1679 	struct pci_dev *pdev;
1680 	struct cpumask *dest;
1681 	struct compose_comp_ctxt comp;
1682 	struct tran_int_desc *int_desc;
1683 	struct {
1684 		struct pci_packet pci_pkt;
1685 		union {
1686 			struct pci_create_interrupt v1;
1687 			struct pci_create_interrupt2 v2;
1688 			struct pci_create_interrupt3 v3;
1689 		} int_pkts;
1690 	} __packed ctxt;
1691 
1692 	u32 size;
1693 	int ret;
1694 
1695 	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
1696 	dest = irq_data_get_effective_affinity_mask(data);
1697 	pbus = pdev->bus;
1698 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1699 	channel = hbus->hdev->channel;
1700 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1701 	if (!hpdev)
1702 		goto return_null_message;
1703 
1704 	/* Free any previous message that might have already been composed. */
1705 	if (data->chip_data) {
1706 		int_desc = data->chip_data;
1707 		data->chip_data = NULL;
1708 		hv_int_desc_free(hpdev, int_desc);
1709 	}
1710 
1711 	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1712 	if (!int_desc)
1713 		goto drop_reference;
1714 
1715 	memset(&ctxt, 0, sizeof(ctxt));
1716 	init_completion(&comp.comp_pkt.host_event);
1717 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1718 	ctxt.pci_pkt.compl_ctxt = &comp;
1719 
1720 	switch (hbus->protocol_version) {
1721 	case PCI_PROTOCOL_VERSION_1_1:
1722 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1723 					dest,
1724 					hpdev->desc.win_slot.slot,
1725 					hv_msi_get_int_vector(data));
1726 		break;
1727 
1728 	case PCI_PROTOCOL_VERSION_1_2:
1729 	case PCI_PROTOCOL_VERSION_1_3:
1730 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1731 					dest,
1732 					hpdev->desc.win_slot.slot,
1733 					hv_msi_get_int_vector(data));
1734 		break;
1735 
1736 	case PCI_PROTOCOL_VERSION_1_4:
1737 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1738 					dest,
1739 					hpdev->desc.win_slot.slot,
1740 					hv_msi_get_int_vector(data));
1741 		break;
1742 
1743 	default:
1744 		/* As we only negotiate protocol versions known to this driver,
1745 		 * this path should never hit. However, this is it not a hot
1746 		 * path so we print a message to aid future updates.
1747 		 */
1748 		dev_err(&hbus->hdev->device,
1749 			"Unexpected vPCI protocol, update driver.");
1750 		goto free_int_desc;
1751 	}
1752 
1753 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1754 			       size, (unsigned long)&ctxt.pci_pkt,
1755 			       VM_PKT_DATA_INBAND,
1756 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1757 	if (ret) {
1758 		dev_err(&hbus->hdev->device,
1759 			"Sending request for interrupt failed: 0x%x",
1760 			comp.comp_pkt.completion_status);
1761 		goto free_int_desc;
1762 	}
1763 
1764 	/*
1765 	 * Prevents hv_pci_onchannelcallback() from running concurrently
1766 	 * in the tasklet.
1767 	 */
1768 	tasklet_disable_in_atomic(&channel->callback_event);
1769 
1770 	/*
1771 	 * Since this function is called with IRQ locks held, can't
1772 	 * do normal wait for completion; instead poll.
1773 	 */
1774 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1775 		unsigned long flags;
1776 
1777 		/* 0xFFFF means an invalid PCI VENDOR ID. */
1778 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1779 			dev_err_once(&hbus->hdev->device,
1780 				     "the device has gone\n");
1781 			goto enable_tasklet;
1782 		}
1783 
1784 		/*
1785 		 * Make sure that the ring buffer data structure doesn't get
1786 		 * freed while we dereference the ring buffer pointer.  Test
1787 		 * for the channel's onchannel_callback being NULL within a
1788 		 * sched_lock critical section.  See also the inline comments
1789 		 * in vmbus_reset_channel_cb().
1790 		 */
1791 		spin_lock_irqsave(&channel->sched_lock, flags);
1792 		if (unlikely(channel->onchannel_callback == NULL)) {
1793 			spin_unlock_irqrestore(&channel->sched_lock, flags);
1794 			goto enable_tasklet;
1795 		}
1796 		hv_pci_onchannelcallback(hbus);
1797 		spin_unlock_irqrestore(&channel->sched_lock, flags);
1798 
1799 		if (hpdev->state == hv_pcichild_ejecting) {
1800 			dev_err_once(&hbus->hdev->device,
1801 				     "the device is being ejected\n");
1802 			goto enable_tasklet;
1803 		}
1804 
1805 		udelay(100);
1806 	}
1807 
1808 	tasklet_enable(&channel->callback_event);
1809 
1810 	if (comp.comp_pkt.completion_status < 0) {
1811 		dev_err(&hbus->hdev->device,
1812 			"Request for interrupt failed: 0x%x",
1813 			comp.comp_pkt.completion_status);
1814 		goto free_int_desc;
1815 	}
1816 
1817 	/*
1818 	 * Record the assignment so that this can be unwound later. Using
1819 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
1820 	 * is already held.
1821 	 */
1822 	*int_desc = comp.int_desc;
1823 	data->chip_data = int_desc;
1824 
1825 	/* Pass up the result. */
1826 	msg->address_hi = comp.int_desc.address >> 32;
1827 	msg->address_lo = comp.int_desc.address & 0xffffffff;
1828 	msg->data = comp.int_desc.data;
1829 
1830 	put_pcichild(hpdev);
1831 	return;
1832 
1833 enable_tasklet:
1834 	tasklet_enable(&channel->callback_event);
1835 free_int_desc:
1836 	kfree(int_desc);
1837 drop_reference:
1838 	put_pcichild(hpdev);
1839 return_null_message:
1840 	msg->address_hi = 0;
1841 	msg->address_lo = 0;
1842 	msg->data = 0;
1843 }
1844 
1845 /* HW Interrupt Chip Descriptor */
1846 static struct irq_chip hv_msi_irq_chip = {
1847 	.name			= "Hyper-V PCIe MSI",
1848 	.irq_compose_msi_msg	= hv_compose_msi_msg,
1849 	.irq_set_affinity	= irq_chip_set_affinity_parent,
1850 #ifdef CONFIG_X86
1851 	.irq_ack		= irq_chip_ack_parent,
1852 #elif defined(CONFIG_ARM64)
1853 	.irq_eoi		= irq_chip_eoi_parent,
1854 #endif
1855 	.irq_mask		= hv_irq_mask,
1856 	.irq_unmask		= hv_irq_unmask,
1857 };
1858 
1859 static struct msi_domain_ops hv_msi_ops = {
1860 	.msi_prepare	= hv_msi_prepare,
1861 	.msi_free	= hv_msi_free,
1862 };
1863 
1864 /**
1865  * hv_pcie_init_irq_domain() - Initialize IRQ domain
1866  * @hbus:	The root PCI bus
1867  *
1868  * This function creates an IRQ domain which will be used for
1869  * interrupts from devices that have been passed through.  These
1870  * devices only support MSI and MSI-X, not line-based interrupts
1871  * or simulations of line-based interrupts through PCIe's
1872  * fabric-layer messages.  Because interrupts are remapped, we
1873  * can support multi-message MSI here.
1874  *
1875  * Return: '0' on success and error value on failure
1876  */
1877 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
1878 {
1879 	hbus->msi_info.chip = &hv_msi_irq_chip;
1880 	hbus->msi_info.ops = &hv_msi_ops;
1881 	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
1882 		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
1883 		MSI_FLAG_PCI_MSIX);
1884 	hbus->msi_info.handler = FLOW_HANDLER;
1885 	hbus->msi_info.handler_name = FLOW_NAME;
1886 	hbus->msi_info.data = hbus;
1887 	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
1888 						     &hbus->msi_info,
1889 						     hv_pci_get_root_domain());
1890 	if (!hbus->irq_domain) {
1891 		dev_err(&hbus->hdev->device,
1892 			"Failed to build an MSI IRQ domain\n");
1893 		return -ENODEV;
1894 	}
1895 
1896 	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
1897 
1898 	return 0;
1899 }
1900 
1901 /**
1902  * get_bar_size() - Get the address space consumed by a BAR
1903  * @bar_val:	Value that a BAR returned after -1 was written
1904  *              to it.
1905  *
1906  * This function returns the size of the BAR, rounded up to 1
1907  * page.  It has to be rounded up because the hypervisor's page
1908  * table entry that maps the BAR into the VM can't specify an
1909  * offset within a page.  The invariant is that the hypervisor
1910  * must place any BARs of smaller than page length at the
1911  * beginning of a page.
1912  *
1913  * Return:	Size in bytes of the consumed MMIO space.
1914  */
1915 static u64 get_bar_size(u64 bar_val)
1916 {
1917 	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
1918 			PAGE_SIZE);
1919 }
1920 
1921 /**
1922  * survey_child_resources() - Total all MMIO requirements
1923  * @hbus:	Root PCI bus, as understood by this driver
1924  */
1925 static void survey_child_resources(struct hv_pcibus_device *hbus)
1926 {
1927 	struct hv_pci_dev *hpdev;
1928 	resource_size_t bar_size = 0;
1929 	unsigned long flags;
1930 	struct completion *event;
1931 	u64 bar_val;
1932 	int i;
1933 
1934 	/* If nobody is waiting on the answer, don't compute it. */
1935 	event = xchg(&hbus->survey_event, NULL);
1936 	if (!event)
1937 		return;
1938 
1939 	/* If the answer has already been computed, go with it. */
1940 	if (hbus->low_mmio_space || hbus->high_mmio_space) {
1941 		complete(event);
1942 		return;
1943 	}
1944 
1945 	spin_lock_irqsave(&hbus->device_list_lock, flags);
1946 
1947 	/*
1948 	 * Due to an interesting quirk of the PCI spec, all memory regions
1949 	 * for a child device are a power of 2 in size and aligned in memory,
1950 	 * so it's sufficient to just add them up without tracking alignment.
1951 	 */
1952 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
1953 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1954 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
1955 				dev_err(&hbus->hdev->device,
1956 					"There's an I/O BAR in this list!\n");
1957 
1958 			if (hpdev->probed_bar[i] != 0) {
1959 				/*
1960 				 * A probed BAR has all the upper bits set that
1961 				 * can be changed.
1962 				 */
1963 
1964 				bar_val = hpdev->probed_bar[i];
1965 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1966 					bar_val |=
1967 					((u64)hpdev->probed_bar[++i] << 32);
1968 				else
1969 					bar_val |= 0xffffffff00000000ULL;
1970 
1971 				bar_size = get_bar_size(bar_val);
1972 
1973 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1974 					hbus->high_mmio_space += bar_size;
1975 				else
1976 					hbus->low_mmio_space += bar_size;
1977 			}
1978 		}
1979 	}
1980 
1981 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1982 	complete(event);
1983 }
1984 
1985 /**
1986  * prepopulate_bars() - Fill in BARs with defaults
1987  * @hbus:	Root PCI bus, as understood by this driver
1988  *
1989  * The core PCI driver code seems much, much happier if the BARs
1990  * for a device have values upon first scan. So fill them in.
1991  * The algorithm below works down from large sizes to small,
1992  * attempting to pack the assignments optimally. The assumption,
1993  * enforced in other parts of the code, is that the beginning of
1994  * the memory-mapped I/O space will be aligned on the largest
1995  * BAR size.
1996  */
1997 static void prepopulate_bars(struct hv_pcibus_device *hbus)
1998 {
1999 	resource_size_t high_size = 0;
2000 	resource_size_t low_size = 0;
2001 	resource_size_t high_base = 0;
2002 	resource_size_t low_base = 0;
2003 	resource_size_t bar_size;
2004 	struct hv_pci_dev *hpdev;
2005 	unsigned long flags;
2006 	u64 bar_val;
2007 	u32 command;
2008 	bool high;
2009 	int i;
2010 
2011 	if (hbus->low_mmio_space) {
2012 		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2013 		low_base = hbus->low_mmio_res->start;
2014 	}
2015 
2016 	if (hbus->high_mmio_space) {
2017 		high_size = 1ULL <<
2018 			(63 - __builtin_clzll(hbus->high_mmio_space));
2019 		high_base = hbus->high_mmio_res->start;
2020 	}
2021 
2022 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2023 
2024 	/*
2025 	 * Clear the memory enable bit, in case it's already set. This occurs
2026 	 * in the suspend path of hibernation, where the device is suspended,
2027 	 * resumed and suspended again: see hibernation_snapshot() and
2028 	 * hibernation_platform_enter().
2029 	 *
2030 	 * If the memory enable bit is already set, Hyper-V silently ignores
2031 	 * the below BAR updates, and the related PCI device driver can not
2032 	 * work, because reading from the device register(s) always returns
2033 	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2034 	 */
2035 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2036 		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2037 		command &= ~PCI_COMMAND_MEMORY;
2038 		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2039 	}
2040 
2041 	/* Pick addresses for the BARs. */
2042 	do {
2043 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2044 			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2045 				bar_val = hpdev->probed_bar[i];
2046 				if (bar_val == 0)
2047 					continue;
2048 				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2049 				if (high) {
2050 					bar_val |=
2051 						((u64)hpdev->probed_bar[i + 1]
2052 						 << 32);
2053 				} else {
2054 					bar_val |= 0xffffffffULL << 32;
2055 				}
2056 				bar_size = get_bar_size(bar_val);
2057 				if (high) {
2058 					if (high_size != bar_size) {
2059 						i++;
2060 						continue;
2061 					}
2062 					_hv_pcifront_write_config(hpdev,
2063 						PCI_BASE_ADDRESS_0 + (4 * i),
2064 						4,
2065 						(u32)(high_base & 0xffffff00));
2066 					i++;
2067 					_hv_pcifront_write_config(hpdev,
2068 						PCI_BASE_ADDRESS_0 + (4 * i),
2069 						4, (u32)(high_base >> 32));
2070 					high_base += bar_size;
2071 				} else {
2072 					if (low_size != bar_size)
2073 						continue;
2074 					_hv_pcifront_write_config(hpdev,
2075 						PCI_BASE_ADDRESS_0 + (4 * i),
2076 						4,
2077 						(u32)(low_base & 0xffffff00));
2078 					low_base += bar_size;
2079 				}
2080 			}
2081 			if (high_size <= 1 && low_size <= 1) {
2082 				/* Set the memory enable bit. */
2083 				_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
2084 							 &command);
2085 				command |= PCI_COMMAND_MEMORY;
2086 				_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
2087 							  command);
2088 				break;
2089 			}
2090 		}
2091 
2092 		high_size >>= 1;
2093 		low_size >>= 1;
2094 	}  while (high_size || low_size);
2095 
2096 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2097 }
2098 
2099 /*
2100  * Assign entries in sysfs pci slot directory.
2101  *
2102  * Note that this function does not need to lock the children list
2103  * because it is called from pci_devices_present_work which
2104  * is serialized with hv_eject_device_work because they are on the
2105  * same ordered workqueue. Therefore hbus->children list will not change
2106  * even when pci_create_slot sleeps.
2107  */
2108 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2109 {
2110 	struct hv_pci_dev *hpdev;
2111 	char name[SLOT_NAME_SIZE];
2112 	int slot_nr;
2113 
2114 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2115 		if (hpdev->pci_slot)
2116 			continue;
2117 
2118 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2119 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2120 		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2121 					  name, NULL);
2122 		if (IS_ERR(hpdev->pci_slot)) {
2123 			pr_warn("pci_create slot %s failed\n", name);
2124 			hpdev->pci_slot = NULL;
2125 		}
2126 	}
2127 }
2128 
2129 /*
2130  * Remove entries in sysfs pci slot directory.
2131  */
2132 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2133 {
2134 	struct hv_pci_dev *hpdev;
2135 
2136 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2137 		if (!hpdev->pci_slot)
2138 			continue;
2139 		pci_destroy_slot(hpdev->pci_slot);
2140 		hpdev->pci_slot = NULL;
2141 	}
2142 }
2143 
2144 /*
2145  * Set NUMA node for the devices on the bus
2146  */
2147 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2148 {
2149 	struct pci_dev *dev;
2150 	struct pci_bus *bus = hbus->bridge->bus;
2151 	struct hv_pci_dev *hv_dev;
2152 
2153 	list_for_each_entry(dev, &bus->devices, bus_list) {
2154 		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2155 		if (!hv_dev)
2156 			continue;
2157 
2158 		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
2159 			set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
2160 
2161 		put_pcichild(hv_dev);
2162 	}
2163 }
2164 
2165 /**
2166  * create_root_hv_pci_bus() - Expose a new root PCI bus
2167  * @hbus:	Root PCI bus, as understood by this driver
2168  *
2169  * Return: 0 on success, -errno on failure
2170  */
2171 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2172 {
2173 	int error;
2174 	struct pci_host_bridge *bridge = hbus->bridge;
2175 
2176 	bridge->dev.parent = &hbus->hdev->device;
2177 	bridge->sysdata = &hbus->sysdata;
2178 	bridge->ops = &hv_pcifront_ops;
2179 
2180 	error = pci_scan_root_bus_bridge(bridge);
2181 	if (error)
2182 		return error;
2183 
2184 	pci_lock_rescan_remove();
2185 	hv_pci_assign_numa_node(hbus);
2186 	pci_bus_assign_resources(bridge->bus);
2187 	hv_pci_assign_slots(hbus);
2188 	pci_bus_add_devices(bridge->bus);
2189 	pci_unlock_rescan_remove();
2190 	hbus->state = hv_pcibus_installed;
2191 	return 0;
2192 }
2193 
2194 struct q_res_req_compl {
2195 	struct completion host_event;
2196 	struct hv_pci_dev *hpdev;
2197 };
2198 
2199 /**
2200  * q_resource_requirements() - Query Resource Requirements
2201  * @context:		The completion context.
2202  * @resp:		The response that came from the host.
2203  * @resp_packet_size:	The size in bytes of resp.
2204  *
2205  * This function is invoked on completion of a Query Resource
2206  * Requirements packet.
2207  */
2208 static void q_resource_requirements(void *context, struct pci_response *resp,
2209 				    int resp_packet_size)
2210 {
2211 	struct q_res_req_compl *completion = context;
2212 	struct pci_q_res_req_response *q_res_req =
2213 		(struct pci_q_res_req_response *)resp;
2214 	int i;
2215 
2216 	if (resp->status < 0) {
2217 		dev_err(&completion->hpdev->hbus->hdev->device,
2218 			"query resource requirements failed: %x\n",
2219 			resp->status);
2220 	} else {
2221 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2222 			completion->hpdev->probed_bar[i] =
2223 				q_res_req->probed_bar[i];
2224 		}
2225 	}
2226 
2227 	complete(&completion->host_event);
2228 }
2229 
2230 /**
2231  * new_pcichild_device() - Create a new child device
2232  * @hbus:	The internal struct tracking this root PCI bus.
2233  * @desc:	The information supplied so far from the host
2234  *              about the device.
2235  *
2236  * This function creates the tracking structure for a new child
2237  * device and kicks off the process of figuring out what it is.
2238  *
2239  * Return: Pointer to the new tracking struct
2240  */
2241 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2242 		struct hv_pcidev_description *desc)
2243 {
2244 	struct hv_pci_dev *hpdev;
2245 	struct pci_child_message *res_req;
2246 	struct q_res_req_compl comp_pkt;
2247 	struct {
2248 		struct pci_packet init_packet;
2249 		u8 buffer[sizeof(struct pci_child_message)];
2250 	} pkt;
2251 	unsigned long flags;
2252 	int ret;
2253 
2254 	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2255 	if (!hpdev)
2256 		return NULL;
2257 
2258 	hpdev->hbus = hbus;
2259 
2260 	memset(&pkt, 0, sizeof(pkt));
2261 	init_completion(&comp_pkt.host_event);
2262 	comp_pkt.hpdev = hpdev;
2263 	pkt.init_packet.compl_ctxt = &comp_pkt;
2264 	pkt.init_packet.completion_func = q_resource_requirements;
2265 	res_req = (struct pci_child_message *)&pkt.init_packet.message;
2266 	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2267 	res_req->wslot.slot = desc->win_slot.slot;
2268 
2269 	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2270 			       sizeof(struct pci_child_message),
2271 			       (unsigned long)&pkt.init_packet,
2272 			       VM_PKT_DATA_INBAND,
2273 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2274 	if (ret)
2275 		goto error;
2276 
2277 	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2278 		goto error;
2279 
2280 	hpdev->desc = *desc;
2281 	refcount_set(&hpdev->refs, 1);
2282 	get_pcichild(hpdev);
2283 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2284 
2285 	list_add_tail(&hpdev->list_entry, &hbus->children);
2286 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2287 	return hpdev;
2288 
2289 error:
2290 	kfree(hpdev);
2291 	return NULL;
2292 }
2293 
2294 /**
2295  * get_pcichild_wslot() - Find device from slot
2296  * @hbus:	Root PCI bus, as understood by this driver
2297  * @wslot:	Location on the bus
2298  *
2299  * This function looks up a PCI device and returns the internal
2300  * representation of it.  It acquires a reference on it, so that
2301  * the device won't be deleted while somebody is using it.  The
2302  * caller is responsible for calling put_pcichild() to release
2303  * this reference.
2304  *
2305  * Return:	Internal representation of a PCI device
2306  */
2307 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2308 					     u32 wslot)
2309 {
2310 	unsigned long flags;
2311 	struct hv_pci_dev *iter, *hpdev = NULL;
2312 
2313 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2314 	list_for_each_entry(iter, &hbus->children, list_entry) {
2315 		if (iter->desc.win_slot.slot == wslot) {
2316 			hpdev = iter;
2317 			get_pcichild(hpdev);
2318 			break;
2319 		}
2320 	}
2321 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2322 
2323 	return hpdev;
2324 }
2325 
2326 /**
2327  * pci_devices_present_work() - Handle new list of child devices
2328  * @work:	Work struct embedded in struct hv_dr_work
2329  *
2330  * "Bus Relations" is the Windows term for "children of this
2331  * bus."  The terminology is preserved here for people trying to
2332  * debug the interaction between Hyper-V and Linux.  This
2333  * function is called when the parent partition reports a list
2334  * of functions that should be observed under this PCI Express
2335  * port (bus).
2336  *
2337  * This function updates the list, and must tolerate being
2338  * called multiple times with the same information.  The typical
2339  * number of child devices is one, with very atypical cases
2340  * involving three or four, so the algorithms used here can be
2341  * simple and inefficient.
2342  *
2343  * It must also treat the omission of a previously observed device as
2344  * notification that the device no longer exists.
2345  *
2346  * Note that this function is serialized with hv_eject_device_work(),
2347  * because both are pushed to the ordered workqueue hbus->wq.
2348  */
2349 static void pci_devices_present_work(struct work_struct *work)
2350 {
2351 	u32 child_no;
2352 	bool found;
2353 	struct hv_pcidev_description *new_desc;
2354 	struct hv_pci_dev *hpdev;
2355 	struct hv_pcibus_device *hbus;
2356 	struct list_head removed;
2357 	struct hv_dr_work *dr_wrk;
2358 	struct hv_dr_state *dr = NULL;
2359 	unsigned long flags;
2360 
2361 	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2362 	hbus = dr_wrk->bus;
2363 	kfree(dr_wrk);
2364 
2365 	INIT_LIST_HEAD(&removed);
2366 
2367 	/* Pull this off the queue and process it if it was the last one. */
2368 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2369 	while (!list_empty(&hbus->dr_list)) {
2370 		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2371 				      list_entry);
2372 		list_del(&dr->list_entry);
2373 
2374 		/* Throw this away if the list still has stuff in it. */
2375 		if (!list_empty(&hbus->dr_list)) {
2376 			kfree(dr);
2377 			continue;
2378 		}
2379 	}
2380 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2381 
2382 	if (!dr)
2383 		return;
2384 
2385 	/* First, mark all existing children as reported missing. */
2386 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2387 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2388 		hpdev->reported_missing = true;
2389 	}
2390 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2391 
2392 	/* Next, add back any reported devices. */
2393 	for (child_no = 0; child_no < dr->device_count; child_no++) {
2394 		found = false;
2395 		new_desc = &dr->func[child_no];
2396 
2397 		spin_lock_irqsave(&hbus->device_list_lock, flags);
2398 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2399 			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2400 			    (hpdev->desc.v_id == new_desc->v_id) &&
2401 			    (hpdev->desc.d_id == new_desc->d_id) &&
2402 			    (hpdev->desc.ser == new_desc->ser)) {
2403 				hpdev->reported_missing = false;
2404 				found = true;
2405 			}
2406 		}
2407 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2408 
2409 		if (!found) {
2410 			hpdev = new_pcichild_device(hbus, new_desc);
2411 			if (!hpdev)
2412 				dev_err(&hbus->hdev->device,
2413 					"couldn't record a child device.\n");
2414 		}
2415 	}
2416 
2417 	/* Move missing children to a list on the stack. */
2418 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2419 	do {
2420 		found = false;
2421 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2422 			if (hpdev->reported_missing) {
2423 				found = true;
2424 				put_pcichild(hpdev);
2425 				list_move_tail(&hpdev->list_entry, &removed);
2426 				break;
2427 			}
2428 		}
2429 	} while (found);
2430 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2431 
2432 	/* Delete everything that should no longer exist. */
2433 	while (!list_empty(&removed)) {
2434 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2435 					 list_entry);
2436 		list_del(&hpdev->list_entry);
2437 
2438 		if (hpdev->pci_slot)
2439 			pci_destroy_slot(hpdev->pci_slot);
2440 
2441 		put_pcichild(hpdev);
2442 	}
2443 
2444 	switch (hbus->state) {
2445 	case hv_pcibus_installed:
2446 		/*
2447 		 * Tell the core to rescan bus
2448 		 * because there may have been changes.
2449 		 */
2450 		pci_lock_rescan_remove();
2451 		pci_scan_child_bus(hbus->bridge->bus);
2452 		hv_pci_assign_numa_node(hbus);
2453 		hv_pci_assign_slots(hbus);
2454 		pci_unlock_rescan_remove();
2455 		break;
2456 
2457 	case hv_pcibus_init:
2458 	case hv_pcibus_probed:
2459 		survey_child_resources(hbus);
2460 		break;
2461 
2462 	default:
2463 		break;
2464 	}
2465 
2466 	kfree(dr);
2467 }
2468 
2469 /**
2470  * hv_pci_start_relations_work() - Queue work to start device discovery
2471  * @hbus:	Root PCI bus, as understood by this driver
2472  * @dr:		The list of children returned from host
2473  *
2474  * Return:  0 on success, -errno on failure
2475  */
2476 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2477 				       struct hv_dr_state *dr)
2478 {
2479 	struct hv_dr_work *dr_wrk;
2480 	unsigned long flags;
2481 	bool pending_dr;
2482 
2483 	if (hbus->state == hv_pcibus_removing) {
2484 		dev_info(&hbus->hdev->device,
2485 			 "PCI VMBus BUS_RELATIONS: ignored\n");
2486 		return -ENOENT;
2487 	}
2488 
2489 	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2490 	if (!dr_wrk)
2491 		return -ENOMEM;
2492 
2493 	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2494 	dr_wrk->bus = hbus;
2495 
2496 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2497 	/*
2498 	 * If pending_dr is true, we have already queued a work,
2499 	 * which will see the new dr. Otherwise, we need to
2500 	 * queue a new work.
2501 	 */
2502 	pending_dr = !list_empty(&hbus->dr_list);
2503 	list_add_tail(&dr->list_entry, &hbus->dr_list);
2504 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2505 
2506 	if (pending_dr)
2507 		kfree(dr_wrk);
2508 	else
2509 		queue_work(hbus->wq, &dr_wrk->wrk);
2510 
2511 	return 0;
2512 }
2513 
2514 /**
2515  * hv_pci_devices_present() - Handle list of new children
2516  * @hbus:      Root PCI bus, as understood by this driver
2517  * @relations: Packet from host listing children
2518  *
2519  * Process a new list of devices on the bus. The list of devices is
2520  * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
2521  * whenever a new list of devices for this bus appears.
2522  */
2523 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2524 				   struct pci_bus_relations *relations)
2525 {
2526 	struct hv_dr_state *dr;
2527 	int i;
2528 
2529 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2530 		     GFP_NOWAIT);
2531 	if (!dr)
2532 		return;
2533 
2534 	dr->device_count = relations->device_count;
2535 	for (i = 0; i < dr->device_count; i++) {
2536 		dr->func[i].v_id = relations->func[i].v_id;
2537 		dr->func[i].d_id = relations->func[i].d_id;
2538 		dr->func[i].rev = relations->func[i].rev;
2539 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2540 		dr->func[i].subclass = relations->func[i].subclass;
2541 		dr->func[i].base_class = relations->func[i].base_class;
2542 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2543 		dr->func[i].win_slot = relations->func[i].win_slot;
2544 		dr->func[i].ser = relations->func[i].ser;
2545 	}
2546 
2547 	if (hv_pci_start_relations_work(hbus, dr))
2548 		kfree(dr);
2549 }
2550 
2551 /**
2552  * hv_pci_devices_present2() - Handle list of new children
2553  * @hbus:	Root PCI bus, as understood by this driver
2554  * @relations:	Packet from host listing children
2555  *
2556  * This function is the v2 version of hv_pci_devices_present()
2557  */
2558 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2559 				    struct pci_bus_relations2 *relations)
2560 {
2561 	struct hv_dr_state *dr;
2562 	int i;
2563 
2564 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2565 		     GFP_NOWAIT);
2566 	if (!dr)
2567 		return;
2568 
2569 	dr->device_count = relations->device_count;
2570 	for (i = 0; i < dr->device_count; i++) {
2571 		dr->func[i].v_id = relations->func[i].v_id;
2572 		dr->func[i].d_id = relations->func[i].d_id;
2573 		dr->func[i].rev = relations->func[i].rev;
2574 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2575 		dr->func[i].subclass = relations->func[i].subclass;
2576 		dr->func[i].base_class = relations->func[i].base_class;
2577 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2578 		dr->func[i].win_slot = relations->func[i].win_slot;
2579 		dr->func[i].ser = relations->func[i].ser;
2580 		dr->func[i].flags = relations->func[i].flags;
2581 		dr->func[i].virtual_numa_node =
2582 			relations->func[i].virtual_numa_node;
2583 	}
2584 
2585 	if (hv_pci_start_relations_work(hbus, dr))
2586 		kfree(dr);
2587 }
2588 
2589 /**
2590  * hv_eject_device_work() - Asynchronously handles ejection
2591  * @work:	Work struct embedded in internal device struct
2592  *
2593  * This function handles ejecting a device.  Windows will
2594  * attempt to gracefully eject a device, waiting 60 seconds to
2595  * hear back from the guest OS that this completed successfully.
2596  * If this timer expires, the device will be forcibly removed.
2597  */
2598 static void hv_eject_device_work(struct work_struct *work)
2599 {
2600 	struct pci_eject_response *ejct_pkt;
2601 	struct hv_pcibus_device *hbus;
2602 	struct hv_pci_dev *hpdev;
2603 	struct pci_dev *pdev;
2604 	unsigned long flags;
2605 	int wslot;
2606 	struct {
2607 		struct pci_packet pkt;
2608 		u8 buffer[sizeof(struct pci_eject_response)];
2609 	} ctxt;
2610 
2611 	hpdev = container_of(work, struct hv_pci_dev, wrk);
2612 	hbus = hpdev->hbus;
2613 
2614 	WARN_ON(hpdev->state != hv_pcichild_ejecting);
2615 
2616 	/*
2617 	 * Ejection can come before or after the PCI bus has been set up, so
2618 	 * attempt to find it and tear down the bus state, if it exists.  This
2619 	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2620 	 * because hbus->bridge->bus may not exist yet.
2621 	 */
2622 	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2623 	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2624 	if (pdev) {
2625 		pci_lock_rescan_remove();
2626 		pci_stop_and_remove_bus_device(pdev);
2627 		pci_dev_put(pdev);
2628 		pci_unlock_rescan_remove();
2629 	}
2630 
2631 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2632 	list_del(&hpdev->list_entry);
2633 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2634 
2635 	if (hpdev->pci_slot)
2636 		pci_destroy_slot(hpdev->pci_slot);
2637 
2638 	memset(&ctxt, 0, sizeof(ctxt));
2639 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2640 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2641 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2642 	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2643 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
2644 			 VM_PKT_DATA_INBAND, 0);
2645 
2646 	/* For the get_pcichild() in hv_pci_eject_device() */
2647 	put_pcichild(hpdev);
2648 	/* For the two refs got in new_pcichild_device() */
2649 	put_pcichild(hpdev);
2650 	put_pcichild(hpdev);
2651 	/* hpdev has been freed. Do not use it any more. */
2652 }
2653 
2654 /**
2655  * hv_pci_eject_device() - Handles device ejection
2656  * @hpdev:	Internal device tracking struct
2657  *
2658  * This function is invoked when an ejection packet arrives.  It
2659  * just schedules work so that we don't re-enter the packet
2660  * delivery code handling the ejection.
2661  */
2662 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2663 {
2664 	struct hv_pcibus_device *hbus = hpdev->hbus;
2665 	struct hv_device *hdev = hbus->hdev;
2666 
2667 	if (hbus->state == hv_pcibus_removing) {
2668 		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2669 		return;
2670 	}
2671 
2672 	hpdev->state = hv_pcichild_ejecting;
2673 	get_pcichild(hpdev);
2674 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2675 	queue_work(hbus->wq, &hpdev->wrk);
2676 }
2677 
2678 /**
2679  * hv_pci_onchannelcallback() - Handles incoming packets
2680  * @context:	Internal bus tracking struct
2681  *
2682  * This function is invoked whenever the host sends a packet to
2683  * this channel (which is private to this root PCI bus).
2684  */
2685 static void hv_pci_onchannelcallback(void *context)
2686 {
2687 	const int packet_size = 0x100;
2688 	int ret;
2689 	struct hv_pcibus_device *hbus = context;
2690 	u32 bytes_recvd;
2691 	u64 req_id;
2692 	struct vmpacket_descriptor *desc;
2693 	unsigned char *buffer;
2694 	int bufferlen = packet_size;
2695 	struct pci_packet *comp_packet;
2696 	struct pci_response *response;
2697 	struct pci_incoming_message *new_message;
2698 	struct pci_bus_relations *bus_rel;
2699 	struct pci_bus_relations2 *bus_rel2;
2700 	struct pci_dev_inval_block *inval;
2701 	struct pci_dev_incoming *dev_message;
2702 	struct hv_pci_dev *hpdev;
2703 
2704 	buffer = kmalloc(bufferlen, GFP_ATOMIC);
2705 	if (!buffer)
2706 		return;
2707 
2708 	while (1) {
2709 		ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
2710 					   bufferlen, &bytes_recvd, &req_id);
2711 
2712 		if (ret == -ENOBUFS) {
2713 			kfree(buffer);
2714 			/* Handle large packet */
2715 			bufferlen = bytes_recvd;
2716 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2717 			if (!buffer)
2718 				return;
2719 			continue;
2720 		}
2721 
2722 		/* Zero length indicates there are no more packets. */
2723 		if (ret || !bytes_recvd)
2724 			break;
2725 
2726 		/*
2727 		 * All incoming packets must be at least as large as a
2728 		 * response.
2729 		 */
2730 		if (bytes_recvd <= sizeof(struct pci_response))
2731 			continue;
2732 		desc = (struct vmpacket_descriptor *)buffer;
2733 
2734 		switch (desc->type) {
2735 		case VM_PKT_COMP:
2736 
2737 			/*
2738 			 * The host is trusted, and thus it's safe to interpret
2739 			 * this transaction ID as a pointer.
2740 			 */
2741 			comp_packet = (struct pci_packet *)req_id;
2742 			response = (struct pci_response *)buffer;
2743 			comp_packet->completion_func(comp_packet->compl_ctxt,
2744 						     response,
2745 						     bytes_recvd);
2746 			break;
2747 
2748 		case VM_PKT_DATA_INBAND:
2749 
2750 			new_message = (struct pci_incoming_message *)buffer;
2751 			switch (new_message->message_type.type) {
2752 			case PCI_BUS_RELATIONS:
2753 
2754 				bus_rel = (struct pci_bus_relations *)buffer;
2755 				if (bytes_recvd <
2756 					struct_size(bus_rel, func,
2757 						    bus_rel->device_count)) {
2758 					dev_err(&hbus->hdev->device,
2759 						"bus relations too small\n");
2760 					break;
2761 				}
2762 
2763 				hv_pci_devices_present(hbus, bus_rel);
2764 				break;
2765 
2766 			case PCI_BUS_RELATIONS2:
2767 
2768 				bus_rel2 = (struct pci_bus_relations2 *)buffer;
2769 				if (bytes_recvd <
2770 					struct_size(bus_rel2, func,
2771 						    bus_rel2->device_count)) {
2772 					dev_err(&hbus->hdev->device,
2773 						"bus relations v2 too small\n");
2774 					break;
2775 				}
2776 
2777 				hv_pci_devices_present2(hbus, bus_rel2);
2778 				break;
2779 
2780 			case PCI_EJECT:
2781 
2782 				dev_message = (struct pci_dev_incoming *)buffer;
2783 				hpdev = get_pcichild_wslot(hbus,
2784 						      dev_message->wslot.slot);
2785 				if (hpdev) {
2786 					hv_pci_eject_device(hpdev);
2787 					put_pcichild(hpdev);
2788 				}
2789 				break;
2790 
2791 			case PCI_INVALIDATE_BLOCK:
2792 
2793 				inval = (struct pci_dev_inval_block *)buffer;
2794 				hpdev = get_pcichild_wslot(hbus,
2795 							   inval->wslot.slot);
2796 				if (hpdev) {
2797 					if (hpdev->block_invalidate) {
2798 						hpdev->block_invalidate(
2799 						    hpdev->invalidate_context,
2800 						    inval->block_mask);
2801 					}
2802 					put_pcichild(hpdev);
2803 				}
2804 				break;
2805 
2806 			default:
2807 				dev_warn(&hbus->hdev->device,
2808 					"Unimplemented protocol message %x\n",
2809 					new_message->message_type.type);
2810 				break;
2811 			}
2812 			break;
2813 
2814 		default:
2815 			dev_err(&hbus->hdev->device,
2816 				"unhandled packet type %d, tid %llx len %d\n",
2817 				desc->type, req_id, bytes_recvd);
2818 			break;
2819 		}
2820 	}
2821 
2822 	kfree(buffer);
2823 }
2824 
2825 /**
2826  * hv_pci_protocol_negotiation() - Set up protocol
2827  * @hdev:		VMBus's tracking struct for this root PCI bus.
2828  * @version:		Array of supported channel protocol versions in
2829  *			the order of probing - highest go first.
2830  * @num_version:	Number of elements in the version array.
2831  *
2832  * This driver is intended to support running on Windows 10
2833  * (server) and later versions. It will not run on earlier
2834  * versions, as they assume that many of the operations which
2835  * Linux needs accomplished with a spinlock held were done via
2836  * asynchronous messaging via VMBus.  Windows 10 increases the
2837  * surface area of PCI emulation so that these actions can take
2838  * place by suspending a virtual processor for their duration.
2839  *
2840  * This function negotiates the channel protocol version,
2841  * failing if the host doesn't support the necessary protocol
2842  * level.
2843  */
2844 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
2845 				       enum pci_protocol_version_t version[],
2846 				       int num_version)
2847 {
2848 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2849 	struct pci_version_request *version_req;
2850 	struct hv_pci_compl comp_pkt;
2851 	struct pci_packet *pkt;
2852 	int ret;
2853 	int i;
2854 
2855 	/*
2856 	 * Initiate the handshake with the host and negotiate
2857 	 * a version that the host can support. We start with the
2858 	 * highest version number and go down if the host cannot
2859 	 * support it.
2860 	 */
2861 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
2862 	if (!pkt)
2863 		return -ENOMEM;
2864 
2865 	init_completion(&comp_pkt.host_event);
2866 	pkt->completion_func = hv_pci_generic_compl;
2867 	pkt->compl_ctxt = &comp_pkt;
2868 	version_req = (struct pci_version_request *)&pkt->message;
2869 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
2870 
2871 	for (i = 0; i < num_version; i++) {
2872 		version_req->protocol_version = version[i];
2873 		ret = vmbus_sendpacket(hdev->channel, version_req,
2874 				sizeof(struct pci_version_request),
2875 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
2876 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2877 		if (!ret)
2878 			ret = wait_for_response(hdev, &comp_pkt.host_event);
2879 
2880 		if (ret) {
2881 			dev_err(&hdev->device,
2882 				"PCI Pass-through VSP failed to request version: %d",
2883 				ret);
2884 			goto exit;
2885 		}
2886 
2887 		if (comp_pkt.completion_status >= 0) {
2888 			hbus->protocol_version = version[i];
2889 			dev_info(&hdev->device,
2890 				"PCI VMBus probing: Using version %#x\n",
2891 				hbus->protocol_version);
2892 			goto exit;
2893 		}
2894 
2895 		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
2896 			dev_err(&hdev->device,
2897 				"PCI Pass-through VSP failed version request: %#x",
2898 				comp_pkt.completion_status);
2899 			ret = -EPROTO;
2900 			goto exit;
2901 		}
2902 
2903 		reinit_completion(&comp_pkt.host_event);
2904 	}
2905 
2906 	dev_err(&hdev->device,
2907 		"PCI pass-through VSP failed to find supported version");
2908 	ret = -EPROTO;
2909 
2910 exit:
2911 	kfree(pkt);
2912 	return ret;
2913 }
2914 
2915 /**
2916  * hv_pci_free_bridge_windows() - Release memory regions for the
2917  * bus
2918  * @hbus:	Root PCI bus, as understood by this driver
2919  */
2920 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
2921 {
2922 	/*
2923 	 * Set the resources back to the way they looked when they
2924 	 * were allocated by setting IORESOURCE_BUSY again.
2925 	 */
2926 
2927 	if (hbus->low_mmio_space && hbus->low_mmio_res) {
2928 		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
2929 		vmbus_free_mmio(hbus->low_mmio_res->start,
2930 				resource_size(hbus->low_mmio_res));
2931 	}
2932 
2933 	if (hbus->high_mmio_space && hbus->high_mmio_res) {
2934 		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
2935 		vmbus_free_mmio(hbus->high_mmio_res->start,
2936 				resource_size(hbus->high_mmio_res));
2937 	}
2938 }
2939 
2940 /**
2941  * hv_pci_allocate_bridge_windows() - Allocate memory regions
2942  * for the bus
2943  * @hbus:	Root PCI bus, as understood by this driver
2944  *
2945  * This function calls vmbus_allocate_mmio(), which is itself a
2946  * bit of a compromise.  Ideally, we might change the pnp layer
2947  * in the kernel such that it comprehends either PCI devices
2948  * which are "grandchildren of ACPI," with some intermediate bus
2949  * node (in this case, VMBus) or change it such that it
2950  * understands VMBus.  The pnp layer, however, has been declared
2951  * deprecated, and not subject to change.
2952  *
2953  * The workaround, implemented here, is to ask VMBus to allocate
2954  * MMIO space for this bus.  VMBus itself knows which ranges are
2955  * appropriate by looking at its own ACPI objects.  Then, after
2956  * these ranges are claimed, they're modified to look like they
2957  * would have looked if the ACPI and pnp code had allocated
2958  * bridge windows.  These descriptors have to exist in this form
2959  * in order to satisfy the code which will get invoked when the
2960  * endpoint PCI function driver calls request_mem_region() or
2961  * request_mem_region_exclusive().
2962  *
2963  * Return: 0 on success, -errno on failure
2964  */
2965 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
2966 {
2967 	resource_size_t align;
2968 	int ret;
2969 
2970 	if (hbus->low_mmio_space) {
2971 		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2972 		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
2973 					  (u64)(u32)0xffffffff,
2974 					  hbus->low_mmio_space,
2975 					  align, false);
2976 		if (ret) {
2977 			dev_err(&hbus->hdev->device,
2978 				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
2979 				hbus->low_mmio_space);
2980 			return ret;
2981 		}
2982 
2983 		/* Modify this resource to become a bridge window. */
2984 		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
2985 		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
2986 		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
2987 	}
2988 
2989 	if (hbus->high_mmio_space) {
2990 		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
2991 		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
2992 					  0x100000000, -1,
2993 					  hbus->high_mmio_space, align,
2994 					  false);
2995 		if (ret) {
2996 			dev_err(&hbus->hdev->device,
2997 				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
2998 				hbus->high_mmio_space);
2999 			goto release_low_mmio;
3000 		}
3001 
3002 		/* Modify this resource to become a bridge window. */
3003 		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3004 		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3005 		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3006 	}
3007 
3008 	return 0;
3009 
3010 release_low_mmio:
3011 	if (hbus->low_mmio_res) {
3012 		vmbus_free_mmio(hbus->low_mmio_res->start,
3013 				resource_size(hbus->low_mmio_res));
3014 	}
3015 
3016 	return ret;
3017 }
3018 
3019 /**
3020  * hv_allocate_config_window() - Find MMIO space for PCI Config
3021  * @hbus:	Root PCI bus, as understood by this driver
3022  *
3023  * This function claims memory-mapped I/O space for accessing
3024  * configuration space for the functions on this bus.
3025  *
3026  * Return: 0 on success, -errno on failure
3027  */
3028 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3029 {
3030 	int ret;
3031 
3032 	/*
3033 	 * Set up a region of MMIO space to use for accessing configuration
3034 	 * space.
3035 	 */
3036 	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3037 				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3038 	if (ret)
3039 		return ret;
3040 
3041 	/*
3042 	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3043 	 * resource claims (those which cannot be overlapped) and the ranges
3044 	 * which are valid for the children of this bus, which are intended
3045 	 * to be overlapped by those children.  Set the flag on this claim
3046 	 * meaning that this region can't be overlapped.
3047 	 */
3048 
3049 	hbus->mem_config->flags |= IORESOURCE_BUSY;
3050 
3051 	return 0;
3052 }
3053 
3054 static void hv_free_config_window(struct hv_pcibus_device *hbus)
3055 {
3056 	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3057 }
3058 
3059 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3060 
3061 /**
3062  * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3063  * @hdev:	VMBus's tracking struct for this root PCI bus
3064  *
3065  * Return: 0 on success, -errno on failure
3066  */
3067 static int hv_pci_enter_d0(struct hv_device *hdev)
3068 {
3069 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3070 	struct pci_bus_d0_entry *d0_entry;
3071 	struct hv_pci_compl comp_pkt;
3072 	struct pci_packet *pkt;
3073 	int ret;
3074 
3075 	/*
3076 	 * Tell the host that the bus is ready to use, and moved into the
3077 	 * powered-on state.  This includes telling the host which region
3078 	 * of memory-mapped I/O space has been chosen for configuration space
3079 	 * access.
3080 	 */
3081 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3082 	if (!pkt)
3083 		return -ENOMEM;
3084 
3085 	init_completion(&comp_pkt.host_event);
3086 	pkt->completion_func = hv_pci_generic_compl;
3087 	pkt->compl_ctxt = &comp_pkt;
3088 	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3089 	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3090 	d0_entry->mmio_base = hbus->mem_config->start;
3091 
3092 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3093 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3094 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3095 	if (!ret)
3096 		ret = wait_for_response(hdev, &comp_pkt.host_event);
3097 
3098 	if (ret)
3099 		goto exit;
3100 
3101 	if (comp_pkt.completion_status < 0) {
3102 		dev_err(&hdev->device,
3103 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3104 			comp_pkt.completion_status);
3105 		ret = -EPROTO;
3106 		goto exit;
3107 	}
3108 
3109 	ret = 0;
3110 
3111 exit:
3112 	kfree(pkt);
3113 	return ret;
3114 }
3115 
3116 /**
3117  * hv_pci_query_relations() - Ask host to send list of child
3118  * devices
3119  * @hdev:	VMBus's tracking struct for this root PCI bus
3120  *
3121  * Return: 0 on success, -errno on failure
3122  */
3123 static int hv_pci_query_relations(struct hv_device *hdev)
3124 {
3125 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3126 	struct pci_message message;
3127 	struct completion comp;
3128 	int ret;
3129 
3130 	/* Ask the host to send along the list of child devices */
3131 	init_completion(&comp);
3132 	if (cmpxchg(&hbus->survey_event, NULL, &comp))
3133 		return -ENOTEMPTY;
3134 
3135 	memset(&message, 0, sizeof(message));
3136 	message.type = PCI_QUERY_BUS_RELATIONS;
3137 
3138 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3139 			       0, VM_PKT_DATA_INBAND, 0);
3140 	if (!ret)
3141 		ret = wait_for_response(hdev, &comp);
3142 
3143 	return ret;
3144 }
3145 
3146 /**
3147  * hv_send_resources_allocated() - Report local resource choices
3148  * @hdev:	VMBus's tracking struct for this root PCI bus
3149  *
3150  * The host OS is expecting to be sent a request as a message
3151  * which contains all the resources that the device will use.
3152  * The response contains those same resources, "translated"
3153  * which is to say, the values which should be used by the
3154  * hardware, when it delivers an interrupt.  (MMIO resources are
3155  * used in local terms.)  This is nice for Windows, and lines up
3156  * with the FDO/PDO split, which doesn't exist in Linux.  Linux
3157  * is deeply expecting to scan an emulated PCI configuration
3158  * space.  So this message is sent here only to drive the state
3159  * machine on the host forward.
3160  *
3161  * Return: 0 on success, -errno on failure
3162  */
3163 static int hv_send_resources_allocated(struct hv_device *hdev)
3164 {
3165 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3166 	struct pci_resources_assigned *res_assigned;
3167 	struct pci_resources_assigned2 *res_assigned2;
3168 	struct hv_pci_compl comp_pkt;
3169 	struct hv_pci_dev *hpdev;
3170 	struct pci_packet *pkt;
3171 	size_t size_res;
3172 	int wslot;
3173 	int ret;
3174 
3175 	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
3176 			? sizeof(*res_assigned) : sizeof(*res_assigned2);
3177 
3178 	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
3179 	if (!pkt)
3180 		return -ENOMEM;
3181 
3182 	ret = 0;
3183 
3184 	for (wslot = 0; wslot < 256; wslot++) {
3185 		hpdev = get_pcichild_wslot(hbus, wslot);
3186 		if (!hpdev)
3187 			continue;
3188 
3189 		memset(pkt, 0, sizeof(*pkt) + size_res);
3190 		init_completion(&comp_pkt.host_event);
3191 		pkt->completion_func = hv_pci_generic_compl;
3192 		pkt->compl_ctxt = &comp_pkt;
3193 
3194 		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
3195 			res_assigned =
3196 				(struct pci_resources_assigned *)&pkt->message;
3197 			res_assigned->message_type.type =
3198 				PCI_RESOURCES_ASSIGNED;
3199 			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
3200 		} else {
3201 			res_assigned2 =
3202 				(struct pci_resources_assigned2 *)&pkt->message;
3203 			res_assigned2->message_type.type =
3204 				PCI_RESOURCES_ASSIGNED2;
3205 			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
3206 		}
3207 		put_pcichild(hpdev);
3208 
3209 		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
3210 				size_res, (unsigned long)pkt,
3211 				VM_PKT_DATA_INBAND,
3212 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3213 		if (!ret)
3214 			ret = wait_for_response(hdev, &comp_pkt.host_event);
3215 		if (ret)
3216 			break;
3217 
3218 		if (comp_pkt.completion_status < 0) {
3219 			ret = -EPROTO;
3220 			dev_err(&hdev->device,
3221 				"resource allocated returned 0x%x",
3222 				comp_pkt.completion_status);
3223 			break;
3224 		}
3225 
3226 		hbus->wslot_res_allocated = wslot;
3227 	}
3228 
3229 	kfree(pkt);
3230 	return ret;
3231 }
3232 
3233 /**
3234  * hv_send_resources_released() - Report local resources
3235  * released
3236  * @hdev:	VMBus's tracking struct for this root PCI bus
3237  *
3238  * Return: 0 on success, -errno on failure
3239  */
3240 static int hv_send_resources_released(struct hv_device *hdev)
3241 {
3242 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3243 	struct pci_child_message pkt;
3244 	struct hv_pci_dev *hpdev;
3245 	int wslot;
3246 	int ret;
3247 
3248 	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
3249 		hpdev = get_pcichild_wslot(hbus, wslot);
3250 		if (!hpdev)
3251 			continue;
3252 
3253 		memset(&pkt, 0, sizeof(pkt));
3254 		pkt.message_type.type = PCI_RESOURCES_RELEASED;
3255 		pkt.wslot.slot = hpdev->desc.win_slot.slot;
3256 
3257 		put_pcichild(hpdev);
3258 
3259 		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
3260 				       VM_PKT_DATA_INBAND, 0);
3261 		if (ret)
3262 			return ret;
3263 
3264 		hbus->wslot_res_allocated = wslot - 1;
3265 	}
3266 
3267 	hbus->wslot_res_allocated = -1;
3268 
3269 	return 0;
3270 }
3271 
3272 #define HVPCI_DOM_MAP_SIZE (64 * 1024)
3273 static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
3274 
3275 /*
3276  * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
3277  * as invalid for passthrough PCI devices of this driver.
3278  */
3279 #define HVPCI_DOM_INVALID 0
3280 
3281 /**
3282  * hv_get_dom_num() - Get a valid PCI domain number
3283  * Check if the PCI domain number is in use, and return another number if
3284  * it is in use.
3285  *
3286  * @dom: Requested domain number
3287  *
3288  * return: domain number on success, HVPCI_DOM_INVALID on failure
3289  */
3290 static u16 hv_get_dom_num(u16 dom)
3291 {
3292 	unsigned int i;
3293 
3294 	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
3295 		return dom;
3296 
3297 	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
3298 		if (test_and_set_bit(i, hvpci_dom_map) == 0)
3299 			return i;
3300 	}
3301 
3302 	return HVPCI_DOM_INVALID;
3303 }
3304 
3305 /**
3306  * hv_put_dom_num() - Mark the PCI domain number as free
3307  * @dom: Domain number to be freed
3308  */
3309 static void hv_put_dom_num(u16 dom)
3310 {
3311 	clear_bit(dom, hvpci_dom_map);
3312 }
3313 
3314 /**
3315  * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3316  * @hdev:	VMBus's tracking struct for this root PCI bus
3317  * @dev_id:	Identifies the device itself
3318  *
3319  * Return: 0 on success, -errno on failure
3320  */
3321 static int hv_pci_probe(struct hv_device *hdev,
3322 			const struct hv_vmbus_device_id *dev_id)
3323 {
3324 	struct pci_host_bridge *bridge;
3325 	struct hv_pcibus_device *hbus;
3326 	u16 dom_req, dom;
3327 	char *name;
3328 	bool enter_d0_retry = true;
3329 	int ret;
3330 
3331 	/*
3332 	 * hv_pcibus_device contains the hypercall arguments for retargeting in
3333 	 * hv_irq_unmask(). Those must not cross a page boundary.
3334 	 */
3335 	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
3336 
3337 	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
3338 	if (!bridge)
3339 		return -ENOMEM;
3340 
3341 	/*
3342 	 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
3343 	 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
3344 	 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
3345 	 * alignment of hbus is important because hbus's field
3346 	 * retarget_msi_interrupt_params must not cross a 4KB page boundary.
3347 	 *
3348 	 * Here we prefer kzalloc to get_zeroed_page(), because a buffer
3349 	 * allocated by the latter is not tracked and scanned by kmemleak, and
3350 	 * hence kmemleak reports the pointer contained in the hbus buffer
3351 	 * (i.e. the hpdev struct, which is created in new_pcichild_device() and
3352 	 * is tracked by hbus->children) as memory leak (false positive).
3353 	 *
3354 	 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
3355 	 * used to allocate the hbus buffer and we can avoid the kmemleak false
3356 	 * positive by using kmemleak_alloc() and kmemleak_free() to ask
3357 	 * kmemleak to track and scan the hbus buffer.
3358 	 */
3359 	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
3360 	if (!hbus)
3361 		return -ENOMEM;
3362 
3363 	hbus->bridge = bridge;
3364 	hbus->state = hv_pcibus_init;
3365 	hbus->wslot_res_allocated = -1;
3366 
3367 	/*
3368 	 * The PCI bus "domain" is what is called "segment" in ACPI and other
3369 	 * specs. Pull it from the instance ID, to get something usually
3370 	 * unique. In rare cases of collision, we will find out another number
3371 	 * not in use.
3372 	 *
3373 	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3374 	 * together with this guest driver can guarantee that (1) The only
3375 	 * domain used by Gen1 VMs for something that looks like a physical
3376 	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3377 	 * (2) There will be no overlap between domains (after fixing possible
3378 	 * collisions) in the same VM.
3379 	 */
3380 	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
3381 	dom = hv_get_dom_num(dom_req);
3382 
3383 	if (dom == HVPCI_DOM_INVALID) {
3384 		dev_err(&hdev->device,
3385 			"Unable to use dom# 0x%x or other numbers", dom_req);
3386 		ret = -EINVAL;
3387 		goto free_bus;
3388 	}
3389 
3390 	if (dom != dom_req)
3391 		dev_info(&hdev->device,
3392 			 "PCI dom# 0x%x has collision, using 0x%x",
3393 			 dom_req, dom);
3394 
3395 	hbus->bridge->domain_nr = dom;
3396 #ifdef CONFIG_X86
3397 	hbus->sysdata.domain = dom;
3398 #endif
3399 
3400 	hbus->hdev = hdev;
3401 	INIT_LIST_HEAD(&hbus->children);
3402 	INIT_LIST_HEAD(&hbus->dr_list);
3403 	spin_lock_init(&hbus->config_lock);
3404 	spin_lock_init(&hbus->device_list_lock);
3405 	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
3406 	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
3407 					   hbus->bridge->domain_nr);
3408 	if (!hbus->wq) {
3409 		ret = -ENOMEM;
3410 		goto free_dom;
3411 	}
3412 
3413 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3414 			 hv_pci_onchannelcallback, hbus);
3415 	if (ret)
3416 		goto destroy_wq;
3417 
3418 	hv_set_drvdata(hdev, hbus);
3419 
3420 	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
3421 					  ARRAY_SIZE(pci_protocol_versions));
3422 	if (ret)
3423 		goto close;
3424 
3425 	ret = hv_allocate_config_window(hbus);
3426 	if (ret)
3427 		goto close;
3428 
3429 	hbus->cfg_addr = ioremap(hbus->mem_config->start,
3430 				 PCI_CONFIG_MMIO_LENGTH);
3431 	if (!hbus->cfg_addr) {
3432 		dev_err(&hdev->device,
3433 			"Unable to map a virtual address for config space\n");
3434 		ret = -ENOMEM;
3435 		goto free_config;
3436 	}
3437 
3438 	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
3439 	if (!name) {
3440 		ret = -ENOMEM;
3441 		goto unmap;
3442 	}
3443 
3444 	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
3445 	kfree(name);
3446 	if (!hbus->fwnode) {
3447 		ret = -ENOMEM;
3448 		goto unmap;
3449 	}
3450 
3451 	ret = hv_pcie_init_irq_domain(hbus);
3452 	if (ret)
3453 		goto free_fwnode;
3454 
3455 retry:
3456 	ret = hv_pci_query_relations(hdev);
3457 	if (ret)
3458 		goto free_irq_domain;
3459 
3460 	ret = hv_pci_enter_d0(hdev);
3461 	/*
3462 	 * In certain case (Kdump) the pci device of interest was
3463 	 * not cleanly shut down and resource is still held on host
3464 	 * side, the host could return invalid device status.
3465 	 * We need to explicitly request host to release the resource
3466 	 * and try to enter D0 again.
3467 	 * Since the hv_pci_bus_exit() call releases structures
3468 	 * of all its child devices, we need to start the retry from
3469 	 * hv_pci_query_relations() call, requesting host to send
3470 	 * the synchronous child device relations message before this
3471 	 * information is needed in hv_send_resources_allocated()
3472 	 * call later.
3473 	 */
3474 	if (ret == -EPROTO && enter_d0_retry) {
3475 		enter_d0_retry = false;
3476 
3477 		dev_err(&hdev->device, "Retrying D0 Entry\n");
3478 
3479 		/*
3480 		 * Hv_pci_bus_exit() calls hv_send_resources_released()
3481 		 * to free up resources of its child devices.
3482 		 * In the kdump kernel we need to set the
3483 		 * wslot_res_allocated to 255 so it scans all child
3484 		 * devices to release resources allocated in the
3485 		 * normal kernel before panic happened.
3486 		 */
3487 		hbus->wslot_res_allocated = 255;
3488 		ret = hv_pci_bus_exit(hdev, true);
3489 
3490 		if (ret == 0)
3491 			goto retry;
3492 
3493 		dev_err(&hdev->device,
3494 			"Retrying D0 failed with ret %d\n", ret);
3495 	}
3496 	if (ret)
3497 		goto free_irq_domain;
3498 
3499 	ret = hv_pci_allocate_bridge_windows(hbus);
3500 	if (ret)
3501 		goto exit_d0;
3502 
3503 	ret = hv_send_resources_allocated(hdev);
3504 	if (ret)
3505 		goto free_windows;
3506 
3507 	prepopulate_bars(hbus);
3508 
3509 	hbus->state = hv_pcibus_probed;
3510 
3511 	ret = create_root_hv_pci_bus(hbus);
3512 	if (ret)
3513 		goto free_windows;
3514 
3515 	return 0;
3516 
3517 free_windows:
3518 	hv_pci_free_bridge_windows(hbus);
3519 exit_d0:
3520 	(void) hv_pci_bus_exit(hdev, true);
3521 free_irq_domain:
3522 	irq_domain_remove(hbus->irq_domain);
3523 free_fwnode:
3524 	irq_domain_free_fwnode(hbus->fwnode);
3525 unmap:
3526 	iounmap(hbus->cfg_addr);
3527 free_config:
3528 	hv_free_config_window(hbus);
3529 close:
3530 	vmbus_close(hdev->channel);
3531 destroy_wq:
3532 	destroy_workqueue(hbus->wq);
3533 free_dom:
3534 	hv_put_dom_num(hbus->bridge->domain_nr);
3535 free_bus:
3536 	kfree(hbus);
3537 	return ret;
3538 }
3539 
3540 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
3541 {
3542 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3543 	struct {
3544 		struct pci_packet teardown_packet;
3545 		u8 buffer[sizeof(struct pci_message)];
3546 	} pkt;
3547 	struct hv_pci_compl comp_pkt;
3548 	struct hv_pci_dev *hpdev, *tmp;
3549 	unsigned long flags;
3550 	int ret;
3551 
3552 	/*
3553 	 * After the host sends the RESCIND_CHANNEL message, it doesn't
3554 	 * access the per-channel ringbuffer any longer.
3555 	 */
3556 	if (hdev->channel->rescind)
3557 		return 0;
3558 
3559 	if (!keep_devs) {
3560 		struct list_head removed;
3561 
3562 		/* Move all present children to the list on stack */
3563 		INIT_LIST_HEAD(&removed);
3564 		spin_lock_irqsave(&hbus->device_list_lock, flags);
3565 		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
3566 			list_move_tail(&hpdev->list_entry, &removed);
3567 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
3568 
3569 		/* Remove all children in the list */
3570 		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
3571 			list_del(&hpdev->list_entry);
3572 			if (hpdev->pci_slot)
3573 				pci_destroy_slot(hpdev->pci_slot);
3574 			/* For the two refs got in new_pcichild_device() */
3575 			put_pcichild(hpdev);
3576 			put_pcichild(hpdev);
3577 		}
3578 	}
3579 
3580 	ret = hv_send_resources_released(hdev);
3581 	if (ret) {
3582 		dev_err(&hdev->device,
3583 			"Couldn't send resources released packet(s)\n");
3584 		return ret;
3585 	}
3586 
3587 	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
3588 	init_completion(&comp_pkt.host_event);
3589 	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
3590 	pkt.teardown_packet.compl_ctxt = &comp_pkt;
3591 	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
3592 
3593 	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
3594 			       sizeof(struct pci_message),
3595 			       (unsigned long)&pkt.teardown_packet,
3596 			       VM_PKT_DATA_INBAND,
3597 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3598 	if (ret)
3599 		return ret;
3600 
3601 	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
3602 		return -ETIMEDOUT;
3603 
3604 	return 0;
3605 }
3606 
3607 /**
3608  * hv_pci_remove() - Remove routine for this VMBus channel
3609  * @hdev:	VMBus's tracking struct for this root PCI bus
3610  *
3611  * Return: 0 on success, -errno on failure
3612  */
3613 static int hv_pci_remove(struct hv_device *hdev)
3614 {
3615 	struct hv_pcibus_device *hbus;
3616 	int ret;
3617 
3618 	hbus = hv_get_drvdata(hdev);
3619 	if (hbus->state == hv_pcibus_installed) {
3620 		tasklet_disable(&hdev->channel->callback_event);
3621 		hbus->state = hv_pcibus_removing;
3622 		tasklet_enable(&hdev->channel->callback_event);
3623 		destroy_workqueue(hbus->wq);
3624 		hbus->wq = NULL;
3625 		/*
3626 		 * At this point, no work is running or can be scheduled
3627 		 * on hbus-wq. We can't race with hv_pci_devices_present()
3628 		 * or hv_pci_eject_device(), it's safe to proceed.
3629 		 */
3630 
3631 		/* Remove the bus from PCI's point of view. */
3632 		pci_lock_rescan_remove();
3633 		pci_stop_root_bus(hbus->bridge->bus);
3634 		hv_pci_remove_slots(hbus);
3635 		pci_remove_root_bus(hbus->bridge->bus);
3636 		pci_unlock_rescan_remove();
3637 	}
3638 
3639 	ret = hv_pci_bus_exit(hdev, false);
3640 
3641 	vmbus_close(hdev->channel);
3642 
3643 	iounmap(hbus->cfg_addr);
3644 	hv_free_config_window(hbus);
3645 	hv_pci_free_bridge_windows(hbus);
3646 	irq_domain_remove(hbus->irq_domain);
3647 	irq_domain_free_fwnode(hbus->fwnode);
3648 
3649 	hv_put_dom_num(hbus->bridge->domain_nr);
3650 
3651 	kfree(hbus);
3652 	return ret;
3653 }
3654 
3655 static int hv_pci_suspend(struct hv_device *hdev)
3656 {
3657 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3658 	enum hv_pcibus_state old_state;
3659 	int ret;
3660 
3661 	/*
3662 	 * hv_pci_suspend() must make sure there are no pending work items
3663 	 * before calling vmbus_close(), since it runs in a process context
3664 	 * as a callback in dpm_suspend().  When it starts to run, the channel
3665 	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
3666 	 * context, can be still running concurrently and scheduling new work
3667 	 * items onto hbus->wq in hv_pci_devices_present() and
3668 	 * hv_pci_eject_device(), and the work item handlers can access the
3669 	 * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
3670 	 * the work item handler pci_devices_present_work() ->
3671 	 * new_pcichild_device() writes to the vmbus channel.
3672 	 *
3673 	 * To eliminate the race, hv_pci_suspend() disables the channel
3674 	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
3675 	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
3676 	 * it knows that no new work item can be scheduled, and then it flushes
3677 	 * hbus->wq and safely closes the vmbus channel.
3678 	 */
3679 	tasklet_disable(&hdev->channel->callback_event);
3680 
3681 	/* Change the hbus state to prevent new work items. */
3682 	old_state = hbus->state;
3683 	if (hbus->state == hv_pcibus_installed)
3684 		hbus->state = hv_pcibus_removing;
3685 
3686 	tasklet_enable(&hdev->channel->callback_event);
3687 
3688 	if (old_state != hv_pcibus_installed)
3689 		return -EINVAL;
3690 
3691 	flush_workqueue(hbus->wq);
3692 
3693 	ret = hv_pci_bus_exit(hdev, true);
3694 	if (ret)
3695 		return ret;
3696 
3697 	vmbus_close(hdev->channel);
3698 
3699 	return 0;
3700 }
3701 
3702 static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
3703 {
3704 	struct irq_data *irq_data;
3705 	struct msi_desc *entry;
3706 	int ret = 0;
3707 
3708 	msi_lock_descs(&pdev->dev);
3709 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
3710 		irq_data = irq_get_irq_data(entry->irq);
3711 		if (WARN_ON_ONCE(!irq_data)) {
3712 			ret = -EINVAL;
3713 			break;
3714 		}
3715 
3716 		hv_compose_msi_msg(irq_data, &entry->msg);
3717 	}
3718 	msi_unlock_descs(&pdev->dev);
3719 
3720 	return ret;
3721 }
3722 
3723 /*
3724  * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
3725  * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
3726  * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
3727  * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
3728  * Table entries.
3729  */
3730 static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
3731 {
3732 	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
3733 }
3734 
3735 static int hv_pci_resume(struct hv_device *hdev)
3736 {
3737 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3738 	enum pci_protocol_version_t version[1];
3739 	int ret;
3740 
3741 	hbus->state = hv_pcibus_init;
3742 
3743 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3744 			 hv_pci_onchannelcallback, hbus);
3745 	if (ret)
3746 		return ret;
3747 
3748 	/* Only use the version that was in use before hibernation. */
3749 	version[0] = hbus->protocol_version;
3750 	ret = hv_pci_protocol_negotiation(hdev, version, 1);
3751 	if (ret)
3752 		goto out;
3753 
3754 	ret = hv_pci_query_relations(hdev);
3755 	if (ret)
3756 		goto out;
3757 
3758 	ret = hv_pci_enter_d0(hdev);
3759 	if (ret)
3760 		goto out;
3761 
3762 	ret = hv_send_resources_allocated(hdev);
3763 	if (ret)
3764 		goto out;
3765 
3766 	prepopulate_bars(hbus);
3767 
3768 	hv_pci_restore_msi_state(hbus);
3769 
3770 	hbus->state = hv_pcibus_installed;
3771 	return 0;
3772 out:
3773 	vmbus_close(hdev->channel);
3774 	return ret;
3775 }
3776 
3777 static const struct hv_vmbus_device_id hv_pci_id_table[] = {
3778 	/* PCI Pass-through Class ID */
3779 	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
3780 	{ HV_PCIE_GUID, },
3781 	{ },
3782 };
3783 
3784 MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
3785 
3786 static struct hv_driver hv_pci_drv = {
3787 	.name		= "hv_pci",
3788 	.id_table	= hv_pci_id_table,
3789 	.probe		= hv_pci_probe,
3790 	.remove		= hv_pci_remove,
3791 	.suspend	= hv_pci_suspend,
3792 	.resume		= hv_pci_resume,
3793 };
3794 
3795 static void __exit exit_hv_pci_drv(void)
3796 {
3797 	vmbus_driver_unregister(&hv_pci_drv);
3798 
3799 	hvpci_block_ops.read_block = NULL;
3800 	hvpci_block_ops.write_block = NULL;
3801 	hvpci_block_ops.reg_blk_invalidate = NULL;
3802 }
3803 
3804 static int __init init_hv_pci_drv(void)
3805 {
3806 	int ret;
3807 
3808 	if (!hv_is_hyperv_initialized())
3809 		return -ENODEV;
3810 
3811 	ret = hv_pci_irqchip_init();
3812 	if (ret)
3813 		return ret;
3814 
3815 	/* Set the invalid domain number's bit, so it will not be used */
3816 	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
3817 
3818 	/* Initialize PCI block r/w interface */
3819 	hvpci_block_ops.read_block = hv_read_config_block;
3820 	hvpci_block_ops.write_block = hv_write_config_block;
3821 	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
3822 
3823 	return vmbus_driver_register(&hv_pci_drv);
3824 }
3825 
3826 module_init(init_hv_pci_drv);
3827 module_exit(exit_hv_pci_drv);
3828 
3829 MODULE_DESCRIPTION("Hyper-V PCI");
3830 MODULE_LICENSE("GPL v2");
3831